From: Kevin Traynor
To: Fan Zhang
Cc: Maxime Coquelin, dpdk stable
Date: Thu, 29 Nov 2018 13:20:14 +0000
Message-Id: <20181129132128.7609-14-ktraynor@redhat.com>
In-Reply-To: <20181129132128.7609-1-ktraynor@redhat.com>
References: <20181129132128.7609-1-ktraynor@redhat.com>
Subject: [dpdk-stable] patch 'vhost/crypto: fix packet copy in chaining mode' has been queued to stable release 18.08.1

Hi,

FYI, your patch has been queued to stable release 18.08.1

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 12/08/18. So please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs
the patch applied to the branch. If the code is different (i.e. not only
metadata diffs), due for example to a change in context or macro names,
please double-check it.

Thanks.

Kevin Traynor

---
From fac3c5ab38573c74522e5e4ab479087ef1293a4e Mon Sep 17 00:00:00 2001
From: Fan Zhang
Date: Tue, 6 Nov 2018 16:22:48 +0000
Subject: [PATCH] vhost/crypto: fix packet copy in chaining mode

[ upstream commit cd1e8f03abf0323e6210ae89a80b89b242a35260 ]

This patch fixes incorrect packet content copy in chaining mode.
Previously, the content before the cipher offset was overwritten with
all zeros. The patch fixes this by ensuring the correct write-back
source and destination settings are applied during setup.
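In short, the fix replaces the old fixed-length write-back with a linked
list of (src, dst, len) segments built while the request is set up and
drained after the cryptodev finishes. A minimal, self-contained sketch of
that drain loop, simplified from the write_back_data() added below (the
wb_node type and write_back_list() name are illustrative stand-ins for the
patch's vhost_crypto_writeback_data and write_back_data()):

#include <stdint.h>
#include <rte_memcpy.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>

/* Illustrative stand-in for the patch's vhost_crypto_writeback_data:
 * one node per contiguous segment to copy back to the guest buffers. */
struct wb_node {
	uint8_t *src;        /* data produced in the host-side mbuf */
	uint8_t *dst;        /* guest descriptor memory, already mapped */
	uint64_t len;        /* bytes to copy for this segment */
	struct wb_node *next;
};

/* Drain the list after dequeue from the cryptodev: copy each segment
 * back and return the node to its mempool, as write_back_data() does. */
static void
write_back_list(struct wb_node *wb, struct rte_mempool *wb_pool)
{
	struct wb_node *done;

	while (wb != NULL) {
		rte_prefetch0(wb->next);
		rte_memcpy(wb->dst, wb->src, wb->len);
		done = wb;
		wb = wb->next;
		rte_mempool_put(wb_pool, done);
	}
}

Since prepare_write_back_data() starts each list at the requested offset
(the cipher start offset in chain mode), bytes before that offset are
never queued for copy-back and so are no longer zeroed.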
Fixes: 3bb595ecd682 ("vhost/crypto: add request handler") Signed-off-by: Fan Zhang Reviewed-by: Maxime Coquelin --- lib/librte_vhost/vhost_crypto.c | 464 ++++++++++++++++++++++++-------- 1 file changed, 350 insertions(+), 114 deletions(-) diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c index 1affeddfe..340c8a942 100644 --- a/lib/librte_vhost/vhost_crypto.c +++ b/lib/librte_vhost/vhost_crypto.c @@ -199,4 +199,5 @@ struct vhost_crypto { struct rte_mempool *mbuf_pool; struct rte_mempool *sess_pool; + struct rte_mempool *wb_pool; /** DPDK cryptodev ID */ @@ -216,4 +217,11 @@ struct vhost_crypto { } __rte_cache_aligned; +struct vhost_crypto_writeback_data { + uint8_t *src; + uint8_t *dst; + uint64_t len; + struct vhost_crypto_writeback_data *next; +}; + struct vhost_crypto_data_req { struct vring_desc *head; @@ -221,6 +229,6 @@ struct vhost_crypto_data_req { struct virtio_crypto_inhdr *inhdr; struct vhost_virtqueue *vq; - struct vring_desc *wb_desc; - uint16_t wb_len; + struct vhost_crypto_writeback_data *wb; + struct rte_mempool *wb_pool; uint16_t desc_idx; uint16_t len; @@ -508,8 +516,6 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc, } - if (unlikely(left > 0)) { - VC_LOG_ERR("Incorrect virtio descriptor"); + if (unlikely(left > 0)) return -1; - } *cur_desc = &head[desc->next]; @@ -517,4 +523,20 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc, } +static __rte_always_inline void * +get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc *cur_desc, + uint8_t perm) +{ + void *data; + uint64_t dlen = cur_desc->len; + + data = IOVA_TO_VVA(void *, vc_req, cur_desc->addr, &dlen, perm); + if (unlikely(!data || dlen != cur_desc->len)) { + VC_LOG_ERR("Failed to map object"); + return NULL; + } + + return data; +} + static int copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req, @@ -533,8 +555,6 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req, src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen, VHOST_ACCESS_RO); - if (unlikely(!src || !dlen)) { - VC_LOG_ERR("Failed to map descriptor"); + if (unlikely(!src || !dlen)) return -1; - } rte_memcpy((uint8_t *)data, src, dlen); @@ -611,71 +631,156 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req, } -static __rte_always_inline void * -get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc **cur_desc, - uint32_t size, uint8_t perm) +static void +write_back_data(struct vhost_crypto_data_req *vc_req) { - void *data; - uint64_t dlen = (*cur_desc)->len; + struct vhost_crypto_writeback_data *wb_data = vc_req->wb, *wb_last; - data = IOVA_TO_VVA(void *, vc_req, (*cur_desc)->addr, &dlen, perm); - if (unlikely(!data || dlen != (*cur_desc)->len)) { - VC_LOG_ERR("Failed to map object"); - return NULL; + while (wb_data) { + rte_prefetch0(wb_data->next); + rte_memcpy(wb_data->dst, wb_data->src, wb_data->len); + wb_last = wb_data; + wb_data = wb_data->next; + rte_mempool_put(vc_req->wb_pool, wb_last); } +} - if (unlikely(move_desc(vc_req->head, cur_desc, size) < 0)) - return NULL; +static void +free_wb_data(struct vhost_crypto_writeback_data *wb_data, + struct rte_mempool *mp) +{ + while (wb_data->next != NULL) + free_wb_data(wb_data->next, mp); - return data; + rte_mempool_put(mp, wb_data); } -static int -write_back_data(struct rte_crypto_op *op, struct vhost_crypto_data_req *vc_req) +/** + * The function will allocate a vhost_crypto_writeback_data linked list + * containing the source and destination data pointers for the write 
back + * operation after dequeued from Cryptodev PMD queues. + * + * @param vc_req + * The vhost crypto data request pointer + * @param cur_desc + * The pointer of the current in use descriptor pointer. The content of + * cur_desc is expected to be updated after the function execution. + * @param end_wb_data + * The last write back data element to be returned. It is used only in cipher + * and hash chain operations. + * @param src + * The source data pointer + * @param offset + * The offset to both source and destination data. For source data the offset + * is the number of bytes between src and start point of cipher operation. For + * destination data the offset is the number of bytes from *cur_desc->addr + * to the point where the src will be written to. + * @param write_back_len + * The size of the write back length. + * @return + * The pointer to the start of the write back data linked list. + */ +static struct vhost_crypto_writeback_data * +prepare_write_back_data(struct vhost_crypto_data_req *vc_req, + struct vring_desc **cur_desc, + struct vhost_crypto_writeback_data **end_wb_data, + uint8_t *src, + uint32_t offset, + uint64_t write_back_len) { - struct rte_mbuf *mbuf = op->sym->m_dst; - struct vring_desc *head = vc_req->head; - struct vring_desc *desc = vc_req->wb_desc; - int left = vc_req->wb_len; - uint32_t to_write; - uint8_t *src_data = mbuf->buf_addr, *dst; + struct vhost_crypto_writeback_data *wb_data, *head; + struct vring_desc *desc = *cur_desc; uint64_t dlen; + uint8_t *dst; + int ret; - rte_prefetch0(&head[desc->next]); - to_write = RTE_MIN(desc->len, (uint32_t)left); - dlen = desc->len; - dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen, - VHOST_ACCESS_RW); - if (unlikely(!dst || dlen != desc->len)) { - VC_LOG_ERR("Failed to map descriptor"); - return -1; + ret = rte_mempool_get(vc_req->wb_pool, (void **)&head); + if (unlikely(ret < 0)) { + VC_LOG_ERR("no memory"); + goto error_exit; } - rte_memcpy(dst, src_data, to_write); - left -= to_write; - src_data += to_write; + wb_data = head; - while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) { - desc = &head[desc->next]; - rte_prefetch0(&head[desc->next]); - to_write = RTE_MIN(desc->len, (uint32_t)left); + if (likely(desc->len > offset)) { + wb_data->src = src + offset; dlen = desc->len; - dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen, - VHOST_ACCESS_RW); + dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, + &dlen, VHOST_ACCESS_RW) + offset; if (unlikely(!dst || dlen != desc->len)) { VC_LOG_ERR("Failed to map descriptor"); - return -1; + goto error_exit; } - rte_memcpy(dst, src_data, to_write); - left -= to_write; - src_data += to_write; - } + wb_data->dst = dst; + wb_data->len = desc->len - offset; + write_back_len -= wb_data->len; + src += offset + wb_data->len; + offset = 0; + + if (unlikely(write_back_len)) { + ret = rte_mempool_get(vc_req->wb_pool, + (void **)&(wb_data->next)); + if (unlikely(ret < 0)) { + VC_LOG_ERR("no memory"); + goto error_exit; + } + + wb_data = wb_data->next; + } else + wb_data->next = NULL; + } else + offset -= desc->len; + + while (write_back_len) { + desc = &vc_req->head[desc->next]; + if (unlikely(!(desc->flags & VRING_DESC_F_WRITE))) { + VC_LOG_ERR("incorrect descriptor"); + goto error_exit; + } + + if (desc->len <= offset) { + offset -= desc->len; + continue; + } - if (unlikely(left < 0)) { - VC_LOG_ERR("Incorrect virtio descriptor"); - return -1; + dlen = desc->len; + dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen, + VHOST_ACCESS_RW) + offset; + if (unlikely(dst == 
NULL || dlen != desc->len)) { + VC_LOG_ERR("Failed to map descriptor"); + goto error_exit; + } + + wb_data->src = src; + wb_data->dst = dst; + wb_data->len = RTE_MIN(desc->len - offset, write_back_len); + write_back_len -= wb_data->len; + src += wb_data->len; + offset = 0; + + if (write_back_len) { + ret = rte_mempool_get(vc_req->wb_pool, + (void **)&(wb_data->next)); + if (unlikely(ret < 0)) { + VC_LOG_ERR("no memory"); + goto error_exit; + } + + wb_data = wb_data->next; + } else + wb_data->next = NULL; } - return 0; + *cur_desc = &vc_req->head[desc->next]; + + *end_wb_data = wb_data; + + return head; + +error_exit: + if (head) + free_wb_data(head, vc_req->wb_pool); + + return NULL; } @@ -687,4 +792,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, { struct vring_desc *desc = cur_desc; + struct vhost_crypto_writeback_data *ewb = NULL; struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst; uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET); @@ -705,6 +811,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr, cipher->para.src_data_len); - m_src->buf_addr = get_data_ptr(vc_req, &desc, - cipher->para.src_data_len, VHOST_ACCESS_RO); + m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO); if (unlikely(m_src->buf_iova == 0 || m_src->buf_addr == NULL)) { @@ -713,6 +818,16 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, goto error_exit; } + + if (unlikely(move_desc(vc_req->head, &desc, + cipher->para.src_data_len) < 0)) { + VC_LOG_ERR("Incorrect descriptor"); + ret = VIRTIO_CRYPTO_ERR; + goto error_exit; + } + break; case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE: + vc_req->wb_pool = vcrypto->wb_pool; + if (unlikely(cipher->para.src_data_len > RTE_MBUF_DEFAULT_BUF_SIZE)) { @@ -745,6 +860,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, m_dst->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr, cipher->para.dst_data_len); - m_dst->buf_addr = get_data_ptr(vc_req, &desc, - cipher->para.dst_data_len, VHOST_ACCESS_RW); + m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW); if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) { VC_LOG_ERR("zero_copy may fail due to cross page data"); @@ -753,14 +867,22 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, } + if (unlikely(move_desc(vc_req->head, &desc, + cipher->para.dst_data_len) < 0)) { + VC_LOG_ERR("Incorrect descriptor"); + ret = VIRTIO_CRYPTO_ERR; + goto error_exit; + } + m_dst->data_len = cipher->para.dst_data_len; break; case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE: - vc_req->wb_desc = desc; - vc_req->wb_len = cipher->para.dst_data_len; - if (unlikely(move_desc(vc_req->head, &desc, - vc_req->wb_len) < 0)) { + vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb, + rte_pktmbuf_mtod(m_src, uint8_t *), 0, + cipher->para.dst_data_len); + if (unlikely(vc_req->wb == NULL)) { ret = VIRTIO_CRYPTO_ERR; goto error_exit; } + break; default: @@ -776,5 +898,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, op->sym->cipher.data.length = cipher->para.src_data_len; - vc_req->inhdr = get_data_ptr(vc_req, &desc, INHDR_LEN, VHOST_ACCESS_WO); + vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO); if (unlikely(vc_req->inhdr == NULL)) { ret = VIRTIO_CRYPTO_BADMSG; @@ -788,4 +910,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, error_exit: + 
if (vc_req->wb) + free_wb_data(vc_req->wb, vc_req->wb_pool); + vc_req->len = INHDR_LEN; return ret; @@ -798,5 +923,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, struct vring_desc *cur_desc) { - struct vring_desc *desc = cur_desc; + struct vring_desc *desc = cur_desc, *digest_desc; + struct vhost_crypto_writeback_data *ewb = NULL, *ewb2 = NULL; struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst; uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET); @@ -814,12 +940,12 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, m_src->data_len = chain->para.src_data_len; - m_dst->data_len = chain->para.dst_data_len; switch (vcrypto->option) { case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE: + m_dst->data_len = chain->para.dst_data_len; + m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr, chain->para.src_data_len); - m_src->buf_addr = get_data_ptr(vc_req, &desc, - chain->para.src_data_len, VHOST_ACCESS_RO); + m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO); if (unlikely(m_src->buf_iova == 0 || m_src->buf_addr == NULL)) { VC_LOG_ERR("zero_copy may fail due to cross page data"); @@ -827,6 +953,15 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, goto error_exit; } + + if (unlikely(move_desc(vc_req->head, &desc, + chain->para.src_data_len) < 0)) { + VC_LOG_ERR("Incorrect descriptor"); + ret = VIRTIO_CRYPTO_ERR; + goto error_exit; + } break; case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE: + vc_req->wb_pool = vcrypto->wb_pool; + if (unlikely(chain->para.src_data_len > RTE_MBUF_DEFAULT_BUF_SIZE)) { @@ -840,4 +975,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, goto error_exit; } + break; default: @@ -858,6 +994,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, m_dst->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr, chain->para.dst_data_len); - m_dst->buf_addr = get_data_ptr(vc_req, &desc, - chain->para.dst_data_len, VHOST_ACCESS_RW); + m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW); if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) { VC_LOG_ERR("zero_copy may fail due to cross page data"); @@ -866,8 +1001,15 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, } + if (unlikely(move_desc(vc_req->head, &desc, + chain->para.dst_data_len) < 0)) { + VC_LOG_ERR("Incorrect descriptor"); + ret = VIRTIO_CRYPTO_ERR; + goto error_exit; + } + op->sym->auth.digest.phys_addr = gpa_to_hpa(vcrypto->dev, desc->addr, chain->para.hash_result_len); - op->sym->auth.digest.data = get_data_ptr(vc_req, &desc, - chain->para.hash_result_len, VHOST_ACCESS_RW); + op->sym->auth.digest.data = get_data_ptr(vc_req, desc, + VHOST_ACCESS_RW); if (unlikely(op->sym->auth.digest.phys_addr == 0)) { VC_LOG_ERR("zero_copy may fail due to cross page data"); @@ -875,20 +1017,38 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, goto error_exit; } + + if (unlikely(move_desc(vc_req->head, &desc, + chain->para.hash_result_len) < 0)) { + VC_LOG_ERR("Incorrect descriptor"); + ret = VIRTIO_CRYPTO_ERR; + goto error_exit; + } + break; case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE: - digest_offset = m_dst->data_len; - digest_addr = rte_pktmbuf_mtod_offset(m_dst, void *, + vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb, + rte_pktmbuf_mtod(m_src, uint8_t *), + chain->para.cipher_start_src_offset, + chain->para.dst_data_len - + chain->para.cipher_start_src_offset); + if 
(unlikely(vc_req->wb == NULL)) { + ret = VIRTIO_CRYPTO_ERR; + goto error_exit; + } + + digest_offset = m_src->data_len; + digest_addr = rte_pktmbuf_mtod_offset(m_src, void *, digest_offset); + digest_desc = desc; - vc_req->wb_desc = desc; - vc_req->wb_len = m_dst->data_len + chain->para.hash_result_len; - - if (unlikely(move_desc(vc_req->head, &desc, - chain->para.dst_data_len) < 0)) { - ret = VIRTIO_CRYPTO_BADMSG; + /** create a wb_data for digest */ + ewb->next = prepare_write_back_data(vc_req, &desc, &ewb2, + digest_addr, 0, chain->para.hash_result_len); + if (unlikely(ewb->next == NULL)) { + ret = VIRTIO_CRYPTO_ERR; goto error_exit; } - if (unlikely(copy_data(digest_addr, vc_req, &desc, + if (unlikely(copy_data(digest_addr, vc_req, &digest_desc, chain->para.hash_result_len)) < 0) { ret = VIRTIO_CRYPTO_BADMSG; @@ -897,5 +1057,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, op->sym->auth.digest.data = digest_addr; - op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m_dst, + op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m_src, digest_offset); break; @@ -906,5 +1066,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, /* record inhdr */ - vc_req->inhdr = get_data_ptr(vc_req, &desc, INHDR_LEN, VHOST_ACCESS_WO); + vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO); if (unlikely(vc_req->inhdr == NULL)) { ret = VIRTIO_CRYPTO_BADMSG; @@ -929,4 +1089,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, error_exit: + if (vc_req->wb) + free_wb_data(vc_req->wb, vc_req->wb_pool); vc_req->len = INHDR_LEN; return ret; @@ -969,5 +1131,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto, vc_req->zero_copy = vcrypto->option; - req = get_data_ptr(vc_req, &desc, sizeof(*req), VHOST_ACCESS_RO); + req = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO); if (unlikely(req == NULL)) { switch (vcrypto->option) { @@ -990,4 +1152,10 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto, goto error_exit; } + } else { + if (unlikely(move_desc(vc_req->head, &desc, + sizeof(*req)) < 0)) { + VC_LOG_ERR("Incorrect descriptor"); + goto error_exit; + } } @@ -1064,5 +1232,4 @@ vhost_crypto_finalize_one_request(struct rte_crypto_op *op, struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(m_src); uint16_t desc_idx; - int ret = 0; if (unlikely(!vc_req)) { @@ -1079,9 +1246,6 @@ vhost_crypto_finalize_one_request(struct rte_crypto_op *op, vc_req->inhdr->status = VIRTIO_CRYPTO_ERR; else { - if (vc_req->zero_copy == 0) { - ret = write_back_data(op, vc_req); - if (unlikely(ret != 0)) - vc_req->inhdr->status = VIRTIO_CRYPTO_ERR; - } + if (vc_req->zero_copy == 0) + write_back_data(vc_req); } @@ -1089,7 +1253,9 @@ vhost_crypto_finalize_one_request(struct rte_crypto_op *op, vc_req->vq->used->ring[desc_idx].len = vc_req->len; - rte_mempool_put(m_dst->pool, (void *)m_dst); rte_mempool_put(m_src->pool, (void *)m_src); + if (m_dst) + rte_mempool_put(m_dst->pool, (void *)m_dst); + return vc_req->vq; } @@ -1188,4 +1354,16 @@ rte_vhost_crypto_create(int vid, uint8_t cryptodev_id, } + snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid); + vcrypto->wb_pool = rte_mempool_create(name, + VHOST_CRYPTO_MBUF_POOL_SIZE, + sizeof(struct vhost_crypto_writeback_data), + 128, 0, NULL, NULL, NULL, NULL, + rte_socket_id(), 0); + if (!vcrypto->wb_pool) { + VC_LOG_ERR("Failed to creath mempool"); + ret = -ENOMEM; + goto error_exit; + } + dev->extern_data = vcrypto; dev->extern_ops.pre_msg_handle = NULL; @@ -1224,4 +1402,5 @@ 
rte_vhost_crypto_free(int vid) rte_hash_free(vcrypto->session_map); rte_mempool_free(vcrypto->mbuf_pool); + rte_mempool_free(vcrypto->wb_pool); rte_free(vcrypto); @@ -1259,9 +1438,28 @@ rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option) return 0; - if (!(rte_mempool_full(vcrypto->mbuf_pool))) { + if (!(rte_mempool_full(vcrypto->mbuf_pool)) || + !(rte_mempool_full(vcrypto->wb_pool))) { VC_LOG_ERR("Cannot update zero copy as mempool is not full"); return -EINVAL; } + if (option == RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE) { + char name[128]; + + snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid); + vcrypto->wb_pool = rte_mempool_create(name, + VHOST_CRYPTO_MBUF_POOL_SIZE, + sizeof(struct vhost_crypto_writeback_data), + 128, 0, NULL, NULL, NULL, NULL, + rte_socket_id(), 0); + if (!vcrypto->wb_pool) { + VC_LOG_ERR("Failed to creath mbuf pool"); + return -ENOMEM; + } + } else { + rte_mempool_free(vcrypto->wb_pool); + vcrypto->wb_pool = NULL; + } + vcrypto->option = (uint8_t)option; @@ -1279,7 +1477,6 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid, uint16_t avail_idx; uint16_t start_idx; - uint16_t required; uint16_t count; - uint16_t i; + uint16_t i = 0; if (unlikely(dev == NULL)) { @@ -1313,25 +1510,64 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid, * we need only 1 mbuf as src and dst */ - required = count * 2; - if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool, (void **)mbufs, - required) < 0)) { - VC_LOG_ERR("Insufficient memory"); - return -ENOMEM; - } - - for (i = 0; i < count; i++) { - uint16_t used_idx = (start_idx + i) & (vq->size - 1); - uint16_t desc_idx = vq->avail->ring[used_idx]; - struct vring_desc *head = &vq->desc[desc_idx]; - struct rte_crypto_op *op = ops[i]; - - op->sym->m_src = mbufs[i * 2]; - op->sym->m_dst = mbufs[i * 2 + 1]; - op->sym->m_src->data_off = 0; - op->sym->m_dst->data_off = 0; - - if (unlikely(vhost_crypto_process_one_req(vcrypto, vq, op, head, - desc_idx)) < 0) - break; + switch (vcrypto->option) { + case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE: + if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool, + (void **)mbufs, count * 2) < 0)) { + VC_LOG_ERR("Insufficient memory"); + return -ENOMEM; + } + + for (i = 0; i < count; i++) { + uint16_t used_idx = (start_idx + i) & (vq->size - 1); + uint16_t desc_idx = vq->avail->ring[used_idx]; + struct vring_desc *head = &vq->desc[desc_idx]; + struct rte_crypto_op *op = ops[i]; + + op->sym->m_src = mbufs[i * 2]; + op->sym->m_dst = mbufs[i * 2 + 1]; + op->sym->m_src->data_off = 0; + op->sym->m_dst->data_off = 0; + + if (unlikely(vhost_crypto_process_one_req(vcrypto, vq, + op, head, desc_idx)) < 0) + break; + } + + if (unlikely(i < count)) + rte_mempool_put_bulk(vcrypto->mbuf_pool, + (void **)&mbufs[i * 2], + (count - i) * 2); + + break; + + case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE: + if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool, + (void **)mbufs, count) < 0)) { + VC_LOG_ERR("Insufficient memory"); + return -ENOMEM; + } + + for (i = 0; i < count; i++) { + uint16_t used_idx = (start_idx + i) & (vq->size - 1); + uint16_t desc_idx = vq->avail->ring[used_idx]; + struct vring_desc *head = &vq->desc[desc_idx]; + struct rte_crypto_op *op = ops[i]; + + op->sym->m_src = mbufs[i]; + op->sym->m_dst = NULL; + op->sym->m_src->data_off = 0; + + if (unlikely(vhost_crypto_process_one_req(vcrypto, vq, + op, head, desc_idx)) < 0) + break; + } + + if (unlikely(i < count)) + rte_mempool_put_bulk(vcrypto->mbuf_pool, + (void **)&mbufs[i], + count - i); + + break; + } -- 2.19.0 --- Diff of the 
applied patch vs upstream commit (please double-check if non-empty: --- --- - 2018-11-29 13:11:35.371781800 +0000 +++ 0013-vhost-crypto-fix-packet-copy-in-chaining-mode.patch 2018-11-29 13:11:34.000000000 +0000 @@ -1,8 +1,10 @@ -From cd1e8f03abf0323e6210ae89a80b89b242a35260 Mon Sep 17 00:00:00 2001 +From fac3c5ab38573c74522e5e4ab479087ef1293a4e Mon Sep 17 00:00:00 2001 From: Fan Zhang Date: Tue, 6 Nov 2018 16:22:48 +0000 Subject: [PATCH] vhost/crypto: fix packet copy in chaining mode +[ upstream commit cd1e8f03abf0323e6210ae89a80b89b242a35260 ] + This patch fixes the incorrect packet content copy in the chaining mode. Originally the content before cipher offset is overwritten by all zeros. This patch fixes the problem by @@ -10,7 +12,6 @@ settings during set up. Fixes: 3bb595ecd682 ("vhost/crypto: add request handler") -Cc: stable@dpdk.org Signed-off-by: Fan Zhang Reviewed-by: Maxime Coquelin @@ -19,7 +20,7 @@ 1 file changed, 350 insertions(+), 114 deletions(-) diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c -index 5472bead0..dd01afc08 100644 +index 1affeddfe..340c8a942 100644 --- a/lib/librte_vhost/vhost_crypto.c +++ b/lib/librte_vhost/vhost_crypto.c @@ -199,4 +199,5 @@ struct vhost_crypto { @@ -49,7 +50,7 @@ + struct rte_mempool *wb_pool; uint16_t desc_idx; uint16_t len; -@@ -507,8 +515,6 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc, +@@ -508,8 +516,6 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc, } - if (unlikely(left > 0)) { @@ -59,7 +60,7 @@ - } *cur_desc = &head[desc->next]; -@@ -516,4 +522,20 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc, +@@ -517,4 +523,20 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc, } +static __rte_always_inline void * @@ -80,7 +81,7 @@ + static int copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req, -@@ -532,8 +554,6 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req, +@@ -533,8 +555,6 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req, src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen, VHOST_ACCESS_RO); - if (unlikely(!src || !dlen)) { @@ -90,7 +91,7 @@ - } rte_memcpy((uint8_t *)data, src, dlen); -@@ -610,71 +630,156 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req, +@@ -611,71 +631,156 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req, } -static __rte_always_inline void * @@ -293,13 +294,13 @@ + return NULL; } -@@ -686,4 +791,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, +@@ -687,4 +792,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, { struct vring_desc *desc = cur_desc; + struct vhost_crypto_writeback_data *ewb = NULL; struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst; uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET); -@@ -704,6 +810,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, +@@ -705,6 +811,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr, cipher->para.src_data_len); - m_src->buf_addr = get_data_ptr(vc_req, &desc, @@ -307,7 +308,7 @@ + m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO); if (unlikely(m_src->buf_iova == 0 || m_src->buf_addr == NULL)) { -@@ -712,6 +817,16 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, +@@ -713,6 +818,16 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, 
struct rte_crypto_op *op, goto error_exit; } + @@ -324,7 +325,7 @@ + if (unlikely(cipher->para.src_data_len > RTE_MBUF_DEFAULT_BUF_SIZE)) { -@@ -744,6 +859,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, +@@ -745,6 +860,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, m_dst->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr, cipher->para.dst_data_len); - m_dst->buf_addr = get_data_ptr(vc_req, &desc, @@ -332,7 +333,7 @@ + m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW); if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) { VC_LOG_ERR("zero_copy may fail due to cross page data"); -@@ -752,14 +866,22 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, +@@ -753,14 +867,22 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, } + if (unlikely(move_desc(vc_req->head, &desc, @@ -359,14 +360,14 @@ + break; default: -@@ -775,5 +897,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, +@@ -776,5 +898,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, op->sym->cipher.data.length = cipher->para.src_data_len; - vc_req->inhdr = get_data_ptr(vc_req, &desc, INHDR_LEN, VHOST_ACCESS_WO); + vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO); if (unlikely(vc_req->inhdr == NULL)) { ret = VIRTIO_CRYPTO_BADMSG; -@@ -787,4 +909,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, +@@ -788,4 +910,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, error_exit: + if (vc_req->wb) @@ -374,7 +375,7 @@ + vc_req->len = INHDR_LEN; return ret; -@@ -797,5 +922,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, +@@ -798,5 +923,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, struct vring_desc *cur_desc) { - struct vring_desc *desc = cur_desc; @@ -382,7 +383,7 @@ + struct vhost_crypto_writeback_data *ewb = NULL, *ewb2 = NULL; struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst; uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET); -@@ -813,12 +939,12 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, +@@ -814,12 +940,12 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, m_src->data_len = chain->para.src_data_len; - m_dst->data_len = chain->para.dst_data_len; @@ -398,7 +399,7 @@ + m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO); if (unlikely(m_src->buf_iova == 0 || m_src->buf_addr == NULL)) { VC_LOG_ERR("zero_copy may fail due to cross page data"); -@@ -826,6 +952,15 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, +@@ -827,6 +953,15 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, goto error_exit; } + @@ -414,13 +415,13 @@ + if (unlikely(chain->para.src_data_len > RTE_MBUF_DEFAULT_BUF_SIZE)) { -@@ -839,4 +974,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, +@@ -840,4 +975,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, goto error_exit; } + break; default: -@@ -857,6 +993,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, +@@ -858,6 +994,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, m_dst->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr, chain->para.dst_data_len); - m_dst->buf_addr = 
get_data_ptr(vc_req, &desc, @@ -428,7 +429,7 @@ + m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW); if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) { VC_LOG_ERR("zero_copy may fail due to cross page data"); -@@ -865,8 +1000,15 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, +@@ -866,8 +1001,15 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, } + if (unlikely(move_desc(vc_req->head, &desc, @@ -446,7 +447,7 @@ + VHOST_ACCESS_RW); if (unlikely(op->sym->auth.digest.phys_addr == 0)) { VC_LOG_ERR("zero_copy may fail due to cross page data"); -@@ -874,20 +1016,38 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, +@@ -875,20 +1017,38 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, goto error_exit; } + @@ -494,35 +495,35 @@ + if (unlikely(copy_data(digest_addr, vc_req, &digest_desc, chain->para.hash_result_len)) < 0) { ret = VIRTIO_CRYPTO_BADMSG; -@@ -896,5 +1056,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, +@@ -897,5 +1057,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, op->sym->auth.digest.data = digest_addr; - op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m_dst, + op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m_src, digest_offset); break; -@@ -905,5 +1065,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, +@@ -906,5 +1066,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, /* record inhdr */ - vc_req->inhdr = get_data_ptr(vc_req, &desc, INHDR_LEN, VHOST_ACCESS_WO); + vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO); if (unlikely(vc_req->inhdr == NULL)) { ret = VIRTIO_CRYPTO_BADMSG; -@@ -928,4 +1088,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, +@@ -929,4 +1089,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op, error_exit: + if (vc_req->wb) + free_wb_data(vc_req->wb, vc_req->wb_pool); vc_req->len = INHDR_LEN; return ret; -@@ -968,5 +1130,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto, +@@ -969,5 +1131,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto, vc_req->zero_copy = vcrypto->option; - req = get_data_ptr(vc_req, &desc, sizeof(*req), VHOST_ACCESS_RO); + req = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO); if (unlikely(req == NULL)) { switch (vcrypto->option) { -@@ -989,4 +1151,10 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto, +@@ -990,4 +1152,10 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto, goto error_exit; } + } else { @@ -533,13 +534,13 @@ + } } -@@ -1063,5 +1231,4 @@ vhost_crypto_finalize_one_request(struct rte_crypto_op *op, +@@ -1064,5 +1232,4 @@ vhost_crypto_finalize_one_request(struct rte_crypto_op *op, struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(m_src); uint16_t desc_idx; - int ret = 0; if (unlikely(!vc_req)) { -@@ -1078,9 +1245,6 @@ vhost_crypto_finalize_one_request(struct rte_crypto_op *op, +@@ -1079,9 +1246,6 @@ vhost_crypto_finalize_one_request(struct rte_crypto_op *op, vc_req->inhdr->status = VIRTIO_CRYPTO_ERR; else { - if (vc_req->zero_copy == 0) { @@ -551,7 +552,7 @@ + write_back_data(vc_req); } -@@ -1088,7 +1252,9 @@ vhost_crypto_finalize_one_request(struct rte_crypto_op *op, +@@ -1089,7 +1253,9 @@ vhost_crypto_finalize_one_request(struct rte_crypto_op *op, vc_req->vq->used->ring[desc_idx].len = vc_req->len; - 
rte_mempool_put(m_dst->pool, (void *)m_dst); @@ -562,7 +563,7 @@ + return vc_req->vq; } -@@ -1187,4 +1353,16 @@ rte_vhost_crypto_create(int vid, uint8_t cryptodev_id, +@@ -1188,4 +1354,16 @@ rte_vhost_crypto_create(int vid, uint8_t cryptodev_id, } + snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid); @@ -579,13 +580,13 @@ + dev->extern_data = vcrypto; dev->extern_ops.pre_msg_handle = NULL; -@@ -1223,4 +1401,5 @@ rte_vhost_crypto_free(int vid) +@@ -1224,4 +1402,5 @@ rte_vhost_crypto_free(int vid) rte_hash_free(vcrypto->session_map); rte_mempool_free(vcrypto->mbuf_pool); + rte_mempool_free(vcrypto->wb_pool); rte_free(vcrypto); -@@ -1258,9 +1437,28 @@ rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option) +@@ -1259,9 +1438,28 @@ rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option) return 0; - if (!(rte_mempool_full(vcrypto->mbuf_pool))) { @@ -615,7 +616,7 @@ + vcrypto->option = (uint8_t)option; -@@ -1278,7 +1476,6 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid, +@@ -1279,7 +1477,6 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid, uint16_t avail_idx; uint16_t start_idx; - uint16_t required; @@ -624,7 +625,7 @@ + uint16_t i = 0; if (unlikely(dev == NULL)) { -@@ -1312,25 +1509,64 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid, +@@ -1313,25 +1510,64 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid, * we need only 1 mbuf as src and dst */ - required = count * 2;
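For completeness, the per-VM "WB_POOL_VM_<vid>" mempool that backs these
write-back nodes uses the stock rte_mempool API; a minimal sketch, assuming
a default EAL environment (wb_node and wb_pool_demo() are illustrative
names, and 8192 stands in for VHOST_CRYPTO_MBUF_POOL_SIZE, whose value is
not shown in this excerpt):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <rte_lcore.h>
#include <rte_mempool.h>

struct wb_node {
	uint8_t *src;
	uint8_t *dst;
	uint64_t len;
	struct wb_node *next;
};

/* Create a pool of write-back nodes, check one element out and return it,
 * mirroring the rte_mempool_create() call added in rte_vhost_crypto_create(). */
static int
wb_pool_demo(int vid)
{
	char name[128];
	struct rte_mempool *wb_pool;
	struct wb_node *node;

	snprintf(name, sizeof(name), "WB_POOL_VM_%u", (uint32_t)vid);
	wb_pool = rte_mempool_create(name, 8192,
			sizeof(struct wb_node), 128, 0,
			NULL, NULL, NULL, NULL,   /* no pool/object constructors */
			rte_socket_id(), 0);
	if (wb_pool == NULL)
		return -ENOMEM;

	if (rte_mempool_get(wb_pool, (void **)&node) == 0) {
		/* prepare_write_back_data() would fill src/dst/len here */
		node->next = NULL;
		rte_mempool_put(wb_pool, node);
	}

	rte_mempool_free(wb_pool);
	return 0;
}

If the pool runs dry mid-request, prepare_write_back_data() bails out and
frees the partial list, which is why free_wb_data() returns every node to
the same pool.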