patches for DPDK stable branches
From: Kevin Traynor <ktraynor@redhat.com>
To: Fan Zhang <roy.fan.zhang@intel.com>
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>,
	dpdk stable <stable@dpdk.org>
Subject: [dpdk-stable] patch 'vhost/crypto: fix packet copy in chaining mode' has been queued to stable release 18.08.1
Date: Thu, 29 Nov 2018 13:20:14 +0000
Message-ID: <20181129132128.7609-14-ktraynor@redhat.com>
In-Reply-To: <20181129132128.7609-1-ktraynor@redhat.com>

Hi,

FYI, your patch has been queued to stable release 18.08.1

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 12/08/18, so please
shout if you have any objections.

Also note that after the patch there's a diff of the upstream commit vs the patch applied
to the branch. If the code is different (i.e. not only metadata diffs), due for example to
a change in context or macro names, please double-check it.

Thanks.

Kevin Traynor

---
From fac3c5ab38573c74522e5e4ab479087ef1293a4e Mon Sep 17 00:00:00 2001
From: Fan Zhang <roy.fan.zhang@intel.com>
Date: Tue, 6 Nov 2018 16:22:48 +0000
Subject: [PATCH] vhost/crypto: fix packet copy in chaining mode

[ upstream commit cd1e8f03abf0323e6210ae89a80b89b242a35260 ]

This patch fixes the incorrect packet content copy in the
chaining mode. Originally, the content before the cipher offset is
overwritten by all zeros. This patch fixes the problem by
ensuring the correct write-back source and destination
settings during setup.

Fixes: 3bb595ecd682 ("vhost/crypto: add request handler")

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/librte_vhost/vhost_crypto.c | 464 ++++++++++++++++++++++++--------
 1 file changed, 350 insertions(+), 114 deletions(-)

diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
index 1affeddfe..340c8a942 100644
--- a/lib/librte_vhost/vhost_crypto.c
+++ b/lib/librte_vhost/vhost_crypto.c
@@ -199,4 +199,5 @@ struct vhost_crypto {
 	struct rte_mempool *mbuf_pool;
 	struct rte_mempool *sess_pool;
+	struct rte_mempool *wb_pool;
 
 	/** DPDK cryptodev ID */
@@ -216,4 +217,11 @@ struct vhost_crypto {
 } __rte_cache_aligned;
 
+struct vhost_crypto_writeback_data {
+	uint8_t *src;
+	uint8_t *dst;
+	uint64_t len;
+	struct vhost_crypto_writeback_data *next;
+};
+
 struct vhost_crypto_data_req {
 	struct vring_desc *head;
@@ -221,6 +229,6 @@ struct vhost_crypto_data_req {
 	struct virtio_crypto_inhdr *inhdr;
 	struct vhost_virtqueue *vq;
-	struct vring_desc *wb_desc;
-	uint16_t wb_len;
+	struct vhost_crypto_writeback_data *wb;
+	struct rte_mempool *wb_pool;
 	uint16_t desc_idx;
 	uint16_t len;
@@ -508,8 +516,6 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
 	}
 
-	if (unlikely(left > 0)) {
-		VC_LOG_ERR("Incorrect virtio descriptor");
+	if (unlikely(left > 0))
 		return -1;
-	}
 
 	*cur_desc = &head[desc->next];
@@ -517,4 +523,20 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
 }
 
+static __rte_always_inline void *
+get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc *cur_desc,
+		uint8_t perm)
+{
+	void *data;
+	uint64_t dlen = cur_desc->len;
+
+	data = IOVA_TO_VVA(void *, vc_req, cur_desc->addr, &dlen, perm);
+	if (unlikely(!data || dlen != cur_desc->len)) {
+		VC_LOG_ERR("Failed to map object");
+		return NULL;
+	}
+
+	return data;
+}
+
 static int
 copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
@@ -533,8 +555,6 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
 	src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
 			VHOST_ACCESS_RO);
-	if (unlikely(!src || !dlen)) {
-		VC_LOG_ERR("Failed to map descriptor");
+	if (unlikely(!src || !dlen))
 		return -1;
-	}
 
 	rte_memcpy((uint8_t *)data, src, dlen);
@@ -611,71 +631,156 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
 }
 
-static __rte_always_inline void *
-get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc **cur_desc,
-		uint32_t size, uint8_t perm)
+static void
+write_back_data(struct vhost_crypto_data_req *vc_req)
 {
-	void *data;
-	uint64_t dlen = (*cur_desc)->len;
+	struct vhost_crypto_writeback_data *wb_data = vc_req->wb, *wb_last;
 
-	data = IOVA_TO_VVA(void *, vc_req, (*cur_desc)->addr, &dlen, perm);
-	if (unlikely(!data || dlen != (*cur_desc)->len)) {
-		VC_LOG_ERR("Failed to map object");
-		return NULL;
+	while (wb_data) {
+		rte_prefetch0(wb_data->next);
+		rte_memcpy(wb_data->dst, wb_data->src, wb_data->len);
+		wb_last = wb_data;
+		wb_data = wb_data->next;
+		rte_mempool_put(vc_req->wb_pool, wb_last);
 	}
+}
 
-	if (unlikely(move_desc(vc_req->head, cur_desc, size) < 0))
-		return NULL;
+static void
+free_wb_data(struct vhost_crypto_writeback_data *wb_data,
+		struct rte_mempool *mp)
+{
+	while (wb_data->next != NULL)
+		free_wb_data(wb_data->next, mp);
 
-	return data;
+	rte_mempool_put(mp, wb_data);
 }
 
-static int
-write_back_data(struct rte_crypto_op *op, struct vhost_crypto_data_req *vc_req)
+/**
+ * The function will allocate a vhost_crypto_writeback_data linked list
+ * containing the source and destination data pointers for the write back
+ * operation after the request is dequeued from the Cryptodev PMD queues.
+ *
+ * @param vc_req
+ *   The vhost crypto data request pointer
+ * @param cur_desc
+ *   Pointer to the pointer of the descriptor currently in use. The content of
+ *   cur_desc is expected to be updated after the function executes.
+ * @param end_wb_data
+ *   The last write back data element to be returned. It is used only in cipher
+ *   and hash chain operations.
+ * @param src
+ *   The source data pointer
+ * @param offset
+ *   The offset into both the source and destination data. For the source, it
+ *   is the number of bytes between src and the start of the cipher operation.
+ *   For the destination, it is the number of bytes from *cur_desc->addr to
+ *   the point where src will be written.
+ * @param write_back_len
+ *   The length of the data to write back.
+ * @return
+ *   The pointer to the start of the write back data linked list.
+ */
+static struct vhost_crypto_writeback_data *
+prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
+		struct vring_desc **cur_desc,
+		struct vhost_crypto_writeback_data **end_wb_data,
+		uint8_t *src,
+		uint32_t offset,
+		uint64_t write_back_len)
 {
-	struct rte_mbuf *mbuf = op->sym->m_dst;
-	struct vring_desc *head = vc_req->head;
-	struct vring_desc *desc = vc_req->wb_desc;
-	int left = vc_req->wb_len;
-	uint32_t to_write;
-	uint8_t *src_data = mbuf->buf_addr, *dst;
+	struct vhost_crypto_writeback_data *wb_data, *head;
+	struct vring_desc *desc = *cur_desc;
 	uint64_t dlen;
+	uint8_t *dst;
+	int ret;
 
-	rte_prefetch0(&head[desc->next]);
-	to_write = RTE_MIN(desc->len, (uint32_t)left);
-	dlen = desc->len;
-	dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
-			VHOST_ACCESS_RW);
-	if (unlikely(!dst || dlen != desc->len)) {
-		VC_LOG_ERR("Failed to map descriptor");
-		return -1;
+	ret = rte_mempool_get(vc_req->wb_pool, (void **)&head);
+	if (unlikely(ret < 0)) {
+		VC_LOG_ERR("no memory");
+		goto error_exit;
 	}
 
-	rte_memcpy(dst, src_data, to_write);
-	left -= to_write;
-	src_data += to_write;
+	wb_data = head;
 
-	while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
-		desc = &head[desc->next];
-		rte_prefetch0(&head[desc->next]);
-		to_write = RTE_MIN(desc->len, (uint32_t)left);
+	if (likely(desc->len > offset)) {
+		wb_data->src = src + offset;
 		dlen = desc->len;
-		dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
-				VHOST_ACCESS_RW);
+		dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr,
+			&dlen, VHOST_ACCESS_RW) + offset;
 		if (unlikely(!dst || dlen != desc->len)) {
 			VC_LOG_ERR("Failed to map descriptor");
-			return -1;
+			goto error_exit;
 		}
 
-		rte_memcpy(dst, src_data, to_write);
-		left -= to_write;
-		src_data += to_write;
-	}
+		wb_data->dst = dst;
+		wb_data->len = desc->len - offset;
+		write_back_len -= wb_data->len;
+		src += offset + wb_data->len;
+		offset = 0;
+
+		if (unlikely(write_back_len)) {
+			ret = rte_mempool_get(vc_req->wb_pool,
+					(void **)&(wb_data->next));
+			if (unlikely(ret < 0)) {
+				VC_LOG_ERR("no memory");
+				goto error_exit;
+			}
+
+			wb_data = wb_data->next;
+		} else
+			wb_data->next = NULL;
+	} else
+		offset -= desc->len;
+
+	while (write_back_len) {
+		desc = &vc_req->head[desc->next];
+		if (unlikely(!(desc->flags & VRING_DESC_F_WRITE))) {
+			VC_LOG_ERR("incorrect descriptor");
+			goto error_exit;
+		}
+
+		if (desc->len <= offset) {
+			offset -= desc->len;
+			continue;
+		}
 
-	if (unlikely(left < 0)) {
-		VC_LOG_ERR("Incorrect virtio descriptor");
-		return -1;
+		dlen = desc->len;
+		dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
+				VHOST_ACCESS_RW) + offset;
+		if (unlikely(dst == NULL || dlen != desc->len)) {
+			VC_LOG_ERR("Failed to map descriptor");
+			goto error_exit;
+		}
+
+		wb_data->src = src;
+		wb_data->dst = dst;
+		wb_data->len = RTE_MIN(desc->len - offset, write_back_len);
+		write_back_len -= wb_data->len;
+		src += wb_data->len;
+		offset = 0;
+
+		if (write_back_len) {
+			ret = rte_mempool_get(vc_req->wb_pool,
+					(void **)&(wb_data->next));
+			if (unlikely(ret < 0)) {
+				VC_LOG_ERR("no memory");
+				goto error_exit;
+			}
+
+			wb_data = wb_data->next;
+		} else
+			wb_data->next = NULL;
 	}
 
-	return 0;
+	*cur_desc = &vc_req->head[desc->next];
+
+	*end_wb_data = wb_data;
+
+	return head;
+
+error_exit:
+	if (head)
+		free_wb_data(head, vc_req->wb_pool);
+
+	return NULL;
 }
 
@@ -687,4 +792,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 {
 	struct vring_desc *desc = cur_desc;
+	struct vhost_crypto_writeback_data *ewb = NULL;
 	struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
 	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
@@ -705,6 +811,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
 				cipher->para.src_data_len);
-		m_src->buf_addr = get_data_ptr(vc_req, &desc,
-				cipher->para.src_data_len, VHOST_ACCESS_RO);
+		m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
 		if (unlikely(m_src->buf_iova == 0 ||
 				m_src->buf_addr == NULL)) {
@@ -713,6 +818,16 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 			goto error_exit;
 		}
+
+		if (unlikely(move_desc(vc_req->head, &desc,
+				cipher->para.src_data_len) < 0)) {
+			VC_LOG_ERR("Incorrect descriptor");
+			ret = VIRTIO_CRYPTO_ERR;
+			goto error_exit;
+		}
+
 		break;
 	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
+		vc_req->wb_pool = vcrypto->wb_pool;
+
 		if (unlikely(cipher->para.src_data_len >
 				RTE_MBUF_DEFAULT_BUF_SIZE)) {
@@ -745,6 +860,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
 				desc->addr, cipher->para.dst_data_len);
-		m_dst->buf_addr = get_data_ptr(vc_req, &desc,
-				cipher->para.dst_data_len, VHOST_ACCESS_RW);
+		m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
 		if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
 			VC_LOG_ERR("zero_copy may fail due to cross page data");
@@ -753,14 +867,22 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		}
 
+		if (unlikely(move_desc(vc_req->head, &desc,
+				cipher->para.dst_data_len) < 0)) {
+			VC_LOG_ERR("Incorrect descriptor");
+			ret = VIRTIO_CRYPTO_ERR;
+			goto error_exit;
+		}
+
 		m_dst->data_len = cipher->para.dst_data_len;
 		break;
 	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
-		vc_req->wb_desc = desc;
-		vc_req->wb_len = cipher->para.dst_data_len;
-		if (unlikely(move_desc(vc_req->head, &desc,
-				vc_req->wb_len) < 0)) {
+		vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
+				rte_pktmbuf_mtod(m_src, uint8_t *), 0,
+				cipher->para.dst_data_len);
+		if (unlikely(vc_req->wb == NULL)) {
 			ret = VIRTIO_CRYPTO_ERR;
 			goto error_exit;
 		}
+
 		break;
 	default:
@@ -776,5 +898,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 	op->sym->cipher.data.length = cipher->para.src_data_len;
 
-	vc_req->inhdr = get_data_ptr(vc_req, &desc, INHDR_LEN, VHOST_ACCESS_WO);
+	vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
 	if (unlikely(vc_req->inhdr == NULL)) {
 		ret = VIRTIO_CRYPTO_BADMSG;
@@ -788,4 +910,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 error_exit:
+	if (vc_req->wb)
+		free_wb_data(vc_req->wb, vc_req->wb_pool);
+
 	vc_req->len = INHDR_LEN;
 	return ret;
@@ -798,5 +923,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		struct vring_desc *cur_desc)
 {
-	struct vring_desc *desc = cur_desc;
+	struct vring_desc *desc = cur_desc, *digest_desc;
+	struct vhost_crypto_writeback_data *ewb = NULL, *ewb2 = NULL;
 	struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
 	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
@@ -814,12 +940,12 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 	m_src->data_len = chain->para.src_data_len;
-	m_dst->data_len = chain->para.dst_data_len;
 
 	switch (vcrypto->option) {
 	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
+		m_dst->data_len = chain->para.dst_data_len;
+
 		m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
 				chain->para.src_data_len);
-		m_src->buf_addr = get_data_ptr(vc_req, &desc,
-				chain->para.src_data_len, VHOST_ACCESS_RO);
+		m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
 		if (unlikely(m_src->buf_iova == 0 || m_src->buf_addr == NULL)) {
 			VC_LOG_ERR("zero_copy may fail due to cross page data");
@@ -827,6 +953,15 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 			goto error_exit;
 		}
+
+		if (unlikely(move_desc(vc_req->head, &desc,
+				chain->para.src_data_len) < 0)) {
+			VC_LOG_ERR("Incorrect descriptor");
+			ret = VIRTIO_CRYPTO_ERR;
+			goto error_exit;
+		}
 		break;
 	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
+		vc_req->wb_pool = vcrypto->wb_pool;
+
 		if (unlikely(chain->para.src_data_len >
 				RTE_MBUF_DEFAULT_BUF_SIZE)) {
@@ -840,4 +975,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 			goto error_exit;
 		}
+
 		break;
 	default:
@@ -858,6 +994,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
 				desc->addr, chain->para.dst_data_len);
-		m_dst->buf_addr = get_data_ptr(vc_req, &desc,
-				chain->para.dst_data_len, VHOST_ACCESS_RW);
+		m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
 		if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
 			VC_LOG_ERR("zero_copy may fail due to cross page data");
@@ -866,8 +1001,15 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		}
 
+		if (unlikely(move_desc(vc_req->head, &desc,
+				chain->para.dst_data_len) < 0)) {
+			VC_LOG_ERR("Incorrect descriptor");
+			ret = VIRTIO_CRYPTO_ERR;
+			goto error_exit;
+		}
+
 		op->sym->auth.digest.phys_addr = gpa_to_hpa(vcrypto->dev,
 				desc->addr, chain->para.hash_result_len);
-		op->sym->auth.digest.data = get_data_ptr(vc_req, &desc,
-				chain->para.hash_result_len, VHOST_ACCESS_RW);
+		op->sym->auth.digest.data = get_data_ptr(vc_req, desc,
+				VHOST_ACCESS_RW);
 		if (unlikely(op->sym->auth.digest.phys_addr == 0)) {
 			VC_LOG_ERR("zero_copy may fail due to cross page data");
@@ -875,20 +1017,38 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 			goto error_exit;
 		}
+
+		if (unlikely(move_desc(vc_req->head, &desc,
+				chain->para.hash_result_len) < 0)) {
+			VC_LOG_ERR("Incorrect descriptor");
+			ret = VIRTIO_CRYPTO_ERR;
+			goto error_exit;
+		}
+
 		break;
 	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
-		digest_offset = m_dst->data_len;
-		digest_addr = rte_pktmbuf_mtod_offset(m_dst, void *,
+		vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
+				rte_pktmbuf_mtod(m_src, uint8_t *),
+				chain->para.cipher_start_src_offset,
+				chain->para.dst_data_len -
+				chain->para.cipher_start_src_offset);
+		if (unlikely(vc_req->wb == NULL)) {
+			ret = VIRTIO_CRYPTO_ERR;
+			goto error_exit;
+		}
+
+		digest_offset = m_src->data_len;
+		digest_addr = rte_pktmbuf_mtod_offset(m_src, void *,
 				digest_offset);
+		digest_desc = desc;
 
-		vc_req->wb_desc = desc;
-		vc_req->wb_len = m_dst->data_len + chain->para.hash_result_len;
-
-		if (unlikely(move_desc(vc_req->head, &desc,
-				chain->para.dst_data_len) < 0)) {
-			ret = VIRTIO_CRYPTO_BADMSG;
+		/** create a wb_data for digest */
+		ewb->next = prepare_write_back_data(vc_req, &desc, &ewb2,
+				digest_addr, 0, chain->para.hash_result_len);
+		if (unlikely(ewb->next == NULL)) {
+			ret = VIRTIO_CRYPTO_ERR;
 			goto error_exit;
 		}
 
-		if (unlikely(copy_data(digest_addr, vc_req, &desc,
+		if (unlikely(copy_data(digest_addr, vc_req, &digest_desc,
 				chain->para.hash_result_len)) < 0) {
 			ret = VIRTIO_CRYPTO_BADMSG;
@@ -897,5 +1057,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 		op->sym->auth.digest.data = digest_addr;
-		op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m_dst,
+		op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m_src,
 				digest_offset);
 		break;
@@ -906,5 +1066,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 	/* record inhdr */
-	vc_req->inhdr = get_data_ptr(vc_req, &desc, INHDR_LEN, VHOST_ACCESS_WO);
+	vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
 	if (unlikely(vc_req->inhdr == NULL)) {
 		ret = VIRTIO_CRYPTO_BADMSG;
@@ -929,4 +1089,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 error_exit:
+	if (vc_req->wb)
+		free_wb_data(vc_req->wb, vc_req->wb_pool);
 	vc_req->len = INHDR_LEN;
 	return ret;
@@ -969,5 +1131,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 	vc_req->zero_copy = vcrypto->option;
 
-	req = get_data_ptr(vc_req, &desc, sizeof(*req), VHOST_ACCESS_RO);
+	req = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
 	if (unlikely(req == NULL)) {
 		switch (vcrypto->option) {
@@ -990,4 +1152,10 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 			goto error_exit;
 		}
+	} else {
+		if (unlikely(move_desc(vc_req->head, &desc,
+				sizeof(*req)) < 0)) {
+			VC_LOG_ERR("Incorrect descriptor");
+			goto error_exit;
+		}
 	}
 
@@ -1064,5 +1232,4 @@ vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
 	struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(m_src);
 	uint16_t desc_idx;
-	int ret = 0;
 
 	if (unlikely(!vc_req)) {
@@ -1079,9 +1246,6 @@ vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
 		vc_req->inhdr->status = VIRTIO_CRYPTO_ERR;
 	else {
-		if (vc_req->zero_copy == 0) {
-			ret = write_back_data(op, vc_req);
-			if (unlikely(ret != 0))
-				vc_req->inhdr->status = VIRTIO_CRYPTO_ERR;
-		}
+		if (vc_req->zero_copy == 0)
+			write_back_data(vc_req);
 	}
 
@@ -1089,7 +1253,9 @@ vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
 	vc_req->vq->used->ring[desc_idx].len = vc_req->len;
 
-	rte_mempool_put(m_dst->pool, (void *)m_dst);
 	rte_mempool_put(m_src->pool, (void *)m_src);
 
+	if (m_dst)
+		rte_mempool_put(m_dst->pool, (void *)m_dst);
+
 	return vc_req->vq;
 }
@@ -1188,4 +1354,16 @@ rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
 	}
 
+	snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid);
+	vcrypto->wb_pool = rte_mempool_create(name,
+			VHOST_CRYPTO_MBUF_POOL_SIZE,
+			sizeof(struct vhost_crypto_writeback_data),
+			128, 0, NULL, NULL, NULL, NULL,
+			rte_socket_id(), 0);
+	if (!vcrypto->wb_pool) {
+		VC_LOG_ERR("Failed to create mempool");
+		ret = -ENOMEM;
+		goto error_exit;
+	}
+
 	dev->extern_data = vcrypto;
 	dev->extern_ops.pre_msg_handle = NULL;
@@ -1224,4 +1402,5 @@ rte_vhost_crypto_free(int vid)
 	rte_hash_free(vcrypto->session_map);
 	rte_mempool_free(vcrypto->mbuf_pool);
+	rte_mempool_free(vcrypto->wb_pool);
 	rte_free(vcrypto);
 
@@ -1259,9 +1438,28 @@ rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option)
 		return 0;
 
-	if (!(rte_mempool_full(vcrypto->mbuf_pool))) {
+	if (!(rte_mempool_full(vcrypto->mbuf_pool)) ||
+			!(rte_mempool_full(vcrypto->wb_pool))) {
 		VC_LOG_ERR("Cannot update zero copy as mempool is not full");
 		return -EINVAL;
 	}
 
+	if (option == RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE) {
+		char name[128];
+
+		snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid);
+		vcrypto->wb_pool = rte_mempool_create(name,
+				VHOST_CRYPTO_MBUF_POOL_SIZE,
+				sizeof(struct vhost_crypto_writeback_data),
+				128, 0, NULL, NULL, NULL, NULL,
+				rte_socket_id(), 0);
+		if (!vcrypto->wb_pool) {
+			VC_LOG_ERR("Failed to create mbuf pool");
+			return -ENOMEM;
+		}
+	} else {
+		rte_mempool_free(vcrypto->wb_pool);
+		vcrypto->wb_pool = NULL;
+	}
+
 	vcrypto->option = (uint8_t)option;
 
@@ -1279,7 +1477,6 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
 	uint16_t avail_idx;
 	uint16_t start_idx;
-	uint16_t required;
 	uint16_t count;
-	uint16_t i;
+	uint16_t i = 0;
 
 	if (unlikely(dev == NULL)) {
@@ -1313,25 +1510,64 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
 	 * we need only 1 mbuf as src and dst
 	 */
-	required = count * 2;
-	if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool, (void **)mbufs,
-			required) < 0)) {
-		VC_LOG_ERR("Insufficient memory");
-		return -ENOMEM;
-	}
-
-	for (i = 0; i < count; i++) {
-		uint16_t used_idx = (start_idx + i) & (vq->size - 1);
-		uint16_t desc_idx = vq->avail->ring[used_idx];
-		struct vring_desc *head = &vq->desc[desc_idx];
-		struct rte_crypto_op *op = ops[i];
-
-		op->sym->m_src = mbufs[i * 2];
-		op->sym->m_dst = mbufs[i * 2 + 1];
-		op->sym->m_src->data_off = 0;
-		op->sym->m_dst->data_off = 0;
-
-		if (unlikely(vhost_crypto_process_one_req(vcrypto, vq, op, head,
-				desc_idx)) < 0)
-			break;
+	switch (vcrypto->option) {
+	case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
+		if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,
+				(void **)mbufs, count * 2) < 0)) {
+			VC_LOG_ERR("Insufficient memory");
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < count; i++) {
+			uint16_t used_idx = (start_idx + i) & (vq->size - 1);
+			uint16_t desc_idx = vq->avail->ring[used_idx];
+			struct vring_desc *head = &vq->desc[desc_idx];
+			struct rte_crypto_op *op = ops[i];
+
+			op->sym->m_src = mbufs[i * 2];
+			op->sym->m_dst = mbufs[i * 2 + 1];
+			op->sym->m_src->data_off = 0;
+			op->sym->m_dst->data_off = 0;
+
+			if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
+					op, head, desc_idx)) < 0)
+				break;
+		}
+
+		if (unlikely(i < count))
+			rte_mempool_put_bulk(vcrypto->mbuf_pool,
+					(void **)&mbufs[i * 2],
+					(count - i) * 2);
+
+		break;
+
+	case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
+		if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,
+				(void **)mbufs, count) < 0)) {
+			VC_LOG_ERR("Insufficient memory");
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < count; i++) {
+			uint16_t used_idx = (start_idx + i) & (vq->size - 1);
+			uint16_t desc_idx = vq->avail->ring[used_idx];
+			struct vring_desc *head = &vq->desc[desc_idx];
+			struct rte_crypto_op *op = ops[i];
+
+			op->sym->m_src = mbufs[i];
+			op->sym->m_dst = NULL;
+			op->sym->m_src->data_off = 0;
+
+			if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
+					op, head, desc_idx)) < 0)
+				break;
+		}
+
+		if (unlikely(i < count))
+			rte_mempool_put_bulk(vcrypto->mbuf_pool,
+					(void **)&mbufs[i],
+					count - i);
+
+		break;
+
 	}
 
-- 
2.19.0
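
For context, the core of the fix is the new write back path: instead of
copying the result back from offset zero of the source mbuf (which is what
overwrote the content before the cipher offset with zeros, as described
above), the driver now builds a linked list of src/dst/len records and
drains it once the crypto op completes. Below is a minimal stand-alone
sketch of that drain step; it is plain C with malloc()/free() standing in
for the rte_mempool calls, and all names are hypothetical, not the
driver's own:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for struct vhost_crypto_writeback_data. */
struct wb_data {
	uint8_t *src;
	uint8_t *dst;
	uint64_t len;
	struct wb_data *next;
};

/* Walk the list, copy each segment, release each node; the driver
 * returns nodes to vc_req->wb_pool instead of calling free(). */
static void
write_back(struct wb_data *wb)
{
	while (wb != NULL) {
		struct wb_data *done = wb;

		memcpy(wb->dst, wb->src, wb->len);
		wb = wb->next;
		free(done);
	}
}

int
main(void)
{
	uint8_t src[8] = "payload", dst[8] = { 0 };
	struct wb_data *wb = malloc(sizeof(*wb));

	if (wb == NULL)
		return 1;
	wb->src = src + 2;	/* skip the cipher offset on both sides, */
	wb->dst = dst + 2;	/* so dst[0] and dst[1] are left untouched */
	wb->len = 5;
	wb->next = NULL;
	write_back(wb);
	printf("%s\n", dst + 2);	/* prints "yload" */
	return 0;
}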

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2018-11-29 13:11:35.371781800 +0000
+++ 0013-vhost-crypto-fix-packet-copy-in-chaining-mode.patch	2018-11-29 13:11:34.000000000 +0000
@@ -1,8 +1,10 @@
-From cd1e8f03abf0323e6210ae89a80b89b242a35260 Mon Sep 17 00:00:00 2001
+From fac3c5ab38573c74522e5e4ab479087ef1293a4e Mon Sep 17 00:00:00 2001
 From: Fan Zhang <roy.fan.zhang@intel.com>
 Date: Tue, 6 Nov 2018 16:22:48 +0000
 Subject: [PATCH] vhost/crypto: fix packet copy in chaining mode
 
+[ upstream commit cd1e8f03abf0323e6210ae89a80b89b242a35260 ]
+
 This patch fixes the incorrect packet content copy in the
 chaining mode. Originally, the content before the cipher offset is
 overwritten by all zeros. This patch fixes the problem by
@@ -10,7 +12,6 @@
 settings during setup.
 
 Fixes: 3bb595ecd682 ("vhost/crypto: add request handler")
-Cc: stable@dpdk.org
 
 Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
 Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
@@ -19,7 +20,7 @@
  1 file changed, 350 insertions(+), 114 deletions(-)
 
 diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
-index 5472bead0..dd01afc08 100644
+index 1affeddfe..340c8a942 100644
 --- a/lib/librte_vhost/vhost_crypto.c
 +++ b/lib/librte_vhost/vhost_crypto.c
 @@ -199,4 +199,5 @@ struct vhost_crypto {
@@ -49,7 +50,7 @@
 +	struct rte_mempool *wb_pool;
  	uint16_t desc_idx;
  	uint16_t len;
-@@ -507,8 +515,6 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
+@@ -508,8 +516,6 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
  	}
  
 -	if (unlikely(left > 0)) {
@@ -59,7 +60,7 @@
 -	}
  
  	*cur_desc = &head[desc->next];
-@@ -516,4 +522,20 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
+@@ -517,4 +523,20 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
  }
  
 +static __rte_always_inline void *
@@ -80,7 +81,7 @@
 +
  static int
  copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
-@@ -532,8 +554,6 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
+@@ -533,8 +555,6 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
  	src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
  			VHOST_ACCESS_RO);
 -	if (unlikely(!src || !dlen)) {
@@ -90,7 +91,7 @@
 -	}
  
  	rte_memcpy((uint8_t *)data, src, dlen);
-@@ -610,71 +630,156 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
+@@ -611,71 +631,156 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
  }
  
 -static __rte_always_inline void *
@@ -293,13 +294,13 @@
 +	return NULL;
  }
  
-@@ -686,4 +791,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -687,4 +792,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  {
  	struct vring_desc *desc = cur_desc;
 +	struct vhost_crypto_writeback_data *ewb = NULL;
  	struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
  	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
-@@ -704,6 +810,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -705,6 +811,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  		m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
  				cipher->para.src_data_len);
 -		m_src->buf_addr = get_data_ptr(vc_req, &desc,
@@ -307,7 +308,7 @@
 +		m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
  		if (unlikely(m_src->buf_iova == 0 ||
  				m_src->buf_addr == NULL)) {
-@@ -712,6 +817,16 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -713,6 +818,16 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  			goto error_exit;
  		}
 +
@@ -324,7 +325,7 @@
 +
  		if (unlikely(cipher->para.src_data_len >
  				RTE_MBUF_DEFAULT_BUF_SIZE)) {
-@@ -744,6 +859,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -745,6 +860,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  		m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
  				desc->addr, cipher->para.dst_data_len);
 -		m_dst->buf_addr = get_data_ptr(vc_req, &desc,
@@ -332,7 +333,7 @@
 +		m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
  		if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
  			VC_LOG_ERR("zero_copy may fail due to cross page data");
-@@ -752,14 +866,22 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -753,14 +867,22 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  		}
  
 +		if (unlikely(move_desc(vc_req->head, &desc,
@@ -359,14 +360,14 @@
 +
  		break;
  	default:
-@@ -775,5 +897,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -776,5 +898,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  	op->sym->cipher.data.length = cipher->para.src_data_len;
  
 -	vc_req->inhdr = get_data_ptr(vc_req, &desc, INHDR_LEN, VHOST_ACCESS_WO);
 +	vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
  	if (unlikely(vc_req->inhdr == NULL)) {
  		ret = VIRTIO_CRYPTO_BADMSG;
-@@ -787,4 +909,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -788,4 +910,7 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  error_exit:
 +	if (vc_req->wb)
@@ -374,7 +375,7 @@
 +
  	vc_req->len = INHDR_LEN;
  	return ret;
-@@ -797,5 +922,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -798,5 +923,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  		struct vring_desc *cur_desc)
  {
 -	struct vring_desc *desc = cur_desc;
@@ -382,7 +383,7 @@
 +	struct vhost_crypto_writeback_data *ewb = NULL, *ewb2 = NULL;
  	struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
  	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
-@@ -813,12 +939,12 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -814,12 +940,12 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  	m_src->data_len = chain->para.src_data_len;
 -	m_dst->data_len = chain->para.dst_data_len;
@@ -398,7 +399,7 @@
 +		m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
  		if (unlikely(m_src->buf_iova == 0 || m_src->buf_addr == NULL)) {
  			VC_LOG_ERR("zero_copy may fail due to cross page data");
-@@ -826,6 +952,15 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -827,6 +953,15 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  			goto error_exit;
  		}
 +
@@ -414,13 +415,13 @@
 +
  		if (unlikely(chain->para.src_data_len >
  				RTE_MBUF_DEFAULT_BUF_SIZE)) {
-@@ -839,4 +974,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -840,4 +975,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  			goto error_exit;
  		}
 +
  		break;
  	default:
-@@ -857,6 +993,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -858,6 +994,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  		m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
  				desc->addr, chain->para.dst_data_len);
 -		m_dst->buf_addr = get_data_ptr(vc_req, &desc,
@@ -428,7 +429,7 @@
 +		m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
  		if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
  			VC_LOG_ERR("zero_copy may fail due to cross page data");
-@@ -865,8 +1000,15 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -866,8 +1001,15 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  		}
  
 +		if (unlikely(move_desc(vc_req->head, &desc,
@@ -446,7 +447,7 @@
 +				VHOST_ACCESS_RW);
  		if (unlikely(op->sym->auth.digest.phys_addr == 0)) {
  			VC_LOG_ERR("zero_copy may fail due to cross page data");
-@@ -874,20 +1016,38 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -875,20 +1017,38 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  			goto error_exit;
  		}
 +
@@ -494,35 +495,35 @@
 +		if (unlikely(copy_data(digest_addr, vc_req, &digest_desc,
  				chain->para.hash_result_len)) < 0) {
  			ret = VIRTIO_CRYPTO_BADMSG;
-@@ -896,5 +1056,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -897,5 +1057,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  		op->sym->auth.digest.data = digest_addr;
 -		op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m_dst,
 +		op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m_src,
  				digest_offset);
  		break;
-@@ -905,5 +1065,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -906,5 +1066,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  	/* record inhdr */
 -	vc_req->inhdr = get_data_ptr(vc_req, &desc, INHDR_LEN, VHOST_ACCESS_WO);
 +	vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
  	if (unlikely(vc_req->inhdr == NULL)) {
  		ret = VIRTIO_CRYPTO_BADMSG;
-@@ -928,4 +1088,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -929,4 +1089,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  error_exit:
 +	if (vc_req->wb)
 +		free_wb_data(vc_req->wb, vc_req->wb_pool);
  	vc_req->len = INHDR_LEN;
  	return ret;
-@@ -968,5 +1130,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
+@@ -969,5 +1131,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
  	vc_req->zero_copy = vcrypto->option;
  
 -	req = get_data_ptr(vc_req, &desc, sizeof(*req), VHOST_ACCESS_RO);
 +	req = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
  	if (unlikely(req == NULL)) {
  		switch (vcrypto->option) {
-@@ -989,4 +1151,10 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
+@@ -990,4 +1152,10 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
  			goto error_exit;
  		}
 +	} else {
@@ -533,13 +534,13 @@
 +		}
  	}
  
-@@ -1063,5 +1231,4 @@ vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
+@@ -1064,5 +1232,4 @@ vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
  	struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(m_src);
  	uint16_t desc_idx;
 -	int ret = 0;
  
  	if (unlikely(!vc_req)) {
-@@ -1078,9 +1245,6 @@ vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
+@@ -1079,9 +1246,6 @@ vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
  		vc_req->inhdr->status = VIRTIO_CRYPTO_ERR;
  	else {
 -		if (vc_req->zero_copy == 0) {
@@ -551,7 +552,7 @@
 +			write_back_data(vc_req);
  	}
  
-@@ -1088,7 +1252,9 @@ vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
+@@ -1089,7 +1253,9 @@ vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
  	vc_req->vq->used->ring[desc_idx].len = vc_req->len;
  
 -	rte_mempool_put(m_dst->pool, (void *)m_dst);
@@ -562,7 +563,7 @@
 +
  	return vc_req->vq;
  }
-@@ -1187,4 +1353,16 @@ rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
+@@ -1188,4 +1354,16 @@ rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
  	}
  
 +	snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid);
@@ -579,13 +580,13 @@
 +
  	dev->extern_data = vcrypto;
  	dev->extern_ops.pre_msg_handle = NULL;
-@@ -1223,4 +1401,5 @@ rte_vhost_crypto_free(int vid)
+@@ -1224,4 +1402,5 @@ rte_vhost_crypto_free(int vid)
  	rte_hash_free(vcrypto->session_map);
  	rte_mempool_free(vcrypto->mbuf_pool);
 +	rte_mempool_free(vcrypto->wb_pool);
  	rte_free(vcrypto);
  
-@@ -1258,9 +1437,28 @@ rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option)
+@@ -1259,9 +1438,28 @@ rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option)
  		return 0;
  
 -	if (!(rte_mempool_full(vcrypto->mbuf_pool))) {
@@ -615,7 +616,7 @@
 +
  	vcrypto->option = (uint8_t)option;
  
-@@ -1278,7 +1476,6 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
+@@ -1279,7 +1477,6 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
  	uint16_t avail_idx;
  	uint16_t start_idx;
 -	uint16_t required;
@@ -624,7 +625,7 @@
 +	uint16_t i = 0;
  
  	if (unlikely(dev == NULL)) {
-@@ -1312,25 +1509,64 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
+@@ -1313,25 +1510,64 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
  	 * we need only 1 mbuf as src and dst
  	 */
 -	required = count * 2;
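
Most of what the diff above shows on both sides is the
prepare_write_back_data() logic: walk the guest's write descriptors, skip
the first offset bytes (the region the old code wrongly overwrote), and
emit one record per buffer segment until write_back_len is consumed. A
condensed model of that loop, reusing struct wb_data from the earlier
sketch, with flat buffers in place of vring descriptors, malloc() in place
of rte_mempool_get(), and hypothetical names throughout:

static struct wb_data *
prepare_wb(uint8_t **bufs, uint64_t buf_len, uint8_t *src,
		uint32_t offset, uint64_t wb_len)
{
	struct wb_data *head = NULL, **tail = &head;
	size_t i = 0;

	src += offset;	/* bytes before the offset are never copied */

	while (wb_len > 0) {
		uint8_t *dst = bufs[i++];
		struct wb_data *n;
		uint64_t room;

		if (buf_len <= offset) {	/* start lies past this buffer */
			offset -= buf_len;
			continue;
		}

		n = malloc(sizeof(*n));
		if (n == NULL)
			return head;	/* the real code frees the list and fails */

		room = buf_len - offset;
		n->src = src;
		n->dst = dst + offset;
		n->len = room < wb_len ? room : wb_len;
		n->next = NULL;

		src += n->len;
		wb_len -= n->len;
		offset = 0;
		*tail = n;
		tail = &n->next;
	}

	return head;
}

In chaining mode the patch calls this twice: once for the cipher output,
offset by cipher_start_src_offset, and once for the digest, then links the
second list onto the first via ewb->next.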

Thread overview: 92+ messages
2018-11-29 13:20 [dpdk-stable] patch 'app/testpmd: fix port status for new bonded devices' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/i40e: fix Rx instability with vector mode' " Kevin Traynor
2018-11-29 13:30   ` Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/vmxnet3: fix hot-unplug' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/e1000/base: fix uninitialized variable' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/ixgbe: stop link setup alarm handler before start' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/cxgbevf: add PCI uninitialization for VF' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/cxgbe: check Rx config before doing VLAN strip offload' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/cxgbe: fix check for redefined match items' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/cxgbe: increase completion wait time for flow operations' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/cxgbe: fix wrong ingress port value set in filter spec' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/thunderx: fix Tx desc corruption in scatter-gather mode' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'vhost: fix IOVA access for packed ring' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/virtio-user: fix typo in error message' " Kevin Traynor
2018-11-29 13:20 ` Kevin Traynor [this message]
2018-11-29 13:20 ` [dpdk-stable] patch 'net/virtio: fix unchecked return value' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'vhost: remove unneeded null pointer check' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/bnx2x: fix dynamic logging' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/bnx2x: fix VF link state update' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/bonding: fix crash when stopping mode 4 port' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/mlx5: fallback quietly if pkg-config is unavailable' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/qede: fix crash when configure fails' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'app/testpmd: fix L4 length for UDP checksum' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/octeontx: fix mbuf corruption with large private sizes' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/qede: fix Tx tunnel offload support mask' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'ethdev: fix invalid configuration after failure' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'ethdev: fix device info getting' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/sfc/base: fix field order in filter spec struct' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/enic: fix size check in Tx prepare handler' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/ixgbevf: fix link state' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'app/testpmd: fix memory leak for TM object' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'app/testpmd: fix memory allocation for DSCP table' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'crypto/ccp: fix resource leak' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'crypto/aesni_mb: fix queue pair free' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'examples/l3fwd-power: fix power library fallback' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'test/power: fix ACPI cpufreq module miss " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'test/kni: fix " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'test/reorder: fix out of bound access' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'malloc: fix adjacency check to also include segment list' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'build: set -mfpu=neon flag for armv7-a with meson' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'efd: fix write unlock during ring creation' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/i40e: fix X710 Rx after reading some registers' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/ixgbe: fix maximum wait time in comment' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/mlx4: fix minor typo' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/mlx5: fix minor typos' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/mlx4: fix initialization of struct members' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/mlx5: " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/mlx4: optimize Tx external memory registration' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/mlx5: " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/mlx5: optimize Tx doorbell write' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/mlx5: optimize Rx buffer replenishment threshold' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/mlx5: fix packet type for MPLS in UDP' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/ena: fix cleaning HW IO rings configuration' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/octeontx: fix failures when available ports > queues' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/avf: fix Tx offload mask' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'net/bonding: fix possible silent failure in configuration' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'app/testpmd: fix memory leak for DSCP table' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'app/pdump: fix port id storage size' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'examples/ipv4_multicast: fix leak of cloned packets' " Kevin Traynor
2018-11-29 13:20 ` [dpdk-stable] patch 'test: fix interrupt check' " Kevin Traynor
2018-11-29 13:21 ` [dpdk-stable] patch 'test/power: skip KVM autotest if cannot run' " Kevin Traynor
2018-11-29 13:21 ` [dpdk-stable] patch 'test: release ring resources after PMD perf test' " Kevin Traynor
2018-11-29 13:21 ` [dpdk-stable] patch 'devtools: fix regex in symbol addition check' " Kevin Traynor
2018-11-30 13:55   ` Neil Horman
2018-11-30 20:36     ` Kevin Traynor
2018-12-01 19:13       ` Neil Horman
2018-11-29 13:21 ` [dpdk-stable] patch 'usertools: check for lspci dependency' " Kevin Traynor
2018-11-29 13:21 ` [dpdk-stable] patch 'doc: fix DESTDIR variable name in meson guide' " Kevin Traynor
2018-11-29 13:21 ` [dpdk-stable] patch 'doc: fix NUMA library name in Linux " Kevin Traynor
2018-11-29 13:21 ` [dpdk-stable] patch 'doc: add cross-compilation in sample apps " Kevin Traynor
2018-11-29 13:21 ` [dpdk-stable] patch 'doc: fix formatting in IP reassembly app " Kevin Traynor
2018-11-29 13:21 ` [dpdk-stable] patch 'doc: fix function name in flow filtering " Kevin Traynor
2018-11-29 13:21 ` [dpdk-stable] patch 'ipc: fix access after async request failure' " Kevin Traynor
2018-11-29 13:21 ` [dpdk-stable] patch 'eal: fix build with -O1' " Kevin Traynor
2018-11-29 13:21 ` [dpdk-stable] patch 'kni: fix possible uninitialized variable' " Kevin Traynor
2018-11-29 13:21 ` [dpdk-stable] patch 'net/mlx4: " Kevin Traynor
2018-11-29 13:21 ` [dpdk-stable] patch 'eventdev: fix unlock in Rx adapter' " Kevin Traynor
2018-11-29 13:21 ` [dpdk-stable] patch 'config: enable more than 128 cores for arm64' " Kevin Traynor
2018-11-29 13:21 ` [dpdk-stable] patch 'net/i40e/base: fix comment referencing internal data' " Kevin Traynor
2018-11-29 13:21 ` [dpdk-stable] patch 'net/ena: fix out of order completion' " Kevin Traynor
2018-11-29 13:21 ` [dpdk-stable] patch 'net/qede: fix Tx offload mask' " Kevin Traynor
2018-11-29 13:21 ` [dpdk-stable] patch 'net/ixgbe: fix TDH register write' " Kevin Traynor
2018-11-29 13:21 ` [dpdk-stable] patch 'vhost: fix packed ring constants declaration' " Kevin Traynor
2018-11-29 13:21 ` [dpdk-stable] patch 'doc: remove old options from pdump guide' " Kevin Traynor
2018-11-29 13:21 ` [dpdk-stable] patch 'eal/bsd: fix possible IOPL fd leak' " Kevin Traynor
2018-11-29 13:21 ` [dpdk-stable] patch 'net/virtio: avoid annoying IOPL error log' " Kevin Traynor
2018-11-29 13:21 ` [dpdk-stable] patch 'bus/pci: fix allocation of device path' " Kevin Traynor
2018-11-29 13:21 ` [dpdk-stable] patch 'vfio: do not needlessly setup device in secondary process' " Kevin Traynor
2018-11-29 13:21 ` [dpdk-stable] patch 'mem: fix division by zero in no-NUMA mode' " Kevin Traynor
2018-11-29 13:21 ` [dpdk-stable] patch 'fix indentation in symbol maps' " Kevin Traynor
2018-11-29 13:21 ` [dpdk-stable] patch 'fix dpdk.org URLs' " Kevin Traynor
2018-11-29 13:21 ` [dpdk-stable] patch 'ethdev: eliminate interim variable' " Kevin Traynor
