patches for DPDK stable branches
 help / color / mirror / Atom feed
From: Kevin Traynor <ktraynor@redhat.com>
To: Fan Zhang <roy.fan.zhang@intel.com>
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>,
	dpdk stable <stable@dpdk.org>
Subject: [dpdk-stable] patch 'vhost/crypto: fix possible dead loop' has been queued to LTS release 18.11.1
Date: Thu, 31 Jan 2019 15:48:36 +0000	[thread overview]
Message-ID: <20190131154901.5383-28-ktraynor@redhat.com> (raw)
In-Reply-To: <20190131154901.5383-1-ktraynor@redhat.com>

Hi,

FYI, your patch has been queued to LTS release 18.11.1

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 02/07/19. So please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(ie: not only metadata diffs), please double check that the rebase was
correctly done.

Thanks.

Kevin Traynor

---
>From 25b041012ca317971230bb563daa144aef58a0ea Mon Sep 17 00:00:00 2001
From: Fan Zhang <roy.fan.zhang@intel.com>
Date: Fri, 4 Jan 2019 11:22:45 +0000
Subject: [PATCH] vhost/crypto: fix possible dead loop

[ upstream commit c7e7244b82ad174a8ca51a385e6ad2eb508261d8 ]

This patch fixes a possible infinite loop caused by incorrect
descriptor chain created by the driver.

Fixes: 3bb595ecd682 ("vhost/crypto: add request handler")

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/librte_vhost/vhost_crypto.c | 121 ++++++++++++++++++++++----------
 1 file changed, 82 insertions(+), 39 deletions(-)

diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
index dd01afc08..80b83ef77 100644
--- a/lib/librte_vhost/vhost_crypto.c
+++ b/lib/librte_vhost/vhost_crypto.c
@@ -467,5 +467,6 @@ vhost_crypto_msg_post_handler(int vid, void *msg)
 
 static __rte_always_inline struct vring_desc *
-find_write_desc(struct vring_desc *head, struct vring_desc *desc)
+find_write_desc(struct vring_desc *head, struct vring_desc *desc,
+		uint32_t *nb_descs)
 {
 	if (desc->flags & VRING_DESC_F_WRITE)
@@ -473,4 +474,8 @@ find_write_desc(struct vring_desc *head, struct vring_desc *desc)
 
 	while (desc->flags & VRING_DESC_F_NEXT) {
+		if (unlikely(*nb_descs == 0))
+			return NULL;
+		(*nb_descs)--;
+
 		desc = &head[desc->next];
 		if (desc->flags & VRING_DESC_F_WRITE)
@@ -482,11 +487,16 @@ find_write_desc(struct vring_desc *head, struct vring_desc *desc)
 
 static struct virtio_crypto_inhdr *
-reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc)
+reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc,
+		uint32_t *nb_descs)
 {
 	uint64_t dlen;
 	struct virtio_crypto_inhdr *inhdr;
 
-	while (desc->flags & VRING_DESC_F_NEXT)
+	while (desc->flags & VRING_DESC_F_NEXT) {
+		if (unlikely(*nb_descs == 0))
+			return NULL;
+		(*nb_descs)--;
 		desc = &vc_req->head[desc->next];
+	}
 
 	dlen = desc->len;
@@ -501,13 +511,14 @@ reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc)
 static __rte_always_inline int
 move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
-		uint32_t size)
+		uint32_t size, uint32_t *nb_descs)
 {
 	struct vring_desc *desc = *cur_desc;
-	int left = size;
-
-	rte_prefetch0(&head[desc->next]);
-	left -= desc->len;
+	int left = size - desc->len;
 
 	while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
+		(*nb_descs)--;
+		if (unlikely(*nb_descs == 0))
+			return -1;
+
 		desc = &head[desc->next];
 		rte_prefetch0(&head[desc->next]);
@@ -518,5 +529,8 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
 		return -1;
 
-	*cur_desc = &head[desc->next];
+	if (unlikely(*nb_descs == 0))
+		*cur_desc = NULL;
+	else
+		*cur_desc = &head[desc->next];
 	return 0;
 }
@@ -540,5 +554,5 @@ get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc *cur_desc,
 static int
 copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
-		struct vring_desc **cur_desc, uint32_t size)
+		struct vring_desc **cur_desc, uint32_t size, uint32_t *nb_descs)
 {
 	struct vring_desc *desc = *cur_desc;
@@ -549,5 +563,4 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
 	int left = size;
 
-	rte_prefetch0(&vc_req->head[desc->next]);
 	to_copy = RTE_MIN(desc->len, (uint32_t)left);
 	dlen = to_copy;
@@ -583,4 +596,10 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
 
 	while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
+		if (unlikely(*nb_descs == 0)) {
+			VC_LOG_ERR("Invalid descriptors");
+			return -1;
+		}
+		(*nb_descs)--;
+
 		desc = &vc_req->head[desc->next];
 		rte_prefetch0(&vc_req->head[desc->next]);
@@ -625,5 +644,8 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
 	}
 
-	*cur_desc = &vc_req->head[desc->next];
+	if (unlikely(*nb_descs == 0))
+		*cur_desc = NULL;
+	else
+		*cur_desc = &vc_req->head[desc->next];
 
 	return 0;
@@ -685,5 +707,6 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
 		uint8_t *src,
 		uint32_t offset,
-		uint64_t write_back_len)
+		uint64_t write_back_len,
+		uint32_t *nb_descs)
 {
 	struct vhost_crypto_writeback_data *wb_data, *head;
@@ -732,4 +755,10 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
 
 	while (write_back_len) {
+		if (unlikely(*nb_descs == 0)) {
+			VC_LOG_ERR("Invalid descriptors");
+			goto error_exit;
+		}
+		(*nb_descs)--;
+
 		desc = &vc_req->head[desc->next];
 		if (unlikely(!(desc->flags & VRING_DESC_F_WRITE))) {
@@ -771,5 +800,8 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
 	}
 
-	*cur_desc = &vc_req->head[desc->next];
+	if (unlikely(*nb_descs == 0))
+		*cur_desc = NULL;
+	else
+		*cur_desc = &vc_req->head[desc->next];
 
 	*end_wb_data = wb_data;
@@ -788,5 +820,6 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		struct vhost_crypto_data_req *vc_req,
 		struct virtio_crypto_cipher_data_req *cipher,
-		struct vring_desc *cur_desc)
+		struct vring_desc *cur_desc,
+		uint32_t *nb_descs)
 {
 	struct vring_desc *desc = cur_desc;
@@ -798,6 +831,6 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 	/* prepare */
 	/* iv */
-	if (unlikely(copy_data(iv_data, vc_req, &desc,
-			cipher->para.iv_len) < 0)) {
+	if (unlikely(copy_data(iv_data, vc_req, &desc, cipher->para.iv_len,
+			nb_descs) < 0)) {
 		ret = VIRTIO_CRYPTO_BADMSG;
 		goto error_exit;
@@ -819,5 +852,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 		if (unlikely(move_desc(vc_req->head, &desc,
-				cipher->para.src_data_len) < 0)) {
+				cipher->para.src_data_len, nb_descs) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			ret = VIRTIO_CRYPTO_ERR;
@@ -836,6 +869,6 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		}
 		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
-				vc_req, &desc, cipher->para.src_data_len)
-				< 0)) {
+				vc_req, &desc, cipher->para.src_data_len,
+				nb_descs) < 0)) {
 			ret = VIRTIO_CRYPTO_BADMSG;
 			goto error_exit;
@@ -848,5 +881,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 	/* dst */
-	desc = find_write_desc(vc_req->head, desc);
+	desc = find_write_desc(vc_req->head, desc, nb_descs);
 	if (unlikely(!desc)) {
 		VC_LOG_ERR("Cannot find write location");
@@ -867,5 +900,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 		if (unlikely(move_desc(vc_req->head, &desc,
-				cipher->para.dst_data_len) < 0)) {
+				cipher->para.dst_data_len, nb_descs) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			ret = VIRTIO_CRYPTO_ERR;
@@ -878,5 +911,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
 				rte_pktmbuf_mtod(m_src, uint8_t *), 0,
-				cipher->para.dst_data_len);
+				cipher->para.dst_data_len, nb_descs);
 		if (unlikely(vc_req->wb == NULL)) {
 			ret = VIRTIO_CRYPTO_ERR;
@@ -920,5 +953,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		struct vhost_crypto_data_req *vc_req,
 		struct virtio_crypto_alg_chain_data_req *chain,
-		struct vring_desc *cur_desc)
+		struct vring_desc *cur_desc,
+		uint32_t *nb_descs)
 {
 	struct vring_desc *desc = cur_desc, *digest_desc;
@@ -933,5 +967,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 	/* iv */
 	if (unlikely(copy_data(iv_data, vc_req, &desc,
-			chain->para.iv_len) < 0)) {
+			chain->para.iv_len, nb_descs) < 0)) {
 		ret = VIRTIO_CRYPTO_BADMSG;
 		goto error_exit;
@@ -954,5 +988,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 		if (unlikely(move_desc(vc_req->head, &desc,
-				chain->para.src_data_len) < 0)) {
+				chain->para.src_data_len, nb_descs) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			ret = VIRTIO_CRYPTO_ERR;
@@ -970,5 +1004,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		}
 		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
-				vc_req, &desc, chain->para.src_data_len)) < 0) {
+				vc_req, &desc, chain->para.src_data_len,
+				nb_descs)) < 0) {
 			ret = VIRTIO_CRYPTO_BADMSG;
 			goto error_exit;
@@ -982,5 +1017,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 	/* dst */
-	desc = find_write_desc(vc_req->head, desc);
+	desc = find_write_desc(vc_req->head, desc, nb_descs);
 	if (unlikely(!desc)) {
 		VC_LOG_ERR("Cannot find write location");
@@ -1001,5 +1036,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 		if (unlikely(move_desc(vc_req->head, &desc,
-				chain->para.dst_data_len) < 0)) {
+				chain->para.dst_data_len, nb_descs) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			ret = VIRTIO_CRYPTO_ERR;
@@ -1018,5 +1053,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 		if (unlikely(move_desc(vc_req->head, &desc,
-				chain->para.hash_result_len) < 0)) {
+				chain->para.hash_result_len, nb_descs) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			ret = VIRTIO_CRYPTO_ERR;
@@ -1030,5 +1065,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 				chain->para.cipher_start_src_offset,
 				chain->para.dst_data_len -
-				chain->para.cipher_start_src_offset);
+				chain->para.cipher_start_src_offset, nb_descs);
 		if (unlikely(vc_req->wb == NULL)) {
 			ret = VIRTIO_CRYPTO_ERR;
@@ -1043,5 +1078,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		/** create a wb_data for digest */
 		ewb->next = prepare_write_back_data(vc_req, &desc, &ewb2,
-				digest_addr, 0, chain->para.hash_result_len);
+				digest_addr, 0, chain->para.hash_result_len,
+				nb_descs);
 		if (unlikely(ewb->next == NULL)) {
 			ret = VIRTIO_CRYPTO_ERR;
@@ -1050,5 +1086,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 		if (unlikely(copy_data(digest_addr, vc_req, &digest_desc,
-				chain->para.hash_result_len)) < 0) {
+				chain->para.hash_result_len, nb_descs)) < 0) {
 			ret = VIRTIO_CRYPTO_BADMSG;
 			goto error_exit;
@@ -1109,4 +1145,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 	uint64_t session_id;
 	uint64_t dlen;
+	uint32_t nb_descs = vq->size;
 	int err = 0;
 
@@ -1117,4 +1154,8 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 	if (likely(head->flags & VRING_DESC_F_INDIRECT)) {
 		dlen = head->len;
+		nb_descs = dlen / sizeof(struct vring_desc);
+		/* drop invalid descriptors */
+		if (unlikely(nb_descs > vq->size))
+			return -1;
 		desc = IOVA_TO_VVA(struct vring_desc *, vc_req, head->addr,
 				&dlen, VHOST_ACCESS_RO);
@@ -1139,6 +1180,6 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 		case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
 			req = &tmp_req;
-			if (unlikely(copy_data(req, vc_req, &desc, sizeof(*req))
-					< 0)) {
+			if (unlikely(copy_data(req, vc_req, &desc, sizeof(*req),
+					&nb_descs) < 0)) {
 				err = VIRTIO_CRYPTO_BADMSG;
 				VC_LOG_ERR("Invalid descriptor");
@@ -1153,5 +1194,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 	} else {
 		if (unlikely(move_desc(vc_req->head, &desc,
-				sizeof(*req)) < 0)) {
+				sizeof(*req), &nb_descs) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			goto error_exit;
@@ -1194,9 +1235,11 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 		case VIRTIO_CRYPTO_SYM_OP_CIPHER:
 			err = prepare_sym_cipher_op(vcrypto, op, vc_req,
-					&req->u.sym_req.u.cipher, desc);
+					&req->u.sym_req.u.cipher, desc,
+					&nb_descs);
 			break;
 		case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
 			err = prepare_sym_chain_op(vcrypto, op, vc_req,
-					&req->u.sym_req.u.chain, desc);
+					&req->u.sym_req.u.chain, desc,
+					&nb_descs);
 			break;
 		}
@@ -1216,5 +1259,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 error_exit:
 
-	inhdr = reach_inhdr(vc_req, desc);
+	inhdr = reach_inhdr(vc_req, desc, &nb_descs);
 	if (likely(inhdr != NULL))
 		inhdr->status = (uint8_t)err;
-- 
2.19.0

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2019-01-31 15:44:06.398900413 +0000
+++ 0028-vhost-crypto-fix-possible-dead-loop.patch	2019-01-31 15:44:05.000000000 +0000
@@ -1,13 +1,14 @@
-From c7e7244b82ad174a8ca51a385e6ad2eb508261d8 Mon Sep 17 00:00:00 2001
+From 25b041012ca317971230bb563daa144aef58a0ea Mon Sep 17 00:00:00 2001
 From: Fan Zhang <roy.fan.zhang@intel.com>
 Date: Fri, 4 Jan 2019 11:22:45 +0000
 Subject: [PATCH] vhost/crypto: fix possible dead loop
 
+[ upstream commit c7e7244b82ad174a8ca51a385e6ad2eb508261d8 ]
+
 This patch fixes a possible infinite loop caused by incorrect
 descriptor chain created by the driver.
 
 Fixes: 3bb595ecd682 ("vhost/crypto: add request handler")
-Cc: stable@dpdk.org
 
 Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
 Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
@@ -16,10 +17,10 @@
  1 file changed, 82 insertions(+), 39 deletions(-)
 
 diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
-index 598196fb7..e12458ce0 100644
+index dd01afc08..80b83ef77 100644
 --- a/lib/librte_vhost/vhost_crypto.c
 +++ b/lib/librte_vhost/vhost_crypto.c
-@@ -468,5 +468,6 @@ vhost_crypto_msg_post_handler(int vid, void *msg)
+@@ -467,5 +467,6 @@ vhost_crypto_msg_post_handler(int vid, void *msg)
  
  static __rte_always_inline struct vring_desc *
 -find_write_desc(struct vring_desc *head, struct vring_desc *desc)
@@ -27,7 +28,7 @@
 +		uint32_t *nb_descs)
  {
  	if (desc->flags & VRING_DESC_F_WRITE)
-@@ -474,4 +475,8 @@ find_write_desc(struct vring_desc *head, struct vring_desc *desc)
+@@ -473,4 +474,8 @@ find_write_desc(struct vring_desc *head, struct vring_desc *desc)
  
  	while (desc->flags & VRING_DESC_F_NEXT) {
 +		if (unlikely(*nb_descs == 0))
@@ -36,7 +37,7 @@
 +
  		desc = &head[desc->next];
  		if (desc->flags & VRING_DESC_F_WRITE)
-@@ -483,11 +488,16 @@ find_write_desc(struct vring_desc *head, struct vring_desc *desc)
+@@ -482,11 +487,16 @@ find_write_desc(struct vring_desc *head, struct vring_desc *desc)
  
  static struct virtio_crypto_inhdr *
 -reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc)
@@ -55,7 +56,7 @@
 +	}
  
  	dlen = desc->len;
-@@ -502,13 +512,14 @@ reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc)
+@@ -501,13 +511,14 @@ reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc)
  static __rte_always_inline int
  move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
 -		uint32_t size)
@@ -75,7 +76,7 @@
 +
  		desc = &head[desc->next];
  		rte_prefetch0(&head[desc->next]);
-@@ -519,5 +530,8 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
+@@ -518,5 +529,8 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
  		return -1;
  
 -	*cur_desc = &head[desc->next];
@@ -85,20 +86,20 @@
 +		*cur_desc = &head[desc->next];
  	return 0;
  }
-@@ -541,5 +555,5 @@ get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc *cur_desc,
+@@ -540,5 +554,5 @@ get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc *cur_desc,
  static int
  copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
 -		struct vring_desc **cur_desc, uint32_t size)
 +		struct vring_desc **cur_desc, uint32_t size, uint32_t *nb_descs)
  {
  	struct vring_desc *desc = *cur_desc;
-@@ -550,5 +564,4 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
+@@ -549,5 +563,4 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
  	int left = size;
  
 -	rte_prefetch0(&vc_req->head[desc->next]);
  	to_copy = RTE_MIN(desc->len, (uint32_t)left);
  	dlen = to_copy;
-@@ -584,4 +597,10 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
+@@ -583,4 +596,10 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
  
  	while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
 +		if (unlikely(*nb_descs == 0)) {
@@ -109,7 +110,7 @@
 +
  		desc = &vc_req->head[desc->next];
  		rte_prefetch0(&vc_req->head[desc->next]);
-@@ -626,5 +645,8 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
+@@ -625,5 +644,8 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
  	}
  
 -	*cur_desc = &vc_req->head[desc->next];
@@ -119,7 +120,7 @@
 +		*cur_desc = &vc_req->head[desc->next];
  
  	return 0;
-@@ -686,5 +708,6 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
+@@ -685,5 +707,6 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
  		uint8_t *src,
  		uint32_t offset,
 -		uint64_t write_back_len)
@@ -127,7 +128,7 @@
 +		uint32_t *nb_descs)
  {
  	struct vhost_crypto_writeback_data *wb_data, *head;
-@@ -733,4 +756,10 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
+@@ -732,4 +755,10 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
  
  	while (write_back_len) {
 +		if (unlikely(*nb_descs == 0)) {
@@ -138,7 +139,7 @@
 +
  		desc = &vc_req->head[desc->next];
  		if (unlikely(!(desc->flags & VRING_DESC_F_WRITE))) {
-@@ -772,5 +801,8 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
+@@ -771,5 +800,8 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
  	}
  
 -	*cur_desc = &vc_req->head[desc->next];
@@ -148,7 +149,7 @@
 +		*cur_desc = &vc_req->head[desc->next];
  
  	*end_wb_data = wb_data;
-@@ -789,5 +821,6 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -788,5 +820,6 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  		struct vhost_crypto_data_req *vc_req,
  		struct virtio_crypto_cipher_data_req *cipher,
 -		struct vring_desc *cur_desc)
@@ -156,7 +157,7 @@
 +		uint32_t *nb_descs)
  {
  	struct vring_desc *desc = cur_desc;
-@@ -799,6 +832,6 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -798,6 +831,6 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  	/* prepare */
  	/* iv */
 -	if (unlikely(copy_data(iv_data, vc_req, &desc,
@@ -165,14 +166,14 @@
 +			nb_descs) < 0)) {
  		ret = VIRTIO_CRYPTO_BADMSG;
  		goto error_exit;
-@@ -820,5 +853,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -819,5 +852,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  		if (unlikely(move_desc(vc_req->head, &desc,
 -				cipher->para.src_data_len) < 0)) {
 +				cipher->para.src_data_len, nb_descs) < 0)) {
  			VC_LOG_ERR("Incorrect descriptor");
  			ret = VIRTIO_CRYPTO_ERR;
-@@ -837,6 +870,6 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -836,6 +869,6 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  		}
  		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
 -				vc_req, &desc, cipher->para.src_data_len)
@@ -181,28 +182,28 @@
 +				nb_descs) < 0)) {
  			ret = VIRTIO_CRYPTO_BADMSG;
  			goto error_exit;
-@@ -849,5 +882,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -848,5 +881,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  	/* dst */
 -	desc = find_write_desc(vc_req->head, desc);
 +	desc = find_write_desc(vc_req->head, desc, nb_descs);
  	if (unlikely(!desc)) {
  		VC_LOG_ERR("Cannot find write location");
-@@ -868,5 +901,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -867,5 +900,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  		if (unlikely(move_desc(vc_req->head, &desc,
 -				cipher->para.dst_data_len) < 0)) {
 +				cipher->para.dst_data_len, nb_descs) < 0)) {
  			VC_LOG_ERR("Incorrect descriptor");
  			ret = VIRTIO_CRYPTO_ERR;
-@@ -879,5 +912,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -878,5 +911,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  		vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
  				rte_pktmbuf_mtod(m_src, uint8_t *), 0,
 -				cipher->para.dst_data_len);
 +				cipher->para.dst_data_len, nb_descs);
  		if (unlikely(vc_req->wb == NULL)) {
  			ret = VIRTIO_CRYPTO_ERR;
-@@ -921,5 +954,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -920,5 +953,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  		struct vhost_crypto_data_req *vc_req,
  		struct virtio_crypto_alg_chain_data_req *chain,
 -		struct vring_desc *cur_desc)
@@ -210,21 +211,21 @@
 +		uint32_t *nb_descs)
  {
  	struct vring_desc *desc = cur_desc, *digest_desc;
-@@ -934,5 +968,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -933,5 +967,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  	/* iv */
  	if (unlikely(copy_data(iv_data, vc_req, &desc,
 -			chain->para.iv_len) < 0)) {
 +			chain->para.iv_len, nb_descs) < 0)) {
  		ret = VIRTIO_CRYPTO_BADMSG;
  		goto error_exit;
-@@ -955,5 +989,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -954,5 +988,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  		if (unlikely(move_desc(vc_req->head, &desc,
 -				chain->para.src_data_len) < 0)) {
 +				chain->para.src_data_len, nb_descs) < 0)) {
  			VC_LOG_ERR("Incorrect descriptor");
  			ret = VIRTIO_CRYPTO_ERR;
-@@ -971,5 +1005,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -970,5 +1004,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  		}
  		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
 -				vc_req, &desc, chain->para.src_data_len)) < 0) {
@@ -232,35 +233,35 @@
 +				nb_descs)) < 0) {
  			ret = VIRTIO_CRYPTO_BADMSG;
  			goto error_exit;
-@@ -983,5 +1018,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -982,5 +1017,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  	/* dst */
 -	desc = find_write_desc(vc_req->head, desc);
 +	desc = find_write_desc(vc_req->head, desc, nb_descs);
  	if (unlikely(!desc)) {
  		VC_LOG_ERR("Cannot find write location");
-@@ -1002,5 +1037,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -1001,5 +1036,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  		if (unlikely(move_desc(vc_req->head, &desc,
 -				chain->para.dst_data_len) < 0)) {
 +				chain->para.dst_data_len, nb_descs) < 0)) {
  			VC_LOG_ERR("Incorrect descriptor");
  			ret = VIRTIO_CRYPTO_ERR;
-@@ -1019,5 +1054,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -1018,5 +1053,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  		if (unlikely(move_desc(vc_req->head, &desc,
 -				chain->para.hash_result_len) < 0)) {
 +				chain->para.hash_result_len, nb_descs) < 0)) {
  			VC_LOG_ERR("Incorrect descriptor");
  			ret = VIRTIO_CRYPTO_ERR;
-@@ -1031,5 +1066,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -1030,5 +1065,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  				chain->para.cipher_start_src_offset,
  				chain->para.dst_data_len -
 -				chain->para.cipher_start_src_offset);
 +				chain->para.cipher_start_src_offset, nb_descs);
  		if (unlikely(vc_req->wb == NULL)) {
  			ret = VIRTIO_CRYPTO_ERR;
-@@ -1044,5 +1079,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -1043,5 +1078,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  		/** create a wb_data for digest */
  		ewb->next = prepare_write_back_data(vc_req, &desc, &ewb2,
 -				digest_addr, 0, chain->para.hash_result_len);
@@ -268,20 +269,20 @@
 +				nb_descs);
  		if (unlikely(ewb->next == NULL)) {
  			ret = VIRTIO_CRYPTO_ERR;
-@@ -1051,5 +1087,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -1050,5 +1086,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  		if (unlikely(copy_data(digest_addr, vc_req, &digest_desc,
 -				chain->para.hash_result_len)) < 0) {
 +				chain->para.hash_result_len, nb_descs)) < 0) {
  			ret = VIRTIO_CRYPTO_BADMSG;
  			goto error_exit;
-@@ -1110,4 +1146,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
+@@ -1109,4 +1145,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
  	uint64_t session_id;
  	uint64_t dlen;
 +	uint32_t nb_descs = vq->size;
  	int err = 0;
  
-@@ -1118,4 +1155,8 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
+@@ -1117,4 +1154,8 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
  	if (likely(head->flags & VRING_DESC_F_INDIRECT)) {
  		dlen = head->len;
 +		nb_descs = dlen / sizeof(struct vring_desc);
@@ -290,7 +291,7 @@
 +			return -1;
  		desc = IOVA_TO_VVA(struct vring_desc *, vc_req, head->addr,
  				&dlen, VHOST_ACCESS_RO);
-@@ -1140,6 +1181,6 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
+@@ -1139,6 +1180,6 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
  		case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
  			req = &tmp_req;
 -			if (unlikely(copy_data(req, vc_req, &desc, sizeof(*req))
@@ -299,14 +300,14 @@
 +					&nb_descs) < 0)) {
  				err = VIRTIO_CRYPTO_BADMSG;
  				VC_LOG_ERR("Invalid descriptor");
-@@ -1154,5 +1195,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
+@@ -1153,5 +1194,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
  	} else {
  		if (unlikely(move_desc(vc_req->head, &desc,
 -				sizeof(*req)) < 0)) {
 +				sizeof(*req), &nb_descs) < 0)) {
  			VC_LOG_ERR("Incorrect descriptor");
  			goto error_exit;
-@@ -1195,9 +1236,11 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
+@@ -1194,9 +1235,11 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
  		case VIRTIO_CRYPTO_SYM_OP_CIPHER:
  			err = prepare_sym_cipher_op(vcrypto, op, vc_req,
 -					&req->u.sym_req.u.cipher, desc);
@@ -320,7 +321,7 @@
 +					&nb_descs);
  			break;
  		}
-@@ -1217,5 +1260,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
+@@ -1216,5 +1259,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
  error_exit:
  
 -	inhdr = reach_inhdr(vc_req, desc);

  parent reply	other threads:[~2019-01-31 15:50 UTC|newest]

Thread overview: 54+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-01-31 15:48 [dpdk-stable] patch 'net/i40e: fix get RSS conf' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'devtools: fix wrong headline lowercase for arm' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'drivers/crypto: fix PMDs memory leak' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'doc: fix AESNI_MB guide' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'compress/qat: fix returned status on overflow' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'test/crypto: fix misleading trace message' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'examples/ipsec-secgw: fix crypto-op might never get dequeued' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'examples/ipsec-secgw: fix outbound codepath for single SA' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'examples/ipsec-secgw: make local variables static' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'examples/ipsec-secgw: fix inbound SA checking' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'app/bbdev: fix return value check' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'crypto/dpaa2_sec: fix FLC address for physical mode' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'build: use static deps for pkg-config libs.private' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'build: fix variable name in dependency error message' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'devtools: fix build check for whether meson has run' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'kni: fix build on RHEL 8' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'eal: fix strdup usages in internal config' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'vfio: do not unregister callback in secondary process' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'mem: fix variable shadowing' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'mem: fix storing old policy' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'mk: fix scope of disabling AVX512F support' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'eal: fix build of external app with clang on armv8' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'net/mlx5: fix shared counter allocation logic' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'net/ixgbe: fix over using multicast table for VF' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'vhost: fix possible out of bound access in vector filling' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'vhost: fix possible dead loop " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'vhost: ensure event idx is mapped when negotiated' " Kevin Traynor
2019-01-31 15:48 ` Kevin Traynor [this message]
2019-01-31 15:48 ` [dpdk-stable] patch 'vhost/crypto: fix possible out of bound access' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'net/fm10k: fix internal switch initial status' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'bus/dpaa: fix logical to physical core affine logic' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'net/dpaa: fix secondary process' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'examples/flow_filtering: fix example documentation' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'doc: fix a typo in testpmd guide' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'doc: fix a parameter name " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'app/testpmd: fix quit to stop all ports before close' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'net/bonding: fix possible null pointer reference' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'net/sfc: add missing header guard to TSO header file' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'net/sfc: discard last seen VLAN TCI if Tx packet is dropped' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'net/sfc/base: fix Tx descriptor max number check' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'net/virtio: add barrier before reading the flags' " Kevin Traynor
2019-01-31 16:02   ` Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'bus/fslmc: fix to reset portal memory before use' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'bus/fslmc: fix ring mode to use correct cache settings' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'bus/fslmc: fix to use correct physical core for logical core' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'net/dpaa2: fix bad check for not-null' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'bus/fslmc: fix to convert error msg to warning' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'bus/fslmc: fix parse method for bus devices' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'net/dpaa2: fix device init for secondary process' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'doc: fix MAC address rewrite actions in prog guide' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'net/sfc: fix typo in preprocessor check' " Kevin Traynor
2019-01-31 15:48 ` [dpdk-stable] patch 'net/tap: allow full length names' " Kevin Traynor
2019-01-31 15:49 ` [dpdk-stable] patch 'net/tap: let kernel choose tun device name' " Kevin Traynor
2019-01-31 15:49 ` [dpdk-stable] patch 'net/i40e: perform basic validation on VF messages' " Kevin Traynor

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20190131154901.5383-28-ktraynor@redhat.com \
    --to=ktraynor@redhat.com \
    --cc=maxime.coquelin@redhat.com \
    --cc=roy.fan.zhang@intel.com \
    --cc=stable@dpdk.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).