patches for DPDK stable branches
* [dpdk-stable] [PATCH v5 1/5] vhost: fix async vhost ops return type
       [not found] ` <20210716072436.19017-1-cheng1.jiang@intel.com>
@ 2021-07-16  7:24   ` Cheng Jiang
  0 siblings, 0 replies; 5+ messages in thread
From: Cheng Jiang @ 2021-07-16  7:24 UTC (permalink / raw)
  To: maxime.coquelin, Chenbo.Xia
  Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang, stable

The async vhost callback ops should return a negative value when
something goes wrong in the callback, so the return type should be
changed to int32_t. The issue in the vhost example is also fixed.

Fixes: cd6760da1076 ("vhost: introduce async enqueue for split ring")
Fixes: 819a71685826 ("vhost: fix async callback return type")
Fixes: 6b3c81db8bb7 ("vhost: simplify async copy completion")
Fixes: abec60e7115d ("examples/vhost: support vhost async data path")
Fixes: 873e8dad6f49 ("vhost: support packed ring in async datapath")
Cc: stable@dpdk.org

Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
 examples/vhost/ioat.c       |  4 +--
 examples/vhost/ioat.h       |  4 +--
 lib/vhost/rte_vhost_async.h |  8 ++---
 lib/vhost/virtio_net.c      | 61 ++++++++++++++++++++++++++++++++-----
 4 files changed, 61 insertions(+), 16 deletions(-)

diff --git a/examples/vhost/ioat.c b/examples/vhost/ioat.c
index 2a2c2d7202..457f8171f0 100644
--- a/examples/vhost/ioat.c
+++ b/examples/vhost/ioat.c
@@ -122,7 +122,7 @@ open_ioat(const char *value)
 	return ret;
 }
 
-uint32_t
+int32_t
 ioat_transfer_data_cb(int vid, uint16_t queue_id,
 		struct rte_vhost_async_desc *descs,
 		struct rte_vhost_async_status *opaque_data, uint16_t count)
@@ -168,7 +168,7 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id,
 	return i_desc;
 }
 
-uint32_t
+int32_t
 ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t max_packets)
diff --git a/examples/vhost/ioat.h b/examples/vhost/ioat.h
index 1aa28ed6a3..b57b5645b0 100644
--- a/examples/vhost/ioat.h
+++ b/examples/vhost/ioat.h
@@ -27,12 +27,12 @@ struct dma_for_vhost {
 #ifdef RTE_RAW_IOAT
 int open_ioat(const char *value);
 
-uint32_t
+int32_t
 ioat_transfer_data_cb(int vid, uint16_t queue_id,
 		struct rte_vhost_async_desc *descs,
 		struct rte_vhost_async_status *opaque_data, uint16_t count);
 
-uint32_t
+int32_t
 ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t max_packets);
diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
index 6faa31f5ad..e964d83837 100644
--- a/lib/vhost/rte_vhost_async.h
+++ b/lib/vhost/rte_vhost_async.h
@@ -59,9 +59,9 @@ struct rte_vhost_async_channel_ops {
 	 * @param count
 	 *  number of elements in the "descs" array
 	 * @return
-	 *  number of descs processed
+	 *  number of descs processed, negative value means error
 	 */
-	uint32_t (*transfer_data)(int vid, uint16_t queue_id,
+	int32_t (*transfer_data)(int vid, uint16_t queue_id,
 		struct rte_vhost_async_desc *descs,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t count);
@@ -76,9 +76,9 @@ struct rte_vhost_async_channel_ops {
 	 * @param max_packets
 	 *  max number of packets could be completed
 	 * @return
-	 *  number of async descs completed
+	 *  number of async descs completed, negative value means error
 	 */
-	uint32_t (*check_completed_copies)(int vid, uint16_t queue_id,
+	int32_t (*check_completed_copies)(int vid, uint16_t queue_id,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t max_packets);
 };
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index b93482587c..16ae4d9e19 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1528,6 +1528,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	struct async_inflight_info *pkts_info = vq->async_pkts_info;
 	uint32_t n_pkts = 0, pkt_err = 0;
 	uint32_t num_async_pkts = 0, num_done_pkts = 0;
+	int32_t n_xfer;
 	struct {
 		uint16_t pkt_idx;
 		uint16_t last_avail_idx;
@@ -1608,8 +1609,17 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 		if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
 			((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
 			BUF_VECTOR_MAX))) {
-			n_pkts = vq->async_ops.transfer_data(dev->vid,
+			n_xfer = vq->async_ops.transfer_data(dev->vid,
 					queue_id, tdes, 0, pkt_burst_idx);
+			if (n_xfer >= 0) {
+				n_pkts = n_xfer;
+			} else {
+				VHOST_LOG_DATA(ERR,
+					"(%d) %s: failed to transfer data for queue id %d.\n",
+					dev->vid, __func__, queue_id);
+				n_pkts = 0;
+			}
+
 			iovec_idx = 0;
 			it_idx = 0;
 
@@ -1632,8 +1642,15 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	}
 
 	if (pkt_burst_idx) {
-		n_pkts = vq->async_ops.transfer_data(dev->vid,
-				queue_id, tdes, 0, pkt_burst_idx);
+		n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+		if (n_xfer >= 0) {
+			n_pkts = n_xfer;
+		} else {
+			VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
+				dev->vid, __func__, queue_id);
+			n_pkts = 0;
+		}
+
 		vq->async_pkts_inflight_n += n_pkts;
 
 		if (unlikely(n_pkts < pkt_burst_idx))
@@ -1903,6 +1920,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 	uint16_t async_descs_idx = 0;
 	uint16_t num_buffers;
 	uint16_t num_descs;
+	int32_t n_xfer;
 
 	struct rte_vhost_iov_iter *it_pool = vq->it_pool;
 	struct iovec *vec_pool = vq->vec_pool;
@@ -1983,8 +2001,17 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 		 */
 		if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
 			((VHOST_MAX_ASYNC_VEC >> 1) - segs_await < BUF_VECTOR_MAX))) {
-			n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id,
-				tdes, 0, pkt_burst_idx);
+			n_xfer = vq->async_ops.transfer_data(dev->vid,
+					queue_id, tdes, 0, pkt_burst_idx);
+			if (n_xfer >= 0) {
+				n_pkts = n_xfer;
+			} else {
+				VHOST_LOG_DATA(ERR,
+					"(%d) %s: failed to transfer data for queue id %d.\n",
+					dev->vid, __func__, queue_id);
+				n_pkts = 0;
+			}
+
 			iovec_idx = 0;
 			it_idx = 0;
 			segs_await = 0;
@@ -2006,7 +2033,15 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 	} while (pkt_idx < count);
 
 	if (pkt_burst_idx) {
-		n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+		n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+		if (n_xfer >= 0) {
+			n_pkts = n_xfer;
+		} else {
+			VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
+				dev->vid, __func__, queue_id);
+			n_pkts = 0;
+		}
+
 		vq->async_pkts_inflight_n += n_pkts;
 
 		if (unlikely(n_pkts < pkt_burst_idx))
@@ -2091,6 +2126,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 	uint16_t start_idx, pkts_idx, vq_size;
 	struct async_inflight_info *pkts_info;
 	uint16_t from, i;
+	int32_t n_cpl;
 
 	if (!dev)
 		return 0;
@@ -2118,9 +2154,18 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 	start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
 		vq_size, vq->async_pkts_inflight_n);
 
-	if (count > vq->async_last_pkts_n)
-		n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
+	if (count > vq->async_last_pkts_n) {
+		n_cpl = vq->async_ops.check_completed_copies(vid,
 			queue_id, 0, count - vq->async_last_pkts_n);
+		if (n_cpl >= 0) {
+			n_pkts_cpl = n_cpl;
+		} else {
+			VHOST_LOG_DATA(ERR,
+				"(%d) %s: failed to check completed copies for queue id %d.\n",
+				dev->vid, __func__, queue_id);
+			n_pkts_cpl = 0;
+		}
+	}
 	n_pkts_cpl += vq->async_last_pkts_n;
 
 	n_pkts_put = RTE_MIN(count, n_pkts_cpl);
-- 
2.29.2
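
As a reference for applications that implement these ops, a minimal
sketch of the two callbacks under the new int32_t convention follows.
dma_submit() and dma_poll() are hypothetical stand-ins for a real DMA
backend; only the ops struct and the callback signatures come from the
patch itself.

#include <stdint.h>
#include <rte_common.h>
#include <rte_vhost_async.h>

/* Hypothetical DMA backend helpers -- not part of DPDK. */
extern int dma_submit(struct rte_vhost_async_desc *desc);
extern int dma_poll(uint16_t max_packets);

static int32_t
my_transfer_data_cb(int vid __rte_unused, uint16_t queue_id __rte_unused,
		struct rte_vhost_async_desc *descs,
		struct rte_vhost_async_status *opaque_data __rte_unused,
		uint16_t count)
{
	uint16_t i;

	for (i = 0; i < count; i++) {
		if (dma_submit(&descs[i]) != 0)
			return -1;	/* a negative value now reports failure */
	}

	return i;	/* number of descs actually accepted */
}

static int32_t
my_check_completed_copies_cb(int vid __rte_unused,
		uint16_t queue_id __rte_unused,
		struct rte_vhost_async_status *opaque_data __rte_unused,
		uint16_t max_packets)
{
	int done = dma_poll(max_packets);

	return done < 0 ? -1 : done;	/* completions, or error */
}

static struct rte_vhost_async_channel_ops my_ops = {
	.transfer_data = my_transfer_data_cb,
	.check_completed_copies = my_check_completed_copies_cb,
};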


* [dpdk-stable] [PATCH v6 1/5] vhost: fix async vhost ops return type
       [not found] ` <20210719081022.12949-1-cheng1.jiang@intel.com>
@ 2021-07-19  8:10   ` Cheng Jiang
  2021-07-21 14:20     ` Maxime Coquelin
  0 siblings, 1 reply; 5+ messages in thread
From: Cheng Jiang @ 2021-07-19  8:10 UTC (permalink / raw)
  To: maxime.coquelin, Chenbo.Xia
  Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang, stable

The async vhost callback ops should return a negative value when
something goes wrong in the callback, so the return type should be
changed to int32_t. The issue in the vhost example is also fixed.

Fixes: cd6760da1076 ("vhost: introduce async enqueue for split ring")
Fixes: 819a71685826 ("vhost: fix async callback return type")
Fixes: 6b3c81db8bb7 ("vhost: simplify async copy completion")
Fixes: abec60e7115d ("examples/vhost: support vhost async data path")
Fixes: 873e8dad6f49 ("vhost: support packed ring in async datapath")
Cc: stable@dpdk.org

Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
 examples/vhost/ioat.c       |  4 +--
 examples/vhost/ioat.h       |  4 +--
 lib/vhost/rte_vhost_async.h |  8 ++---
 lib/vhost/virtio_net.c      | 61 ++++++++++++++++++++++++++++++++-----
 4 files changed, 61 insertions(+), 16 deletions(-)

diff --git a/examples/vhost/ioat.c b/examples/vhost/ioat.c
index 2a2c2d7202..457f8171f0 100644
--- a/examples/vhost/ioat.c
+++ b/examples/vhost/ioat.c
@@ -122,7 +122,7 @@ open_ioat(const char *value)
 	return ret;
 }
 
-uint32_t
+int32_t
 ioat_transfer_data_cb(int vid, uint16_t queue_id,
 		struct rte_vhost_async_desc *descs,
 		struct rte_vhost_async_status *opaque_data, uint16_t count)
@@ -168,7 +168,7 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id,
 	return i_desc;
 }
 
-uint32_t
+int32_t
 ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t max_packets)
diff --git a/examples/vhost/ioat.h b/examples/vhost/ioat.h
index 1aa28ed6a3..b57b5645b0 100644
--- a/examples/vhost/ioat.h
+++ b/examples/vhost/ioat.h
@@ -27,12 +27,12 @@ struct dma_for_vhost {
 #ifdef RTE_RAW_IOAT
 int open_ioat(const char *value);
 
-uint32_t
+int32_t
 ioat_transfer_data_cb(int vid, uint16_t queue_id,
 		struct rte_vhost_async_desc *descs,
 		struct rte_vhost_async_status *opaque_data, uint16_t count);
 
-uint32_t
+int32_t
 ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t max_packets);
diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
index 6faa31f5ad..e964d83837 100644
--- a/lib/vhost/rte_vhost_async.h
+++ b/lib/vhost/rte_vhost_async.h
@@ -59,9 +59,9 @@ struct rte_vhost_async_channel_ops {
 	 * @param count
 	 *  number of elements in the "descs" array
 	 * @return
-	 *  number of descs processed
+	 *  number of descs processed, negative value means error
 	 */
-	uint32_t (*transfer_data)(int vid, uint16_t queue_id,
+	int32_t (*transfer_data)(int vid, uint16_t queue_id,
 		struct rte_vhost_async_desc *descs,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t count);
@@ -76,9 +76,9 @@ struct rte_vhost_async_channel_ops {
 	 * @param max_packets
 	 *  max number of packets could be completed
 	 * @return
-	 *  number of async descs completed
+	 *  number of async descs completed, negative value means error
 	 */
-	uint32_t (*check_completed_copies)(int vid, uint16_t queue_id,
+	int32_t (*check_completed_copies)(int vid, uint16_t queue_id,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t max_packets);
 };
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index b93482587c..16ae4d9e19 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1528,6 +1528,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	struct async_inflight_info *pkts_info = vq->async_pkts_info;
 	uint32_t n_pkts = 0, pkt_err = 0;
 	uint32_t num_async_pkts = 0, num_done_pkts = 0;
+	int32_t n_xfer;
 	struct {
 		uint16_t pkt_idx;
 		uint16_t last_avail_idx;
@@ -1608,8 +1609,17 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 		if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
 			((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
 			BUF_VECTOR_MAX))) {
-			n_pkts = vq->async_ops.transfer_data(dev->vid,
+			n_xfer = vq->async_ops.transfer_data(dev->vid,
 					queue_id, tdes, 0, pkt_burst_idx);
+			if (n_xfer >= 0) {
+				n_pkts = n_xfer;
+			} else {
+				VHOST_LOG_DATA(ERR,
+					"(%d) %s: failed to transfer data for queue id %d.\n",
+					dev->vid, __func__, queue_id);
+				n_pkts = 0;
+			}
+
 			iovec_idx = 0;
 			it_idx = 0;
 
@@ -1632,8 +1642,15 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	}
 
 	if (pkt_burst_idx) {
-		n_pkts = vq->async_ops.transfer_data(dev->vid,
-				queue_id, tdes, 0, pkt_burst_idx);
+		n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+		if (n_xfer >= 0) {
+			n_pkts = n_xfer;
+		} else {
+			VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
+				dev->vid, __func__, queue_id);
+			n_pkts = 0;
+		}
+
 		vq->async_pkts_inflight_n += n_pkts;
 
 		if (unlikely(n_pkts < pkt_burst_idx))
@@ -1903,6 +1920,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 	uint16_t async_descs_idx = 0;
 	uint16_t num_buffers;
 	uint16_t num_descs;
+	int32_t n_xfer;
 
 	struct rte_vhost_iov_iter *it_pool = vq->it_pool;
 	struct iovec *vec_pool = vq->vec_pool;
@@ -1983,8 +2001,17 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 		 */
 		if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
 			((VHOST_MAX_ASYNC_VEC >> 1) - segs_await < BUF_VECTOR_MAX))) {
-			n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id,
-				tdes, 0, pkt_burst_idx);
+			n_xfer = vq->async_ops.transfer_data(dev->vid,
+					queue_id, tdes, 0, pkt_burst_idx);
+			if (n_xfer >= 0) {
+				n_pkts = n_xfer;
+			} else {
+				VHOST_LOG_DATA(ERR,
+					"(%d) %s: failed to transfer data for queue id %d.\n",
+					dev->vid, __func__, queue_id);
+				n_pkts = 0;
+			}
+
 			iovec_idx = 0;
 			it_idx = 0;
 			segs_await = 0;
@@ -2006,7 +2033,15 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 	} while (pkt_idx < count);
 
 	if (pkt_burst_idx) {
-		n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+		n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+		if (n_xfer >= 0) {
+			n_pkts = n_xfer;
+		} else {
+			VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
+				dev->vid, __func__, queue_id);
+			n_pkts = 0;
+		}
+
 		vq->async_pkts_inflight_n += n_pkts;
 
 		if (unlikely(n_pkts < pkt_burst_idx))
@@ -2091,6 +2126,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 	uint16_t start_idx, pkts_idx, vq_size;
 	struct async_inflight_info *pkts_info;
 	uint16_t from, i;
+	int32_t n_cpl;
 
 	if (!dev)
 		return 0;
@@ -2118,9 +2154,18 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 	start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
 		vq_size, vq->async_pkts_inflight_n);
 
-	if (count > vq->async_last_pkts_n)
-		n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
+	if (count > vq->async_last_pkts_n) {
+		n_cpl = vq->async_ops.check_completed_copies(vid,
 			queue_id, 0, count - vq->async_last_pkts_n);
+		if (n_cpl >= 0) {
+			n_pkts_cpl = n_cpl;
+		} else {
+			VHOST_LOG_DATA(ERR,
+				"(%d) %s: failed to check completed copies for queue id %d.\n",
+				dev->vid, __func__, queue_id);
+			n_pkts_cpl = 0;
+		}
+	}
 	n_pkts_cpl += vq->async_last_pkts_n;
 
 	n_pkts_put = RTE_MIN(count, n_pkts_cpl);
-- 
2.29.2


* Re: [dpdk-stable] [PATCH v6 1/5] vhost: fix async vhost ops return type
  2021-07-19  8:10   ` [dpdk-stable] [PATCH v6 " Cheng Jiang
@ 2021-07-21 14:20     ` Maxime Coquelin
  0 siblings, 0 replies; 5+ messages in thread
From: Maxime Coquelin @ 2021-07-21 14:20 UTC (permalink / raw)
  To: Cheng Jiang, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, stable



On 7/19/21 10:10 AM, Cheng Jiang wrote:
> The async vhost callback ops should return a negative value when
> something goes wrong in the callback, so the return type should be
> changed to int32_t. The issue in the vhost example is also fixed.
> 
> Fixes: cd6760da1076 ("vhost: introduce async enqueue for split ring")
> Fixes: 819a71685826 ("vhost: fix async callback return type")
> Fixes: 6b3c81db8bb7 ("vhost: simplify async copy completion")
> Fixes: abec60e7115d ("examples/vhost: support vhost async data path")
> Fixes: 873e8dad6f49 ("vhost: support packed ring in async datapath")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
> ---
>  examples/vhost/ioat.c       |  4 +--
>  examples/vhost/ioat.h       |  4 +--
>  lib/vhost/rte_vhost_async.h |  8 ++---
>  lib/vhost/virtio_net.c      | 61 ++++++++++++++++++++++++++++++++-----
>  4 files changed, 61 insertions(+), 16 deletions(-)
> 

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime


* [dpdk-stable] [PATCH v7 1/5] vhost: fix async vhost ops return type
       [not found] ` <20210722040907.20468-1-cheng1.jiang@intel.com>
@ 2021-07-22  4:09   ` Cheng Jiang
  0 siblings, 0 replies; 5+ messages in thread
From: Cheng Jiang @ 2021-07-22  4:09 UTC (permalink / raw)
  To: maxime.coquelin, Chenbo.Xia
  Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang, stable

The async vhost callback ops should return a negative value when
something goes wrong in the callback, so the return type should be
changed to int32_t. The issue in the vhost example is also fixed.

Fixes: cd6760da1076 ("vhost: introduce async enqueue for split ring")
Fixes: 819a71685826 ("vhost: fix async callback return type")
Fixes: 6b3c81db8bb7 ("vhost: simplify async copy completion")
Fixes: abec60e7115d ("examples/vhost: support vhost async data path")
Fixes: 873e8dad6f49 ("vhost: support packed ring in async datapath")
Cc: stable@dpdk.org

Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 examples/vhost/ioat.c       |  4 +--
 examples/vhost/ioat.h       |  4 +--
 lib/vhost/rte_vhost_async.h |  8 ++---
 lib/vhost/virtio_net.c      | 61 ++++++++++++++++++++++++++++++++-----
 4 files changed, 61 insertions(+), 16 deletions(-)

diff --git a/examples/vhost/ioat.c b/examples/vhost/ioat.c
index 2a2c2d7202..457f8171f0 100644
--- a/examples/vhost/ioat.c
+++ b/examples/vhost/ioat.c
@@ -122,7 +122,7 @@ open_ioat(const char *value)
 	return ret;
 }
 
-uint32_t
+int32_t
 ioat_transfer_data_cb(int vid, uint16_t queue_id,
 		struct rte_vhost_async_desc *descs,
 		struct rte_vhost_async_status *opaque_data, uint16_t count)
@@ -168,7 +168,7 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id,
 	return i_desc;
 }
 
-uint32_t
+int32_t
 ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t max_packets)
diff --git a/examples/vhost/ioat.h b/examples/vhost/ioat.h
index 1aa28ed6a3..b57b5645b0 100644
--- a/examples/vhost/ioat.h
+++ b/examples/vhost/ioat.h
@@ -27,12 +27,12 @@ struct dma_for_vhost {
 #ifdef RTE_RAW_IOAT
 int open_ioat(const char *value);
 
-uint32_t
+int32_t
 ioat_transfer_data_cb(int vid, uint16_t queue_id,
 		struct rte_vhost_async_desc *descs,
 		struct rte_vhost_async_status *opaque_data, uint16_t count);
 
-uint32_t
+int32_t
 ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t max_packets);
diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
index 69ec66bba5..02d012ae23 100644
--- a/lib/vhost/rte_vhost_async.h
+++ b/lib/vhost/rte_vhost_async.h
@@ -59,9 +59,9 @@ struct rte_vhost_async_channel_ops {
 	 * @param count
 	 *  number of elements in the "descs" array
 	 * @return
-	 *  number of descs processed
+	 *  number of descs processed, negative value means error
 	 */
-	uint32_t (*transfer_data)(int vid, uint16_t queue_id,
+	int32_t (*transfer_data)(int vid, uint16_t queue_id,
 		struct rte_vhost_async_desc *descs,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t count);
@@ -76,9 +76,9 @@ struct rte_vhost_async_channel_ops {
 	 * @param max_packets
 	 *  max number of packets could be completed
 	 * @return
-	 *  number of async descs completed
+	 *  number of async descs completed, negative value means error
 	 */
-	uint32_t (*check_completed_copies)(int vid, uint16_t queue_id,
+	int32_t (*check_completed_copies)(int vid, uint16_t queue_id,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t max_packets);
 };
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 6e5d82c1a8..3ab5229f76 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1644,6 +1644,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	struct async_inflight_info *pkts_info = vq->async_pkts_info;
 	uint32_t n_pkts = 0, pkt_err = 0;
 	uint32_t num_async_pkts = 0, num_done_pkts = 0;
+	int32_t n_xfer;
 	struct {
 		uint16_t pkt_idx;
 		uint16_t last_avail_idx;
@@ -1724,8 +1725,17 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 		if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
 			((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
 			BUF_VECTOR_MAX))) {
-			n_pkts = vq->async_ops.transfer_data(dev->vid,
+			n_xfer = vq->async_ops.transfer_data(dev->vid,
 					queue_id, tdes, 0, pkt_burst_idx);
+			if (n_xfer >= 0) {
+				n_pkts = n_xfer;
+			} else {
+				VHOST_LOG_DATA(ERR,
+					"(%d) %s: failed to transfer data for queue id %d.\n",
+					dev->vid, __func__, queue_id);
+				n_pkts = 0;
+			}
+
 			iovec_idx = 0;
 			it_idx = 0;
 
@@ -1748,8 +1758,15 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	}
 
 	if (pkt_burst_idx) {
-		n_pkts = vq->async_ops.transfer_data(dev->vid,
-				queue_id, tdes, 0, pkt_burst_idx);
+		n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+		if (n_xfer >= 0) {
+			n_pkts = n_xfer;
+		} else {
+			VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
+				dev->vid, __func__, queue_id);
+			n_pkts = 0;
+		}
+
 		vq->async_pkts_inflight_n += n_pkts;
 
 		if (unlikely(n_pkts < pkt_burst_idx))
@@ -1996,6 +2013,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 	uint16_t async_descs_idx = 0;
 	uint16_t num_buffers;
 	uint16_t num_descs;
+	int32_t n_xfer;
 
 	struct rte_vhost_iov_iter *it_pool = vq->it_pool;
 	struct iovec *vec_pool = vq->vec_pool;
@@ -2078,8 +2096,17 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 		 */
 		if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
 			((VHOST_MAX_ASYNC_VEC >> 1) - segs_await < BUF_VECTOR_MAX))) {
-			n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id,
-				tdes, 0, pkt_burst_idx);
+			n_xfer = vq->async_ops.transfer_data(dev->vid,
+					queue_id, tdes, 0, pkt_burst_idx);
+			if (n_xfer >= 0) {
+				n_pkts = n_xfer;
+			} else {
+				VHOST_LOG_DATA(ERR,
+					"(%d) %s: failed to transfer data for queue id %d.\n",
+					dev->vid, __func__, queue_id);
+				n_pkts = 0;
+			}
+
 			iovec_idx = 0;
 			it_idx = 0;
 			segs_await = 0;
@@ -2101,7 +2128,15 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 	} while (pkt_idx < count);
 
 	if (pkt_burst_idx) {
-		n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+		n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+		if (n_xfer >= 0) {
+			n_pkts = n_xfer;
+		} else {
+			VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
+				dev->vid, __func__, queue_id);
+			n_pkts = 0;
+		}
+
 		vq->async_pkts_inflight_n += n_pkts;
 
 		if (unlikely(n_pkts < pkt_burst_idx))
@@ -2188,6 +2223,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 	uint16_t start_idx, pkts_idx, vq_size;
 	struct async_inflight_info *pkts_info;
 	uint16_t from, i;
+	int32_t n_cpl;
 
 	if (!dev)
 		return 0;
@@ -2215,9 +2251,18 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 	start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
 		vq_size, vq->async_pkts_inflight_n);
 
-	if (count > vq->async_last_pkts_n)
-		n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
+	if (count > vq->async_last_pkts_n) {
+		n_cpl = vq->async_ops.check_completed_copies(vid,
 			queue_id, 0, count - vq->async_last_pkts_n);
+		if (n_cpl >= 0) {
+			n_pkts_cpl = n_cpl;
+		} else {
+			VHOST_LOG_DATA(ERR,
+				"(%d) %s: failed to check completed copies for queue id %d.\n",
+				dev->vid, __func__, queue_id);
+			n_pkts_cpl = 0;
+		}
+	}
 	n_pkts_cpl += vq->async_last_pkts_n;
 
 	n_pkts_put = RTE_MIN(count, n_pkts_cpl);
-- 
2.29.2


* [dpdk-stable] [PATCH v8 1/4] vhost: fix async vhost ops return type
       [not found] ` <20210723080937.20256-1-cheng1.jiang@intel.com>
@ 2021-07-23  8:09   ` Cheng Jiang
  0 siblings, 0 replies; 5+ messages in thread
From: Cheng Jiang @ 2021-07-23  8:09 UTC (permalink / raw)
  To: maxime.coquelin, Chenbo.Xia
  Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang, stable

The async vhost callback ops should return a negative value when
something goes wrong in the callback, so the return type should be
changed to int32_t. The issue in the vhost example is also fixed.

Fixes: cd6760da1076 ("vhost: introduce async enqueue for split ring")
Fixes: 819a71685826 ("vhost: fix async callback return type")
Fixes: 6b3c81db8bb7 ("vhost: simplify async copy completion")
Fixes: abec60e7115d ("examples/vhost: support vhost async data path")
Fixes: 6e9a9d2a02ae ("examples/vhost: fix ioat dependency")
Fixes: 873e8dad6f49 ("vhost: support packed ring in async datapath")
Cc: stable@dpdk.org

Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 examples/vhost/ioat.c       |  4 +--
 examples/vhost/ioat.h       |  8 ++---
 lib/vhost/rte_vhost_async.h |  8 ++---
 lib/vhost/virtio_net.c      | 61 ++++++++++++++++++++++++++++++++-----
 4 files changed, 63 insertions(+), 18 deletions(-)

diff --git a/examples/vhost/ioat.c b/examples/vhost/ioat.c
index 2a2c2d7202..457f8171f0 100644
--- a/examples/vhost/ioat.c
+++ b/examples/vhost/ioat.c
@@ -122,7 +122,7 @@ open_ioat(const char *value)
 	return ret;
 }
 
-uint32_t
+int32_t
 ioat_transfer_data_cb(int vid, uint16_t queue_id,
 		struct rte_vhost_async_desc *descs,
 		struct rte_vhost_async_status *opaque_data, uint16_t count)
@@ -168,7 +168,7 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id,
 	return i_desc;
 }
 
-uint32_t
+int32_t
 ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t max_packets)
diff --git a/examples/vhost/ioat.h b/examples/vhost/ioat.h
index 1aa28ed6a3..62e163c585 100644
--- a/examples/vhost/ioat.h
+++ b/examples/vhost/ioat.h
@@ -27,12 +27,12 @@ struct dma_for_vhost {
 #ifdef RTE_RAW_IOAT
 int open_ioat(const char *value);
 
-uint32_t
+int32_t
 ioat_transfer_data_cb(int vid, uint16_t queue_id,
 		struct rte_vhost_async_desc *descs,
 		struct rte_vhost_async_status *opaque_data, uint16_t count);
 
-uint32_t
+int32_t
 ioat_check_completed_copies_cb(int vid, uint16_t queue_id,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t max_packets);
@@ -42,7 +42,7 @@ static int open_ioat(const char *value __rte_unused)
 	return -1;
 }
 
-static uint32_t
+static int32_t
 ioat_transfer_data_cb(int vid __rte_unused, uint16_t queue_id __rte_unused,
 		struct rte_vhost_async_desc *descs __rte_unused,
 		struct rte_vhost_async_status *opaque_data __rte_unused,
@@ -51,7 +51,7 @@ ioat_transfer_data_cb(int vid __rte_unused, uint16_t queue_id __rte_unused,
 	return -1;
 }
 
-static uint32_t
+static int32_t
 ioat_check_completed_copies_cb(int vid __rte_unused,
 		uint16_t queue_id __rte_unused,
 		struct rte_vhost_async_status *opaque_data __rte_unused,
diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
index 69ec66bba5..02d012ae23 100644
--- a/lib/vhost/rte_vhost_async.h
+++ b/lib/vhost/rte_vhost_async.h
@@ -59,9 +59,9 @@ struct rte_vhost_async_channel_ops {
 	 * @param count
 	 *  number of elements in the "descs" array
 	 * @return
-	 *  number of descs processed
+	 *  number of descs processed, negative value means error
 	 */
-	uint32_t (*transfer_data)(int vid, uint16_t queue_id,
+	int32_t (*transfer_data)(int vid, uint16_t queue_id,
 		struct rte_vhost_async_desc *descs,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t count);
@@ -76,9 +76,9 @@ struct rte_vhost_async_channel_ops {
 	 * @param max_packets
 	 *  max number of packets could be completed
 	 * @return
-	 *  number of async descs completed
+	 *  number of async descs completed, negative value means error
 	 */
-	uint32_t (*check_completed_copies)(int vid, uint16_t queue_id,
+	int32_t (*check_completed_copies)(int vid, uint16_t queue_id,
 		struct rte_vhost_async_status *opaque_data,
 		uint16_t max_packets);
 };
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 6e5d82c1a8..3ab5229f76 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1644,6 +1644,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	struct async_inflight_info *pkts_info = vq->async_pkts_info;
 	uint32_t n_pkts = 0, pkt_err = 0;
 	uint32_t num_async_pkts = 0, num_done_pkts = 0;
+	int32_t n_xfer;
 	struct {
 		uint16_t pkt_idx;
 		uint16_t last_avail_idx;
@@ -1724,8 +1725,17 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 		if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
 			((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
 			BUF_VECTOR_MAX))) {
-			n_pkts = vq->async_ops.transfer_data(dev->vid,
+			n_xfer = vq->async_ops.transfer_data(dev->vid,
 					queue_id, tdes, 0, pkt_burst_idx);
+			if (n_xfer >= 0) {
+				n_pkts = n_xfer;
+			} else {
+				VHOST_LOG_DATA(ERR,
+					"(%d) %s: failed to transfer data for queue id %d.\n",
+					dev->vid, __func__, queue_id);
+				n_pkts = 0;
+			}
+
 			iovec_idx = 0;
 			it_idx = 0;
 
@@ -1748,8 +1758,15 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	}
 
 	if (pkt_burst_idx) {
-		n_pkts = vq->async_ops.transfer_data(dev->vid,
-				queue_id, tdes, 0, pkt_burst_idx);
+		n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+		if (n_xfer >= 0) {
+			n_pkts = n_xfer;
+		} else {
+			VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
+				dev->vid, __func__, queue_id);
+			n_pkts = 0;
+		}
+
 		vq->async_pkts_inflight_n += n_pkts;
 
 		if (unlikely(n_pkts < pkt_burst_idx))
@@ -1996,6 +2013,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 	uint16_t async_descs_idx = 0;
 	uint16_t num_buffers;
 	uint16_t num_descs;
+	int32_t n_xfer;
 
 	struct rte_vhost_iov_iter *it_pool = vq->it_pool;
 	struct iovec *vec_pool = vq->vec_pool;
@@ -2078,8 +2096,17 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 		 */
 		if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
 			((VHOST_MAX_ASYNC_VEC >> 1) - segs_await < BUF_VECTOR_MAX))) {
-			n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id,
-				tdes, 0, pkt_burst_idx);
+			n_xfer = vq->async_ops.transfer_data(dev->vid,
+					queue_id, tdes, 0, pkt_burst_idx);
+			if (n_xfer >= 0) {
+				n_pkts = n_xfer;
+			} else {
+				VHOST_LOG_DATA(ERR,
+					"(%d) %s: failed to transfer data for queue id %d.\n",
+					dev->vid, __func__, queue_id);
+				n_pkts = 0;
+			}
+
 			iovec_idx = 0;
 			it_idx = 0;
 			segs_await = 0;
@@ -2101,7 +2128,15 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 	} while (pkt_idx < count);
 
 	if (pkt_burst_idx) {
-		n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+		n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+		if (n_xfer >= 0) {
+			n_pkts = n_xfer;
+		} else {
+			VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
+				dev->vid, __func__, queue_id);
+			n_pkts = 0;
+		}
+
 		vq->async_pkts_inflight_n += n_pkts;
 
 		if (unlikely(n_pkts < pkt_burst_idx))
@@ -2188,6 +2223,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 	uint16_t start_idx, pkts_idx, vq_size;
 	struct async_inflight_info *pkts_info;
 	uint16_t from, i;
+	int32_t n_cpl;
 
 	if (!dev)
 		return 0;
@@ -2215,9 +2251,18 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 	start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
 		vq_size, vq->async_pkts_inflight_n);
 
-	if (count > vq->async_last_pkts_n)
-		n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
+	if (count > vq->async_last_pkts_n) {
+		n_cpl = vq->async_ops.check_completed_copies(vid,
 			queue_id, 0, count - vq->async_last_pkts_n);
+		if (n_cpl >= 0) {
+			n_pkts_cpl = n_cpl;
+		} else {
+			VHOST_LOG_DATA(ERR,
+				"(%d) %s: failed to check completed copies for queue id %d.\n",
+				dev->vid, __func__, queue_id);
+			n_pkts_cpl = 0;
+		}
+	}
 	n_pkts_cpl += vq->async_last_pkts_n;
 
 	n_pkts_put = RTE_MIN(count, n_pkts_cpl);
-- 
2.29.2
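
On the caller side, the practical effect of the series is that a
failing callback is logged and counted as zero progress instead of
being folded into an unsigned count. A minimal sketch, assuming the
21.08-era rte_vhost_poll_enqueue_completed() signature and an
application-defined MAX_PKT_BURST:

#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_vhost_async.h>

#define MAX_PKT_BURST 32	/* arbitrary application burst size */

static void
drain_completed_enqueues(int vid, uint16_t queue_id)
{
	struct rte_mbuf *done[MAX_PKT_BURST];
	uint16_t n;

	/*
	 * If check_completed_copies() fails inside the library, the
	 * error is logged and treated as zero completions, so n is 0
	 * here rather than a bogus value derived from an unsigned -1.
	 */
	n = rte_vhost_poll_enqueue_completed(vid, queue_id, done,
			MAX_PKT_BURST);
	rte_pktmbuf_free_bulk(done, n);
}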


Thread overview: 5+ messages
     [not found] <20210602042802.31943-1-cheng1.jiang@intel.com>
     [not found] ` <20210716072436.19017-1-cheng1.jiang@intel.com>
2021-07-16  7:24   ` [dpdk-stable] [PATCH v5 1/5] vhost: fix async vhost ops return type Cheng Jiang
     [not found] ` <20210719081022.12949-1-cheng1.jiang@intel.com>
2021-07-19  8:10   ` [dpdk-stable] [PATCH v6 " Cheng Jiang
2021-07-21 14:20     ` Maxime Coquelin
     [not found] ` <20210722040907.20468-1-cheng1.jiang@intel.com>
2021-07-22  4:09   ` [dpdk-stable] [PATCH v7 " Cheng Jiang
     [not found] ` <20210723080937.20256-1-cheng1.jiang@intel.com>
2021-07-23  8:09   ` [dpdk-stable] [PATCH v8 1/4] " Cheng Jiang
