patches for DPDK stable branches
* [dpdk-stable] [PATCH] vhost: fix async packed ring batch datapath
@ 2021-07-08 10:25 Cheng Jiang
  2021-07-19  9:07 ` Maxime Coquelin
  2021-07-20  2:46 ` Xia, Chenbo
  0 siblings, 2 replies; 3+ messages in thread
From: Cheng Jiang @ 2021-07-08 10:25 UTC (permalink / raw)
  To: maxime.coquelin, Chenbo.Xia
  Cc: dev, jiayu.hu, yvonnex.yang, Cheng Jiang, stable

In the sync path, we assume that if there is no buffer wrap in the
avail descriptors fetched in a batch, then there is also no buffer
wrap in the used descriptors that need to be written back for that
batch. This assumption does not hold in the async path: descriptors
processed by the DMA device remain in flight, so the used ring index
can lag behind the avail ring index, and the used descriptors of a
batch may cross the end of the ring even when its avail descriptors
do not.

This patch refactors the batch copy code and adds a used ring buffer
wrap check as a batch copy condition to fix this issue.
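
A minimal sketch of the batch-eligibility condition, for illustration
only: the structure, macro and function names below are simplified
stand-ins rather than the actual vhost symbols, and the real checks
live in virtio_dev_rx_async_batch_check() in the diff.

#include <stdbool.h>
#include <stdint.h>

#define BATCH_SIZE 4U               /* stand-in for PACKED_BATCH_SIZE */
#define BATCH_MASK (BATCH_SIZE - 1) /* stand-in for PACKED_BATCH_MASK */

struct vq_indexes {
	uint16_t size;           /* ring size */
	uint16_t last_avail_idx; /* next avail descriptor to fetch */
	uint16_t last_used_idx;  /* next used descriptor to write back */
};

/* A batch may only take the fast copy path when neither its avail
 * slots nor its used slots cross the end of the ring. */
static bool
batch_fits_without_wrap(const struct vq_indexes *vq)
{
	if (vq->last_avail_idx & BATCH_MASK)
		return false;
	if (vq->last_avail_idx + BATCH_SIZE > vq->size)
		return false;
	/* new for async: the used index may lag behind avail, so its
	 * wrap has to be checked separately */
	if (vq->last_used_idx + BATCH_SIZE > vq->size)
		return false;
	return true;
}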

Fixes: 873e8dad6f49 ("vhost: support packed ring in async datapath")
Cc: stable@dpdk.org

Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
 lib/vhost/virtio_net.c | 163 ++++++++++++++++++++++++++++++++---------
 1 file changed, 128 insertions(+), 35 deletions(-)

diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 6bd00b746b..f4a2c88d8b 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -221,11 +221,6 @@ vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
 	uint16_t last_used_idx;
 	struct vring_packed_desc *desc_base;
 
-	if (vq->shadow_used_idx) {
-		do_data_copy_enqueue(dev, vq);
-		vhost_flush_enqueue_shadow_packed(dev, vq);
-	}
-
 	last_used_idx = vq->last_used_idx;
 	desc_base = &vq->desc_packed[last_used_idx];
 
@@ -1258,18 +1253,16 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 }
 
 static __rte_always_inline int
-virtio_dev_rx_batch_packed(struct virtio_net *dev,
+virtio_dev_rx_sync_batch_check(struct virtio_net *dev,
 			   struct vhost_virtqueue *vq,
-			   struct rte_mbuf **pkts)
+			   struct rte_mbuf **pkts,
+			   uint64_t *desc_addrs,
+			   uint64_t *lens)
 {
 	bool wrap_counter = vq->avail_wrap_counter;
 	struct vring_packed_desc *descs = vq->desc_packed;
 	uint16_t avail_idx = vq->last_avail_idx;
-	uint64_t desc_addrs[PACKED_BATCH_SIZE];
-	struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
 	uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
-	uint64_t lens[PACKED_BATCH_SIZE];
-	uint16_t ids[PACKED_BATCH_SIZE];
 	uint16_t i;
 
 	if (unlikely(avail_idx & PACKED_BATCH_MASK))
@@ -1307,6 +1300,84 @@ virtio_dev_rx_batch_packed(struct virtio_net *dev,
 			return -1;
 	}
 
+	return 0;
+}
+
+static __rte_always_inline int
+virtio_dev_rx_async_batch_check(struct virtio_net *dev,
+			   struct vhost_virtqueue *vq,
+			   struct rte_mbuf **pkts,
+			   uint64_t *desc_addrs,
+			   uint64_t *lens)
+{
+	bool wrap_counter = vq->avail_wrap_counter;
+	struct vring_packed_desc *descs = vq->desc_packed;
+	uint16_t avail_idx = vq->last_avail_idx;
+	uint16_t used_idx = vq->last_used_idx;
+	uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+	uint32_t cpy_threshold = vq->async_threshold;
+	uint16_t i;
+
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+		if (unlikely(pkts[i]->data_len >= cpy_threshold))
+			return -1;
+	}
+
+	if (unlikely(avail_idx & PACKED_BATCH_MASK))
+		return -1;
+
+	if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
+		return -1;
+
+	if (unlikely((used_idx + PACKED_BATCH_SIZE) > vq->size))
+		return -1;
+
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+		if (unlikely(pkts[i]->next != NULL))
+			return -1;
+		if (unlikely(!desc_is_avail(&descs[avail_idx + i],
+					    wrap_counter)))
+			return -1;
+	}
+
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+		lens[i] = descs[avail_idx + i].len;
+
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+		if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset)))
+			return -1;
+	}
+
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+		desc_addrs[i] = vhost_iova_to_vva(dev, vq,
+						  descs[avail_idx + i].addr,
+						  &lens[i],
+						  VHOST_ACCESS_RW);
+
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+		if (unlikely(!desc_addrs[i]))
+			return -1;
+		if (unlikely(lens[i] != descs[avail_idx + i].len))
+			return -1;
+	}
+
+	return 0;
+}
+
+static __rte_always_inline void
+virtio_dev_rx_batch_packed_copy(struct virtio_net *dev,
+			   struct vhost_virtqueue *vq,
+			   struct rte_mbuf **pkts,
+			   uint64_t *desc_addrs,
+			   uint64_t *lens)
+{
+	uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+	struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
+	struct vring_packed_desc *descs = vq->desc_packed;
+	uint16_t avail_idx = vq->last_avail_idx;
+	uint16_t ids[PACKED_BATCH_SIZE];
+	uint16_t i;
+
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
 		rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
 		hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
@@ -1334,6 +1405,51 @@ virtio_dev_rx_batch_packed(struct virtio_net *dev,
 		ids[i] = descs[avail_idx + i].id;
 
 	vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);
+}
+
+static __rte_always_inline int
+virtio_dev_rx_sync_batch_packed(struct virtio_net *dev,
+			   struct vhost_virtqueue *vq,
+			   struct rte_mbuf **pkts)
+{
+	uint64_t desc_addrs[PACKED_BATCH_SIZE];
+	uint64_t lens[PACKED_BATCH_SIZE];
+
+	if (virtio_dev_rx_sync_batch_check(dev, vq, pkts, desc_addrs, lens) == -1)
+		return -1;
+
+	if (vq->shadow_used_idx) {
+		do_data_copy_enqueue(dev, vq);
+		vhost_flush_enqueue_shadow_packed(dev, vq);
+	}
+
+	virtio_dev_rx_batch_packed_copy(dev, vq, pkts, desc_addrs, lens);
+
+	return 0;
+}
+
+static __rte_always_inline int
+virtio_dev_rx_async_batch_packed(struct virtio_net *dev,
+			   struct vhost_virtqueue *vq,
+			   struct rte_mbuf **pkts,
+			   struct rte_mbuf **comp_pkts, uint32_t *pkt_done)
+{
+	uint16_t i;
+	uint64_t desc_addrs[PACKED_BATCH_SIZE];
+	uint64_t lens[PACKED_BATCH_SIZE];
+
+	if (virtio_dev_rx_async_batch_check(dev, vq, pkts, desc_addrs, lens) == -1)
+		return -1;
+
+	virtio_dev_rx_batch_packed_copy(dev, vq, pkts, desc_addrs, lens);
+
+	if (vq->shadow_used_idx) {
+		do_data_copy_enqueue(dev, vq);
+		vhost_flush_enqueue_shadow_packed(dev, vq);
+	}
+
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+		comp_pkts[(*pkt_done)++] = pkts[i];
 
 	return 0;
 }
@@ -1375,7 +1491,7 @@ virtio_dev_rx_packed(struct virtio_net *dev,
 		rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
 
 		if (count - pkt_idx >= PACKED_BATCH_SIZE) {
-			if (!virtio_dev_rx_batch_packed(dev, vq,
+			if (!virtio_dev_rx_sync_batch_packed(dev, vq,
 							&pkts[pkt_idx])) {
 				pkt_idx += PACKED_BATCH_SIZE;
 				continue;
@@ -1728,29 +1844,6 @@ vhost_update_used_packed(struct vhost_virtqueue *vq,
 	vq->desc_packed[head_idx].flags = head_flags;
 }
 
-static __rte_always_inline int
-virtio_dev_rx_async_batch_packed(struct virtio_net *dev,
-			   struct vhost_virtqueue *vq,
-			   struct rte_mbuf **pkts,
-			   struct rte_mbuf **comp_pkts, uint32_t *pkt_done)
-{
-	uint16_t i;
-	uint32_t cpy_threshold = vq->async_threshold;
-
-	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
-		if (unlikely(pkts[i]->pkt_len >= cpy_threshold))
-			return -1;
-	}
-	if (!virtio_dev_rx_batch_packed(dev, vq, pkts)) {
-		vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
-			comp_pkts[(*pkt_done)++] = pkts[i];
-
-		return 0;
-	}
-
-	return -1;
-}
-
 static __rte_always_inline int
 vhost_enqueue_async_single_packed(struct virtio_net *dev,
 			    struct vhost_virtqueue *vq,
-- 
2.29.2



* Re: [dpdk-stable] [PATCH] vhost: fix async packed ring batch datapath
  2021-07-08 10:25 [dpdk-stable] [PATCH] vhost: fix async packed ring batch datapath Cheng Jiang
@ 2021-07-19  9:07 ` Maxime Coquelin
  2021-07-20  2:46 ` Xia, Chenbo
  1 sibling, 0 replies; 3+ messages in thread
From: Maxime Coquelin @ 2021-07-19  9:07 UTC (permalink / raw)
  To: Cheng Jiang, Chenbo.Xia; +Cc: dev, jiayu.hu, yvonnex.yang, stable

Hi,

On 7/8/21 12:25 PM, Cheng Jiang wrote:
> In the sync path, we assume that if there is no buffer wrap in the
> avail descriptors fetched in a batch, then there is also no buffer
> wrap in the used descriptors that need to be written back for that
> batch. This assumption does not hold in the async path: descriptors
> processed by the DMA device remain in flight, so the used ring index
> can lag behind the avail ring index, and the used descriptors of a
> batch may cross the end of the ring even when its avail descriptors
> do not.
> 
> This patch refactors the batch copy code and adds a used ring buffer
> wrap check as a batch copy condition to fix this issue.
> 
> Fixes: 873e8dad6f49 ("vhost: support packed ring in async datapath")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
> ---
>  lib/vhost/virtio_net.c | 163 ++++++++++++++++++++++++++++++++---------
>  1 file changed, 128 insertions(+), 35 deletions(-)
> 

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime



* Re: [dpdk-stable] [PATCH] vhost: fix async packed ring batch datapath
  2021-07-08 10:25 [dpdk-stable] [PATCH] vhost: fix async packed ring batch datapath Cheng Jiang
  2021-07-19  9:07 ` Maxime Coquelin
@ 2021-07-20  2:46 ` Xia, Chenbo
  1 sibling, 0 replies; 3+ messages in thread
From: Xia, Chenbo @ 2021-07-20  2:46 UTC (permalink / raw)
  To: Jiang, Cheng1, maxime.coquelin; +Cc: dev, Hu, Jiayu, Yang, YvonneX, stable

> -----Original Message-----
> From: Jiang, Cheng1 <cheng1.jiang@intel.com>
> Sent: Thursday, July 8, 2021 6:25 PM
> To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Yang, YvonneX
> <yvonnex.yang@intel.com>; Jiang, Cheng1 <cheng1.jiang@intel.com>;
> stable@dpdk.org
> Subject: [PATCH] vhost: fix async packed ring batch datapath
> 
> In the sync path, we assume that if there is no buffer wrap in the
> avail descriptors fetched in a batch, then there is also no buffer
> wrap in the used descriptors that need to be written back for that
> batch. This assumption does not hold in the async path: descriptors
> processed by the DMA device remain in flight, so the used ring index
> can lag behind the avail ring index, and the used descriptors of a
> batch may cross the end of the ring even when its avail descriptors
> do not.
> 
> This patch refactors the batch copy code and adds a used ring buffer
> wrap check as a batch copy condition to fix this issue.
> 
> Fixes: 873e8dad6f49 ("vhost: support packed ring in async datapath")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
> ---
> --
> 2.29.2

Applied to next-virtio/main, thanks.


