DPDK patches and discussions
From: Cheng Jiang <cheng1.jiang@intel.com>
To: maxime.coquelin@redhat.com, chenbo.xia@intel.com
Cc: dev@dpdk.org, jiayu.hu@intel.com, xuan.ding@intel.com,
	wenwux.ma@intel.com, yuanx.wang@intel.com,
	xingguang.he@intel.com, Cheng Jiang <cheng1.jiang@intel.com>
Subject: [PATCH v2 1/3] vhost: remove redundant copy for packed shadow used ring
Date: Fri, 13 Jan 2023 02:56:51 +0000	[thread overview]
Message-ID: <20230113025653.16583-2-cheng1.jiang@intel.com> (raw)
In-Reply-To: <20230113025653.16583-1-cheng1.jiang@intel.com>

In the packed ring enqueue data path of the current asynchronous
Vhost design, used ring information is first written to the sync
shadow used ring and then, for historical reasons, copied to the
async shadow used ring. This extra copy is completely unnecessary.
This patch removes the redundant copy; the async shadow used ring
is now updated directly.
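
The change boils down to writing each used element straight into the
async shadow ring with a wrap-around index, instead of staging it in
the sync shadow ring and memcpy'ing it over. A minimal, self-contained
sketch of that logic (condensed struct and function names, for
illustration only; the real definitions live in lib/vhost and carry
more fields):

  #include <stdint.h>

  /* condensed stand-ins for the lib/vhost structures */
  struct used_elem {
  	uint16_t id;
  	uint32_t len;
  	uint16_t count;
  };

  struct async_shadow {
  	struct used_elem *buffers_packed; /* ring of ring_size entries */
  	uint16_t buffer_idx_packed;       /* next free slot */
  	uint16_t ring_size;               /* vq->size in the real code */
  };

  /* Write used elements directly into the async shadow ring; before
   * this patch they were staged in the sync shadow ring first. */
  static void
  shadow_enqueue_direct(struct async_shadow *s, const uint16_t *id,
  		const uint32_t *len, const uint16_t *count,
  		uint16_t num_buffers)
  {
  	uint16_t i;

  	for (i = 0; i < num_buffers; i++) {
  		s->buffers_packed[s->buffer_idx_packed].id = id[i];
  		s->buffers_packed[s->buffer_idx_packed].len = len[i];
  		s->buffers_packed[s->buffer_idx_packed].count = count[i];
  		/* wrap without a modulo: the index advances by one */
  		if (++s->buffer_idx_packed >= s->ring_size)
  			s->buffer_idx_packed -= s->ring_size;
  	}
  }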

Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
 lib/vhost/virtio_net.c | 66 ++++++++++++++++++++----------------------
 1 file changed, 31 insertions(+), 35 deletions(-)

diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 9abf752f30..7c3ec128a0 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -572,6 +572,26 @@ vhost_shadow_enqueue_packed(struct vhost_virtqueue *vq,
 	}
 }
 
+static __rte_always_inline void
+vhost_async_shadow_enqueue_packed(struct vhost_virtqueue *vq,
+				   uint32_t *len,
+				   uint16_t *id,
+				   uint16_t *count,
+				   uint16_t num_buffers)
+{
+	uint16_t i;
+	struct vhost_async *async = vq->async;
+
+	for (i = 0; i < num_buffers; i++) {
+		async->buffers_packed[async->buffer_idx_packed].id  = id[i];
+		async->buffers_packed[async->buffer_idx_packed].len = len[i];
+		async->buffers_packed[async->buffer_idx_packed].count = count[i];
+		async->buffer_idx_packed++;
+		if (async->buffer_idx_packed >= vq->size)
+			async->buffer_idx_packed -= vq->size;
+	}
+}
+
 static __rte_always_inline void
 vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
 				   struct vhost_virtqueue *vq,
@@ -1647,23 +1667,6 @@ store_dma_desc_info_split(struct vring_used_elem *s_ring, struct vring_used_elem
 	}
 }
 
-static __rte_always_inline void
-store_dma_desc_info_packed(struct vring_used_elem_packed *s_ring,
-		struct vring_used_elem_packed *d_ring,
-		uint16_t ring_size, uint16_t s_idx, uint16_t d_idx, uint16_t count)
-{
-	size_t elem_size = sizeof(struct vring_used_elem_packed);
-
-	if (d_idx + count <= ring_size) {
-		rte_memcpy(d_ring + d_idx, s_ring + s_idx, count * elem_size);
-	} else {
-		uint16_t size = ring_size - d_idx;
-
-		rte_memcpy(d_ring + d_idx, s_ring + s_idx, size * elem_size);
-		rte_memcpy(d_ring, s_ring + s_idx + size, (count - size) * elem_size);
-	}
-}
-
 static __rte_noinline uint32_t
 virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mbuf **pkts, uint32_t count, int16_t dma_id, uint16_t vchan_id)
@@ -1822,7 +1825,8 @@ vhost_enqueue_async_packed(struct virtio_net *dev,
 	if (unlikely(mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, *nr_buffers, true) < 0))
 		return -1;
 
-	vhost_shadow_enqueue_packed(vq, buffer_len, buffer_buf_id, buffer_desc_count, *nr_buffers);
+	vhost_async_shadow_enqueue_packed(vq, buffer_len, buffer_buf_id,
+					buffer_desc_count, *nr_buffers);
 
 	return 0;
 }
@@ -1852,6 +1856,7 @@ dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,
 {
 	uint16_t descs_err = 0;
 	uint16_t buffers_err = 0;
+	struct vhost_async *async = vq->async;
 	struct async_inflight_info *pkts_info = vq->async->pkts_info;
 
 	*pkt_idx -= nr_err;
@@ -1869,7 +1874,10 @@ dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,
 		vq->avail_wrap_counter ^= 1;
 	}
 
-	vq->shadow_used_idx -= buffers_err;
+	if (async->buffer_idx_packed >= buffers_err)
+		async->buffer_idx_packed -= buffers_err;
+	else
+		async->buffer_idx_packed = async->buffer_idx_packed + vq->size - buffers_err;
 }
 
 static __rte_noinline uint32_t
@@ -1921,23 +1929,11 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct vhost_virtqueue
 		dma_error_handler_packed(vq, slot_idx, pkt_err, &pkt_idx);
 	}
 
-	if (likely(vq->shadow_used_idx)) {
-		/* keep used descriptors. */
-		store_dma_desc_info_packed(vq->shadow_used_packed, async->buffers_packed,
-					vq->size, 0, async->buffer_idx_packed,
-					vq->shadow_used_idx);
-
-		async->buffer_idx_packed += vq->shadow_used_idx;
-		if (async->buffer_idx_packed >= vq->size)
-			async->buffer_idx_packed -= vq->size;
-
-		async->pkts_idx += pkt_idx;
-		if (async->pkts_idx >= vq->size)
-			async->pkts_idx -= vq->size;
+	async->pkts_idx += pkt_idx;
+	if (async->pkts_idx >= vq->size)
+		async->pkts_idx -= vq->size;
 
-		vq->shadow_used_idx = 0;
-		async->pkts_inflight_n += pkt_idx;
-	}
+	async->pkts_inflight_n += pkt_idx;
 
 	return pkt_idx;
 }
-- 
2.35.1
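
A note on the rollback in dma_error_handler_packed above:
buffer_idx_packed is a free-running index into a ring of vq->size
slots, so backing it up by buffers_err is modular subtraction. A
standalone sketch of just that index math (hypothetical values, no
DPDK dependencies):

  #include <assert.h>
  #include <stdint.h>

  /* Roll a ring index back by n slots, wrapping within [0, size). */
  static uint16_t
  ring_idx_rollback(uint16_t idx, uint16_t n, uint16_t size)
  {
  	if (idx >= n)
  		return idx - n;
  	return idx + size - n;
  }

  int
  main(void)
  {
  	assert(ring_idx_rollback(10, 3, 256) == 7);   /* no wrap */
  	assert(ring_idx_rollback(2, 5, 256) == 253);  /* wraps past 0 */
  	return 0;
  }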



Thread overview: 12+ messages
2022-12-20  0:44 [PATCH 0/3] Async vhost packed ring optimization Cheng Jiang
2022-12-20  0:44 ` [PATCH 1/3] vhost: remove redundant copy for packed shadow used ring Cheng Jiang
2022-12-20  0:44 ` [PATCH 2/3] vhost: add batch enqueue in async vhost packed ring Cheng Jiang
2022-12-20  0:44 ` [PATCH 3/3] vhost: add batch dequeue " Cheng Jiang
2023-01-13  2:56 ` [PATCH v2 0/3] Async vhost packed ring optimization Cheng Jiang
2023-01-13  2:56   ` Cheng Jiang [this message]
2023-02-02  9:13     ` [PATCH v2 1/3] vhost: remove redundant copy for packed shadow used ring Maxime Coquelin
2023-01-13  2:56   ` [PATCH v2 2/3] vhost: add batch enqueue in async vhost packed ring Cheng Jiang
2023-02-02  9:31     ` Maxime Coquelin
2023-01-13  2:56   ` [PATCH v2 3/3] vhost: add batch dequeue " Cheng Jiang
2023-02-02 10:07     ` Maxime Coquelin
2023-02-03 14:59   ` [PATCH v2 0/3] Async vhost packed ring optimization Maxime Coquelin
