DPDK patches and discussions
From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, chenbo.xia@intel.com, jiayu.hu@intel.com,
	yuanx.wang@intel.com, wenwux.ma@intel.com,
	bruce.richardson@intel.com, john.mcnamara@intel.com,
	david.marchand@redhat.com
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [dpdk-dev] [PATCH v1 13/14] vhost: prepare sync for mbuf to desc refactoring
Date: Mon, 18 Oct 2021 15:02:28 +0200	[thread overview]
Message-ID: <20211018130229.308694-14-maxime.coquelin@redhat.com> (raw)
In-Reply-To: <20211018130229.308694-1-maxime.coquelin@redhat.com>

This patch extracts the filling of the descriptor buffers
from copy_mbuf_to_desc() into a dedicated function, as a
preliminary step towards merging copy_mbuf_to_desc() and
async_mbuf_to_desc().
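
For illustration only (not part of this patch): with the per-segment
copy factored out, the eventual merge is expected to reduce to a
dispatch between the sync and async seg helpers inside a single
mbuf-to-desc function. A hypothetical sketch of that direction, using
names from this series (is_async is a made-up flag here, and the
signatures are approximated; the actual merged code in patch 14 may
differ):

	cpy_len = RTE_MIN(buf_avail, mbuf_avail);

	if (is_async) {
		/* Async path: build an IO vector for the DMA engine. */
		if (async_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
					buf_iova + buf_offset, cpy_len) < 0)
			goto error;
	} else {
		/* Sync path: copy now, or defer via the batch_copy array. */
		sync_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
				buf_addr + buf_offset,
				buf_iova + buf_offset, cpy_len);
	}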

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/vhost/virtio_net.c | 73 +++++++++++++++++++++---------------------
 1 file changed, 36 insertions(+), 37 deletions(-)

diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 9d3337abad..7e66113006 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -888,6 +888,30 @@ async_mbuf_to_desc_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	return 0;
 }
 
+static __rte_always_inline void
+sync_mbuf_to_desc_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
+		struct rte_mbuf *m, uint32_t mbuf_offset,
+		uint64_t buf_addr, uint64_t buf_iova, uint32_t cpy_len)
+{
+	struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
+
+	if (likely(cpy_len > MAX_BATCH_LEN || vq->batch_copy_nb_elems >= vq->size)) {
+		rte_memcpy((void *)((uintptr_t)(buf_addr)),
+				rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
+				cpy_len);
+		vhost_log_cache_write_iova(dev, vq, buf_iova, cpy_len);
+		PRINT_PACKET(dev, (uintptr_t)(buf_addr), cpy_len, 0);
+	} else {
+		batch_copy[vq->batch_copy_nb_elems].dst =
+			(void *)((uintptr_t)(buf_addr));
+		batch_copy[vq->batch_copy_nb_elems].src =
+			rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
+		batch_copy[vq->batch_copy_nb_elems].log_addr = buf_iova;
+		batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
+		vq->batch_copy_nb_elems++;
+	}
+}
+
 static __rte_always_inline int
 copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			    struct rte_mbuf *m, struct buf_vector *buf_vec,
@@ -900,23 +924,17 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	uint32_t cpy_len;
 	uint64_t hdr_addr;
 	struct rte_mbuf *hdr_mbuf;
-	struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
 	struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
-	int error = 0;
 
-	if (unlikely(m == NULL)) {
-		error = -1;
-		goto out;
-	}
+	if (unlikely(m == NULL))
+		return -1;
 
 	buf_addr = buf_vec[vec_idx].buf_addr;
 	buf_iova = buf_vec[vec_idx].buf_iova;
 	buf_len = buf_vec[vec_idx].buf_len;
 
-	if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
-		error = -1;
-		goto out;
-	}
+	if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1))
+		return -1;
 
 	hdr_mbuf = m;
 	hdr_addr = buf_addr;
@@ -947,10 +965,8 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		/* done with current buf, get the next one */
 		if (buf_avail == 0) {
 			vec_idx++;
-			if (unlikely(vec_idx >= nr_vec)) {
-				error = -1;
-				goto out;
-			}
+			if (unlikely(vec_idx >= nr_vec))
+				goto error;
 
 			buf_addr = buf_vec[vec_idx].buf_addr;
 			buf_iova = buf_vec[vec_idx].buf_iova;
@@ -989,26 +1005,9 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 		cpy_len = RTE_MIN(buf_avail, mbuf_avail);
 
-		if (likely(cpy_len > MAX_BATCH_LEN ||
-					vq->batch_copy_nb_elems >= vq->size)) {
-			rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
-				rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
-				cpy_len);
-			vhost_log_cache_write_iova(dev, vq,
-						   buf_iova + buf_offset,
-						   cpy_len);
-			PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
-				cpy_len, 0);
-		} else {
-			batch_copy[vq->batch_copy_nb_elems].dst =
-				(void *)((uintptr_t)(buf_addr + buf_offset));
-			batch_copy[vq->batch_copy_nb_elems].src =
-				rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
-			batch_copy[vq->batch_copy_nb_elems].log_addr =
-				buf_iova + buf_offset;
-			batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
-			vq->batch_copy_nb_elems++;
-		}
+		sync_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
+				buf_addr + buf_offset,
+				buf_iova + buf_offset, cpy_len);
 
 		mbuf_avail  -= cpy_len;
 		mbuf_offset += cpy_len;
@@ -1016,9 +1015,9 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		buf_offset += cpy_len;
 	}
 
-out:
-
-	return error;
+	return 0;
+error:
+	return -1;
 }
 
 static __rte_always_inline int
-- 
2.31.1
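
Editorial side note, not part of the patch: the else branch of the new
sync_mbuf_to_desc_seg() only queues the copy in vq->batch_copy_elems;
the queued elements are executed later by the batched-copy flush that
already exists in virtio_net.c (do_data_copy_enqueue() at the time of
this series). A rough sketch of that flush, for context (the exact
upstream code may differ):

	static inline void
	do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
	{
		struct batch_copy_elem *elem = vq->batch_copy_elems;
		uint16_t count = vq->batch_copy_nb_elems;
		int i;

		for (i = 0; i < count; i++) {
			/* Perform the deferred copy and dirty-page logging. */
			rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
			vhost_log_cache_write_iova(dev, vq, elem[i].log_addr,
						   elem[i].len);
			PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
		}

		vq->batch_copy_nb_elems = 0;
	}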


Thread overview: 19+ messages
2021-10-18 13:02 [dpdk-dev] [PATCH v1 00/14] vhost: clean-up and simplify async implementation Maxime Coquelin
2021-10-18 13:02 ` [dpdk-dev] [PATCH v1 01/14] vhost: move async data in a dedicated structure Maxime Coquelin
2021-10-18 13:02 ` [dpdk-dev] [PATCH v1 02/14] vhost: hide inflight async structure Maxime Coquelin
2021-10-18 13:02 ` [dpdk-dev] [PATCH v1 03/14] vhost: simplify async IO vectors Maxime Coquelin
2021-10-18 13:02 ` [dpdk-dev] [PATCH v1 04/14] vhost: simplify async IO vectors iterators Maxime Coquelin
2021-10-18 13:02 ` [dpdk-dev] [PATCH v1 05/14] vhost: remove async batch threshold Maxime Coquelin
2021-10-18 13:02 ` [dpdk-dev] [PATCH v1 06/14] vhost: introduce specific iovec structure Maxime Coquelin
2021-10-18 13:02 ` [dpdk-dev] [PATCH v1 07/14] vhost: remove useless fields in async iterator struct Maxime Coquelin
2021-10-18 13:02 ` [dpdk-dev] [PATCH v1 08/14] vhost: improve IO vector logic Maxime Coquelin
2021-10-25  7:22   ` Hu, Jiayu
2021-10-25 10:02     ` Maxime Coquelin
2021-10-26  7:07       ` Hu, Jiayu
2021-10-26  7:27         ` Maxime Coquelin
2021-10-18 13:02 ` [dpdk-dev] [PATCH v1 09/14] vhost: remove notion of async descriptor Maxime Coquelin
2021-10-18 13:02 ` [dpdk-dev] [PATCH v1 10/14] vhost: simplify async enqueue completion Maxime Coquelin
2021-10-18 13:02 ` [dpdk-dev] [PATCH v1 11/14] vhost: simplify getting the first in-flight index Maxime Coquelin
2021-10-18 13:02 ` [dpdk-dev] [PATCH v1 12/14] vhost: prepare async for mbuf to desc refactoring Maxime Coquelin
2021-10-18 13:02 ` Maxime Coquelin [this message]
2021-10-18 13:02 ` [dpdk-dev] [PATCH v1 14/14] vhost: merge sync and async mbuf to desc filling Maxime Coquelin
