From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, chenbo.xia@intel.com, jiayu.hu@intel.com,
yuanx.wang@intel.com, wenwux.ma@intel.com,
bruce.richardson@intel.com, john.mcnamara@intel.com,
david.marchand@redhat.com
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [dpdk-dev] [PATCH v1 14/14] vhost: merge sync and async mbuf to desc filling
Date: Mon, 18 Oct 2021 15:02:29 +0200 [thread overview]
Message-ID: <20211018130229.308694-15-maxime.coquelin@redhat.com> (raw)
In-Reply-To: <20211018130229.308694-1-maxime.coquelin@redhat.com>
This patch merges copy_mbuf_to_desc() used by the sync
path with async_mbuf_to_desc() used by the async path.
Most of the code in these complex functions is identical,
so merging them will make maintenance easier.
In order not to degrade performance, the patch introduces
a boolean function parameter to specify whether it is called
in async context. This boolean is statically passed to this
always-inlined function, so the compiler will optimize this
out.
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
lib/vhost/virtio_net.c | 153 +++++++----------------------------------
1 file changed, 26 insertions(+), 127 deletions(-)
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 7e66113006..0e1fd01e31 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -913,9 +913,9 @@ sync_mbuf_to_desc_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
}
static __rte_always_inline int
-copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
- struct rte_mbuf *m, struct buf_vector *buf_vec,
- uint16_t nr_vec, uint16_t num_buffers)
+mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct rte_mbuf *m, struct buf_vector *buf_vec,
+ uint16_t nr_vec, uint16_t num_buffers, bool is_async)
{
uint32_t vec_idx = 0;
uint32_t mbuf_offset, mbuf_avail;
@@ -925,115 +925,7 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint64_t hdr_addr;
struct rte_mbuf *hdr_mbuf;
struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
-
- if (unlikely(m == NULL))
- return -1;
-
- buf_addr = buf_vec[vec_idx].buf_addr;
- buf_iova = buf_vec[vec_idx].buf_iova;
- buf_len = buf_vec[vec_idx].buf_len;
-
- if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1))
- return -1;
-
- hdr_mbuf = m;
- hdr_addr = buf_addr;
- if (unlikely(buf_len < dev->vhost_hlen)) {
- memset(&tmp_hdr, 0, sizeof(struct virtio_net_hdr_mrg_rxbuf));
- hdr = &tmp_hdr;
- } else
- hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
-
- VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
- dev->vid, num_buffers);
-
- if (unlikely(buf_len < dev->vhost_hlen)) {
- buf_offset = dev->vhost_hlen - buf_len;
- vec_idx++;
- buf_addr = buf_vec[vec_idx].buf_addr;
- buf_iova = buf_vec[vec_idx].buf_iova;
- buf_len = buf_vec[vec_idx].buf_len;
- buf_avail = buf_len - buf_offset;
- } else {
- buf_offset = dev->vhost_hlen;
- buf_avail = buf_len - dev->vhost_hlen;
- }
-
- mbuf_avail = rte_pktmbuf_data_len(m);
- mbuf_offset = 0;
- while (mbuf_avail != 0 || m->next != NULL) {
- /* done with current buf, get the next one */
- if (buf_avail == 0) {
- vec_idx++;
- if (unlikely(vec_idx >= nr_vec))
- goto error;
-
- buf_addr = buf_vec[vec_idx].buf_addr;
- buf_iova = buf_vec[vec_idx].buf_iova;
- buf_len = buf_vec[vec_idx].buf_len;
-
- buf_offset = 0;
- buf_avail = buf_len;
- }
-
- /* done with current mbuf, get the next one */
- if (mbuf_avail == 0) {
- m = m->next;
-
- mbuf_offset = 0;
- mbuf_avail = rte_pktmbuf_data_len(m);
- }
-
- if (hdr_addr) {
- virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
- if (rxvq_is_mergeable(dev))
- ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
- num_buffers);
-
- if (unlikely(hdr == &tmp_hdr)) {
- copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
- } else {
- PRINT_PACKET(dev, (uintptr_t)hdr_addr,
- dev->vhost_hlen, 0);
- vhost_log_cache_write_iova(dev, vq,
- buf_vec[0].buf_iova,
- dev->vhost_hlen);
- }
-
- hdr_addr = 0;
- }
-
- cpy_len = RTE_MIN(buf_avail, mbuf_avail);
-
- sync_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
- buf_addr + buf_offset,
- buf_iova + buf_offset, cpy_len);
-
- mbuf_avail -= cpy_len;
- mbuf_offset += cpy_len;
- buf_avail -= cpy_len;
- buf_offset += cpy_len;
- }
-
- return 0;
-error:
- return -1;
-}
-
-static __rte_always_inline int
-async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
- struct rte_mbuf *m, struct buf_vector *buf_vec,
- uint16_t nr_vec, uint16_t num_buffers)
-{
struct vhost_async *async = vq->async;
- struct rte_mbuf *hdr_mbuf;
- struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
- uint64_t buf_addr, buf_iova;
- uint64_t hdr_addr;
- uint32_t vec_idx = 0;
- uint32_t mbuf_offset, mbuf_avail;
- uint32_t buf_offset, buf_avail;
- uint32_t cpy_len, buf_len;
if (unlikely(m == NULL))
return -1;
@@ -1071,8 +963,10 @@ async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
mbuf_avail = rte_pktmbuf_data_len(m);
mbuf_offset = 0;
- if (async_iter_initialize(async))
- return -1;
+ if (is_async) {
+ if (async_iter_initialize(async))
+ return -1;
+ }
while (mbuf_avail != 0 || m->next != NULL) {
/* done with current buf, get the next one */
@@ -1086,7 +980,7 @@ async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
buf_len = buf_vec[vec_idx].buf_len;
buf_offset = 0;
- buf_avail = buf_len;
+ buf_avail = buf_len;
}
/* done with current mbuf, get the next one */
@@ -1094,7 +988,7 @@ async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
m = m->next;
mbuf_offset = 0;
- mbuf_avail = rte_pktmbuf_data_len(m);
+ mbuf_avail = rte_pktmbuf_data_len(m);
}
if (hdr_addr) {
@@ -1118,9 +1012,14 @@ async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
cpy_len = RTE_MIN(buf_avail, mbuf_avail);
- if (async_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
- buf_iova + buf_offset, cpy_len) < 0) {
- goto error;
+ if (is_async) {
+ if (async_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
+ buf_iova + buf_offset, cpy_len) < 0)
+ goto error;
+ } else {
+ sync_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
+ buf_addr + buf_offset,
+ buf_iova + buf_offset, cpy_len);
}
mbuf_avail -= cpy_len;
@@ -1129,11 +1028,13 @@ async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
buf_offset += cpy_len;
}
- async_iter_finalize(async);
+ if (is_async)
+ async_iter_finalize(async);
return 0;
error:
- async_iter_cancel(async);
+ if (is_async)
+ async_iter_cancel(async);
return -1;
}
@@ -1192,7 +1093,7 @@ vhost_enqueue_single_packed(struct virtio_net *dev,
avail_idx -= vq->size;
}
- if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0)
+ if (mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers, false) < 0)
return -1;
vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
@@ -1236,9 +1137,8 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
dev->vid, vq->last_avail_idx,
vq->last_avail_idx + num_buffers);
- if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
- buf_vec, nr_vec,
- num_buffers) < 0) {
+ if (mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec,
+ num_buffers, false) < 0) {
vq->shadow_used_idx -= num_buffers;
break;
}
@@ -1582,7 +1482,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
dev->vid, vq->last_avail_idx, vq->last_avail_idx + num_buffers);
- if (async_mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec, num_buffers) < 0) {
+ if (mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec, num_buffers, true) < 0) {
vq->shadow_used_idx -= num_buffers;
break;
}
@@ -1751,8 +1651,7 @@ vhost_enqueue_async_packed(struct virtio_net *dev,
avail_idx -= vq->size;
}
- if (unlikely(async_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec,
- *nr_buffers) < 0))
+ if (unlikely(mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, *nr_buffers, true) < 0))
return -1;
vhost_shadow_enqueue_packed(vq, buffer_len, buffer_buf_id, buffer_desc_count, *nr_buffers);
--
2.31.1
prev parent reply other threads:[~2021-10-18 13:04 UTC|newest]
Thread overview: 19+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-10-18 13:02 [dpdk-dev] [PATCH v1 00/14] vhost: clean-up and simplify async implementation Maxime Coquelin
2021-10-18 13:02 ` [dpdk-dev] [PATCH v1 01/14] vhost: move async data in a dedicated structure Maxime Coquelin
2021-10-18 13:02 ` [dpdk-dev] [PATCH v1 02/14] vhost: hide inflight async structure Maxime Coquelin
2021-10-18 13:02 ` [dpdk-dev] [PATCH v1 03/14] vhost: simplify async IO vectors Maxime Coquelin
2021-10-18 13:02 ` [dpdk-dev] [PATCH v1 04/14] vhost: simplify async IO vectors iterators Maxime Coquelin
2021-10-18 13:02 ` [dpdk-dev] [PATCH v1 05/14] vhost: remove async batch threshold Maxime Coquelin
2021-10-18 13:02 ` [dpdk-dev] [PATCH v1 06/14] vhost: introduce specific iovec structure Maxime Coquelin
2021-10-18 13:02 ` [dpdk-dev] [PATCH v1 07/14] vhost: remove useless fields in async iterator struct Maxime Coquelin
2021-10-18 13:02 ` [dpdk-dev] [PATCH v1 08/14] vhost: improve IO vector logic Maxime Coquelin
2021-10-25 7:22 ` Hu, Jiayu
2021-10-25 10:02 ` Maxime Coquelin
2021-10-26 7:07 ` Hu, Jiayu
2021-10-26 7:27 ` Maxime Coquelin
2021-10-18 13:02 ` [dpdk-dev] [PATCH v1 09/14] vhost: remove notion of async descriptor Maxime Coquelin
2021-10-18 13:02 ` [dpdk-dev] [PATCH v1 10/14] vhost: simplify async enqueue completion Maxime Coquelin
2021-10-18 13:02 ` [dpdk-dev] [PATCH v1 11/14] vhost: simplify getting the first in-flight index Maxime Coquelin
2021-10-18 13:02 ` [dpdk-dev] [PATCH v1 12/14] vhost: prepare async for mbuf to desc refactoring Maxime Coquelin
2021-10-18 13:02 ` [dpdk-dev] [PATCH v1 13/14] vhost: prepare sync " Maxime Coquelin
2021-10-18 13:02 ` Maxime Coquelin [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20211018130229.308694-15-maxime.coquelin@redhat.com \
--to=maxime.coquelin@redhat.com \
--cc=bruce.richardson@intel.com \
--cc=chenbo.xia@intel.com \
--cc=david.marchand@redhat.com \
--cc=dev@dpdk.org \
--cc=jiayu.hu@intel.com \
--cc=john.mcnamara@intel.com \
--cc=wenwux.ma@intel.com \
--cc=yuanx.wang@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).