From: xuan.ding@intel.com
To: maxime.coquelin@redhat.com, chenbo.xia@intel.com
Cc: dev@dpdk.org, jiayu.hu@intel.com, cheng1.jiang@intel.com,
sunil.pai.g@intel.com, liangma@liangbit.com,
Xuan Ding <xuan.ding@intel.com>
Subject: [PATCH v1 3/5] vhost: merge sync and async descriptor to mbuf filling
Date: Thu, 7 Apr 2022 15:25:44 +0000
Message-ID: <20220407152546.38167-4-xuan.ding@intel.com>
In-Reply-To: <20220407152546.38167-1-xuan.ding@intel.com>
From: Xuan Ding <xuan.ding@intel.com>
This patch refactors copy_desc_to_mbuf(), used by the sync path, into a
generic desc_to_mbuf() helper that supports both sync and async
descriptor-to-mbuf filling. A virtio net header field is added to
struct async_inflight_info so the async path can defer offload handling
until the DMA copies complete.
Signed-off-by: Xuan Ding <xuan.ding@intel.com>
---
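Note (illustration only, not part of the patch): a minimal sketch of how
the async dequeue path introduced in patch 4/5 of this series is expected
to call the unified helper. The wrapper name is hypothetical, and
legacy_ol_flags is assumed true here purely for the example:

static __rte_always_inline int
demo_async_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
		struct buf_vector *buf_vec, uint16_t nr_vec,
		struct rte_mbuf *pkt, struct rte_mempool *mbuf_pool,
		uint16_t slot_idx)
{
	/* is_async = true: each segment is queued on the DMA device via
	 * async_fill_seg(), and the virtio net header is stashed in
	 * async->pkts_info[slot_idx].nethdr instead of being applied
	 * immediately. slot_idx selects the inflight packet slot.
	 */
	return desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkt, mbuf_pool,
			true, slot_idx, true);
}
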
lib/vhost/vhost.h | 1 +
lib/vhost/virtio_net.c | 47 ++++++++++++++++++++++++++++++++----------
2 files changed, 37 insertions(+), 11 deletions(-)
diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index a9edc271aa..9209558465 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -177,6 +177,7 @@ extern struct async_dma_info dma_copy_track[RTE_DMADEV_DEFAULT_MAX];
* inflight async packet information
*/
struct async_inflight_info {
+ struct virtio_net_hdr nethdr;
struct rte_mbuf *mbuf;
uint16_t descs; /* num of descs inflight */
uint16_t nr_buffers; /* num of buffers inflight for packed ring */
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 709ff483a3..382e953c2d 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -2482,10 +2482,10 @@ copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
}
static __rte_always_inline int
-copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
+desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct buf_vector *buf_vec, uint16_t nr_vec,
struct rte_mbuf *m, struct rte_mempool *mbuf_pool,
- bool legacy_ol_flags)
+ bool legacy_ol_flags, uint16_t slot_idx, bool is_async)
{
uint32_t buf_avail, buf_offset;
uint64_t buf_addr, buf_iova, buf_len;
@@ -2496,6 +2496,8 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct virtio_net_hdr *hdr = NULL;
/* A counter to avoid desc dead loop chain */
uint16_t vec_idx = 0;
+ struct vhost_async *async = vq->async;
+ struct async_inflight_info *pkts_info;
buf_addr = buf_vec[vec_idx].buf_addr;
buf_iova = buf_vec[vec_idx].buf_iova;
@@ -2548,12 +2550,25 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
mbuf_offset = 0;
mbuf_avail = m->buf_len - RTE_PKTMBUF_HEADROOM;
+
+ if (is_async) {
+ pkts_info = async->pkts_info;
+ if (async_iter_initialize(dev, async))
+ return -1;
+ }
+
while (1) {
cpy_len = RTE_MIN(buf_avail, mbuf_avail);
- sync_fill_seg(dev, vq, m, mbuf_offset,
- buf_addr + buf_offset,
- buf_iova + buf_offset, cpy_len, true);
+ if (is_async) {
+ if (async_fill_seg(dev, vq, m, mbuf_offset,
+ buf_iova + buf_offset, cpy_len, false) < 0)
+ goto error;
+ } else {
+ sync_fill_seg(dev, vq, m, mbuf_offset,
+ buf_addr + buf_offset,
+ buf_iova + buf_offset, cpy_len, true);
+ }
mbuf_avail -= cpy_len;
mbuf_offset += cpy_len;
@@ -2602,11 +2617,20 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
prev->data_len = mbuf_offset;
m->pkt_len += mbuf_offset;
- if (hdr)
- vhost_dequeue_offload(dev, hdr, m, legacy_ol_flags);
+ if (hdr) {
+ if (is_async) {
+ async_iter_finalize(async);
+ pkts_info[slot_idx].nethdr = *hdr;
+ } else {
+ vhost_dequeue_offload(dev, hdr, m, legacy_ol_flags);
+ }
+ }
return 0;
error:
+ if (is_async)
+ async_iter_cancel(async);
+
return -1;
}
@@ -2738,8 +2762,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
break;
}
- err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
- mbuf_pool, legacy_ol_flags);
+ err = desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
+ mbuf_pool, legacy_ol_flags, 0, false);
if (unlikely(err)) {
if (!allocerr_warned) {
VHOST_LOG_DATA(ERR, "(%s) failed to copy desc to mbuf.\n",
@@ -2750,6 +2774,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
i++;
break;
}
+
}
if (dropped)
@@ -2931,8 +2956,8 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
return -1;
}
- err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts,
- mbuf_pool, legacy_ol_flags);
+ err = desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts,
+ mbuf_pool, legacy_ol_flags, 0, false);
if (unlikely(err)) {
if (!allocerr_warned) {
VHOST_LOG_DATA(ERR, "(%s) failed to copy desc to mbuf.\n",
--
2.17.1
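
For completeness, a hedged sketch of the completion side: once the DMA
engine has finished copying a packet into its mbuf, the header stashed by
desc_to_mbuf() can finally be applied. The function name below is
hypothetical; the struct fields and the vhost_dequeue_offload() signature
come from the diff above:

static void
demo_apply_deferred_hdr(struct virtio_net *dev, struct vhost_async *async,
		uint16_t slot_idx, bool legacy_ol_flags)
{
	/* Fetch the inflight slot filled by desc_to_mbuf(is_async = true). */
	struct async_inflight_info *info = &async->pkts_info[slot_idx];

	/* Apply the deferred virtio net header to the completed mbuf. */
	vhost_dequeue_offload(dev, &info->nethdr, info->mbuf, legacy_ol_flags);
}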