From: Cheng Jiang <cheng1.jiang@intel.com>
To: maxime.coquelin@redhat.com, chenbo.xia@intel.com
Cc: dev@dpdk.org, jiayu.hu@intel.com, xuan.ding@intel.com,
wenwux.ma@intel.com, yuanx.wang@intel.com,
yvonnex.yang@intel.com, xingguang.he@intel.com,
Cheng Jiang <cheng1.jiang@intel.com>
Subject: [PATCH 3/3] vhost: add batch dequeue in async vhost packed ring
Date: Tue, 20 Dec 2022 00:44:15 +0000 [thread overview]
Message-ID: <20221220004415.29576-4-cheng1.jiang@intel.com> (raw)
In-Reply-To: <20221220004415.29576-1-cheng1.jiang@intel.com>
Add a batch dequeue function in the asynchronous vhost packed ring to
improve performance. Chained mbufs are not supported; they will
be handled by the single dequeue function.
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
Signed-off-by: Yuan Wang <yuanx.wang@intel.com>
---
lib/vhost/virtio_net.c | 170 ++++++++++++++++++++++++++++++++++++++++-
1 file changed, 167 insertions(+), 3 deletions(-)
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index ac8c404327..9cd69fc7bf 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -450,6 +450,23 @@ vhost_async_shadow_enqueue_packed_batch(struct vhost_virtqueue *vq,
}
}
+/*
+ * Record one full batch of dequeued descriptor IDs into the async
+ * shadow used ring.  Each shadow entry is written with len 0 and
+ * count 1 (one descriptor per packet on the dequeue side; chained
+ * descriptors are rejected earlier by the batch check).
+ */
+static __rte_always_inline void
+vhost_async_shadow_dequeue_packed_batch(struct vhost_virtqueue *vq, uint16_t *ids)
+{
+ uint16_t i;
+ struct vhost_async *async = vq->async;
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ async->buffers_packed[async->buffer_idx_packed].id = ids[i];
+ async->buffers_packed[async->buffer_idx_packed].len = 0;
+ async->buffers_packed[async->buffer_idx_packed].count = 1;
+
+ /* Shadow index wraps manually at vq->size. */
+ async->buffer_idx_packed++;
+ if (async->buffer_idx_packed >= vq->size)
+ async->buffer_idx_packed -= vq->size;
+ }
+}
+
static __rte_always_inline void
vhost_shadow_dequeue_batch_packed_inorder(struct vhost_virtqueue *vq,
uint16_t id)
@@ -3193,6 +3210,80 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
return -1;
}
+/*
+ * Check whether the next PACKED_BATCH_SIZE avail descriptors can be
+ * dequeued as one batch via DMA, and prepare the mbufs for them.
+ *
+ * On success returns 0 and fills desc_addrs[], lens[] and ids[] for
+ * the batch; pkts[] are prepped and their pkt_len/data_len set.
+ * Returns -1 if the batch is not aligned, would cross the ring end,
+ * any descriptor is not available / already used / flagged for single
+ * dequeue, an mbuf cannot be prepped or is too small, or the DMA
+ * vchan lacks capacity for the whole batch.
+ */
+static __rte_always_inline int
+vhost_async_tx_batch_packed_check(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ struct rte_mbuf **pkts,
+ uint16_t avail_idx,
+ uintptr_t *desc_addrs,
+ uint64_t *lens,
+ uint16_t *ids,
+ int16_t dma_id,
+ uint16_t vchan_id)
+{
+ bool wrap = vq->avail_wrap_counter;
+ struct vring_packed_desc *descs = vq->desc_packed;
+ uint64_t buf_lens[PACKED_BATCH_SIZE];
+ uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ uint16_t flags, i;
+
+ /* Batch must start on an aligned index and fit before ring end. */
+ if (unlikely(avail_idx & PACKED_BATCH_MASK))
+ return -1;
+ if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
+ return -1;
+
+ /*
+ * All descriptors must be available (avail == wrap, used != wrap)
+ * and not marked for the single-dequeue path (e.g. chained bufs).
+ */
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ flags = descs[avail_idx + i].flags;
+ if (unlikely((wrap != !!(flags & VRING_DESC_F_AVAIL)) ||
+ (wrap == !!(flags & VRING_DESC_F_USED)) ||
+ (flags & PACKED_DESC_SINGLE_DEQUEUE_FLAG)))
+ return -1;
+ }
+
+ /* Order the flag checks above before the len/addr loads below. */
+ rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+ lens[i] = descs[avail_idx + i].len;
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ desc_addrs[i] = descs[avail_idx + i].addr;
+ }
+
+ /*
+ * Re-read len and compare: bail out if the (guest-writable)
+ * descriptor changed between the two loads, or addr is NULL.
+ */
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ if (unlikely(!desc_addrs[i]))
+ return -1;
+ if (unlikely((lens[i] != descs[avail_idx + i].len)))
+ return -1;
+ }
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ if (virtio_dev_pktmbuf_prep(dev, pkts[i], lens[i]))
+ goto err;
+ }
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+ buf_lens[i] = pkts[i]->buf_len - pkts[i]->data_off;
+
+ /* Each mbuf data room must hold the payload (desc len minus hdr). */
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
+ goto err;
+ }
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ pkts[i]->pkt_len = lens[i] - buf_offset;
+ pkts[i]->data_len = pkts[i]->pkt_len;
+ ids[i] = descs[avail_idx + i].id;
+ }
+
+ /* Whole batch is enqueued to DMA at once; need room for all of it. */
+ if (rte_dma_burst_capacity(dma_id, vchan_id) < PACKED_BATCH_SIZE)
+ return -1;
+
+ return 0;
+
+err:
+ return -1;
+}
+
static __rte_always_inline int
virtio_dev_tx_batch_packed(struct virtio_net *dev,
struct vhost_virtqueue *vq,
@@ -3769,16 +3860,74 @@ virtio_dev_tx_async_single_packed(struct virtio_net *dev,
return err;
}
+/*
+ * Dequeue one batch of PACKED_BATCH_SIZE packets asynchronously:
+ * validate the batch, build one iovec iterator per packet for the DMA
+ * copy (guest payload -> mbuf), stash the virtio net header for later
+ * offload processing, then advance last_avail_idx and record the
+ * descriptor IDs in the async shadow used ring.
+ * Returns 0 on success, -1 if the batch check fails.
+ */
+static __rte_always_inline int
+virtio_dev_tx_async_packed_batch(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ struct rte_mbuf **pkts, uint16_t slot_idx,
+ uint16_t dma_id, uint16_t vchan_id)
+{
+ uint16_t avail_idx = vq->last_avail_idx;
+ uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ struct vhost_async *async = vq->async;
+ struct async_inflight_info *pkts_info = async->pkts_info;
+ struct virtio_net_hdr *hdr;
+ uint32_t mbuf_offset = 0;
+ uintptr_t desc_addrs[PACKED_BATCH_SIZE];
+ uint64_t desc_vva;
+ uint64_t lens[PACKED_BATCH_SIZE];
+ void *host_iova[PACKED_BATCH_SIZE];
+ uint64_t mapped_len[PACKED_BATCH_SIZE];
+ uint16_t ids[PACKED_BATCH_SIZE];
+ uint16_t i;
+
+ if (vhost_async_tx_batch_packed_check(dev, vq, pkts, avail_idx,
+ desc_addrs, lens, ids, dma_id, vchan_id))
+ return -1;
+
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+ rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
+
+ /*
+ * Translate the payload GPA (past the virtio-net header) to a host
+ * IOVA for DMA.  NOTE(review): only the first contiguous host
+ * region is used; if mapped_len[i] < pkt_len the copy below would
+ * be short -- confirm guest buffers are host-contiguous here.
+ */
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ host_iova[i] = (void *)(uintptr_t)gpa_to_first_hpa(dev,
+ desc_addrs[i] + buf_offset, pkts[i]->pkt_len, &mapped_len[i]);
+ }
+
+ /* One single-iovec async iterator per packet for the DMA engine. */
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ async_iter_initialize(dev, async);
+ async_iter_add_iovec(dev, async,
+ host_iova[i],
+ (void *)(uintptr_t)rte_pktmbuf_iova_offset(pkts[i], mbuf_offset),
+ mapped_len[i]);
+ async->iter_idx++;
+ }
+
+ /*
+ * Copy the virtio net header now (CPU, not DMA) so offloads can be
+ * applied when the transfer completes.
+ * NOTE(review): pkts_info[slot_idx + i] is not wrapped at vq->size;
+ * confirm the caller guarantees slot_idx + PACKED_BATCH_SIZE cannot
+ * run past the end of the pkts_info array.
+ */
+ if (virtio_net_with_host_offload(dev)) {
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ desc_vva = vhost_iova_to_vva(dev, vq, desc_addrs[i],
+ &lens[i], VHOST_ACCESS_RO);
+ hdr = (struct virtio_net_hdr *)(uintptr_t)desc_vva;
+ pkts_info[slot_idx + i].nethdr = *hdr;
+ }
+ }
+
+ vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
+
+ vhost_async_shadow_dequeue_packed_batch(vq, ids);
+
+ return 0;
+}
+
static __rte_always_inline uint16_t
virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
uint16_t count, uint16_t dma_id, uint16_t vchan_id, bool legacy_ol_flags)
{
- uint16_t pkt_idx;
+ uint32_t pkt_idx = 0;
uint16_t slot_idx = 0;
uint16_t nr_done_pkts = 0;
uint16_t pkt_err = 0;
uint32_t n_xfer;
+ uint16_t i;
struct vhost_async *async = vq->async;
struct async_inflight_info *pkts_info = async->pkts_info;
struct rte_mbuf *pkts_prealloc[MAX_PKT_BURST];
@@ -3790,12 +3939,26 @@ virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts_prealloc, count))
goto out;
- for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
+ do {
struct rte_mbuf *pkt = pkts_prealloc[pkt_idx];
rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
slot_idx = (async->pkts_idx + pkt_idx) % vq->size;
+ if (count - pkt_idx >= PACKED_BATCH_SIZE) {
+ if (!virtio_dev_tx_async_packed_batch(dev, vq, &pkts_prealloc[pkt_idx],
+ slot_idx, dma_id, vchan_id)) {
+ for (i = 0; i < PACKED_BATCH_SIZE; i++) {
+ slot_idx = (async->pkts_idx + pkt_idx) % vq->size;
+ pkts_info[slot_idx].descs = 1;
+ pkts_info[slot_idx].nr_buffers = 1;
+ pkts_info[slot_idx].mbuf = pkts_prealloc[pkt_idx];
+ pkt_idx++;
+ }
+ continue;
+ }
+ }
+
if (unlikely(virtio_dev_tx_async_single_packed(dev, vq, mbuf_pool, pkt,
slot_idx, legacy_ol_flags))) {
rte_pktmbuf_free_bulk(&pkts_prealloc[pkt_idx], count - pkt_idx);
@@ -3809,7 +3972,8 @@ virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
}
pkts_info[slot_idx].mbuf = pkt;
- }
+ pkt_idx++;
+ } while (pkt_idx < count);
n_xfer = vhost_async_dma_transfer(dev, vq, dma_id, vchan_id, async->pkts_idx,
async->iov_iter, pkt_idx);
--
2.35.1
next prev parent reply other threads:[~2022-12-20 1:31 UTC|newest]
Thread overview: 12+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-12-20 0:44 [PATCH 0/3] Async vhost packed ring optimization Cheng Jiang
2022-12-20 0:44 ` [PATCH 1/3] vhost: remove redundant copy for packed shadow used ring Cheng Jiang
2022-12-20 0:44 ` [PATCH 2/3] vhost: add batch enqueue in async vhost packed ring Cheng Jiang
2022-12-20 0:44 ` Cheng Jiang [this message]
2023-01-13 2:56 ` [PATCH v2 0/3] Async vhost packed ring optimization Cheng Jiang
2023-01-13 2:56 ` [PATCH v2 1/3] vhost: remove redundant copy for packed shadow used ring Cheng Jiang
2023-02-02 9:13 ` Maxime Coquelin
2023-01-13 2:56 ` [PATCH v2 2/3] vhost: add batch enqueue in async vhost packed ring Cheng Jiang
2023-02-02 9:31 ` Maxime Coquelin
2023-01-13 2:56 ` [PATCH v2 3/3] vhost: add batch dequeue " Cheng Jiang
2023-02-02 10:07 ` Maxime Coquelin
2023-02-03 14:59 ` [PATCH v2 0/3] Async vhost packed ring optimization Maxime Coquelin
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20221220004415.29576-4-cheng1.jiang@intel.com \
--to=cheng1.jiang@intel.com \
--cc=chenbo.xia@intel.com \
--cc=dev@dpdk.org \
--cc=jiayu.hu@intel.com \
--cc=maxime.coquelin@redhat.com \
--cc=wenwux.ma@intel.com \
--cc=xingguang.he@intel.com \
--cc=xuan.ding@intel.com \
--cc=yuanx.wang@intel.com \
--cc=yvonnex.yang@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).