* [RFC v2] add support for async vhost packed ring dequeue
[not found] <http://patchwork.dpdk.org/project/dpdk/patch/20220407082549.3852-1-cheng1.jiang@intel.com/>
@ 2022-04-19 6:43 ` Cheng Jiang
From: Cheng Jiang @ 2022-04-19 6:43 UTC (permalink / raw)
To: maxime.coquelin, chenbo.xia
Cc: dev, jiayu.hu, xuan.ding, wenwux.ma, yuanx.wang, yvonnex.yang,
Cheng Jiang
This RFC patch implements the packed ring dequeue data path for
asynchronous vhost.
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
---
It's based on the patch set:
http://patchwork.dpdk.org/project/dpdk/cover/20220419034323.92820-1-xuan.ding@intel.com/
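For reviewers, here is a minimal usage sketch of the new dequeue path.
It is illustrative only: the rte_vhost_async_try_dequeue_burst() prototype
is the one proposed in the series above, and vid, queue_id, dma_id and
vchan_id are placeholders whose values depend on how the application set
up its vhost device and DMA vchannel.

    #include <rte_mbuf.h>
    #include <rte_vhost_async.h>

    #define BURST_SZ 32

    /* Drain up to BURST_SZ packets completed by the DMA engine on one queue. */
    static uint16_t
    poll_async_dequeue(int vid, uint16_t queue_id, struct rte_mempool *mbuf_pool,
                       int16_t dma_id, uint16_t vchan_id)
    {
        struct rte_mbuf *pkts[BURST_SZ];
        int nr_inflight = 0;
        uint16_t i, nr;

        /* Same entry point for split and packed rings with this patch applied. */
        nr = rte_vhost_async_try_dequeue_burst(vid, queue_id, mbuf_pool,
                pkts, BURST_SZ, &nr_inflight, dma_id, vchan_id);

        for (i = 0; i < nr; i++)
            rte_pktmbuf_free(pkts[i]); /* or hand off to a TX port */

        return nr;
    }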
lib/vhost/virtio_net.c | 219 ++++++++++++++++++++++++++++++++++++-----
1 file changed, 193 insertions(+), 26 deletions(-)
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 514315ef50..26143344bc 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -3168,7 +3168,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
}
static __rte_always_inline uint16_t
-async_poll_dequeue_completed_split(struct virtio_net *dev, uint16_t queue_id,
+async_poll_dequeue_completed(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint16_t count, uint16_t dma_id,
uint16_t vchan_id, bool legacy_ol_flags)
{
@@ -3186,7 +3186,7 @@ async_poll_dequeue_completed_split(struct virtio_net *dev, uint16_t queue_id,
from = start_idx;
while (vq->async->pkts_cmpl_flag[from] && count--) {
vq->async->pkts_cmpl_flag[from] = false;
- from = (from + 1) & (vq->size - 1);
+ from = (from + 1) % vq->size;
nr_cpl_pkts++;
}
@@ -3194,7 +3194,7 @@ async_poll_dequeue_completed_split(struct virtio_net *dev, uint16_t queue_id,
return 0;
for (i = 0; i < nr_cpl_pkts; i++) {
- from = (start_idx + i) & (vq->size - 1);
+ from = (start_idx + i) % vq->size;
pkts[i] = pkts_info[from].mbuf;
if (virtio_net_with_host_offload(dev))
@@ -3203,10 +3203,14 @@ async_poll_dequeue_completed_split(struct virtio_net *dev, uint16_t queue_id,
}
/* write back completed descs to used ring and update used idx */
- write_back_completed_descs_split(vq, nr_cpl_pkts);
- __atomic_add_fetch(&vq->used->idx, nr_cpl_pkts, __ATOMIC_RELEASE);
- vhost_vring_call_split(dev, vq);
-
+ if (vq_is_packed(dev)) {
+ write_back_completed_descs_packed(vq, nr_cpl_pkts);
+ vhost_vring_call_packed(dev, vq);
+ } else {
+ write_back_completed_descs_split(vq, nr_cpl_pkts);
+ __atomic_add_fetch(&vq->used->idx, nr_cpl_pkts, __ATOMIC_RELEASE);
+ vhost_vring_call_split(dev, vq);
+ }
vq->async->pkts_inflight_n -= nr_cpl_pkts;
return nr_cpl_pkts;
@@ -3342,8 +3346,8 @@ virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
out:
/* DMA device may serve other queues, unconditionally check completed. */
- nr_done_pkts = async_poll_dequeue_completed_split(dev, queue_id, pkts, pkts_size,
- dma_id, vchan_id, legacy_ol_flags);
+ nr_done_pkts = async_poll_dequeue_completed(dev, queue_id, pkts, pkts_size,
+ dma_id, vchan_id, legacy_ol_flags);
return nr_done_pkts;
}
@@ -3370,6 +3374,172 @@ virtio_dev_tx_async_split_compliant(struct virtio_net *dev,
pkts, count, dma_id, vchan_id, false);
}
+static __rte_always_inline void
+vhost_async_shadow_dequeue_single_packed(struct vhost_virtqueue *vq, uint16_t buf_id)
+{
+ struct vhost_async *async = vq->async;
+ uint16_t idx = async->buffer_idx_packed;
+
+ async->buffers_packed[idx].id = buf_id;
+ async->buffers_packed[idx].len = 0;
+ async->buffers_packed[idx].count = 1;
+
+ async->buffer_idx_packed++;
+ if (async->buffer_idx_packed >= vq->size)
+ async->buffer_idx_packed -= vq->size;
+
+}
+
+static __rte_always_inline int
+virtio_dev_tx_async_single_packed(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ struct rte_mempool *mbuf_pool,
+ struct rte_mbuf *pkts,
+ uint16_t slot_idx,
+ bool legacy_ol_flags)
+{
+ int err;
+ uint16_t buf_id, desc_count = 0;
+ uint16_t nr_vec = 0;
+ uint32_t buf_len;
+ struct buf_vector buf_vec[BUF_VECTOR_MAX];
+ static bool allocerr_warned;
+
+ if (unlikely(fill_vec_buf_packed(dev, vq, vq->last_avail_idx, &desc_count,
+ buf_vec, &nr_vec, &buf_id, &buf_len,
+ VHOST_ACCESS_RO) < 0))
+ return -1;
+
+ if (unlikely(virtio_dev_pktmbuf_prep(dev, pkts, buf_len))) {
+ if (!allocerr_warned) {
+ VHOST_LOG_DATA(ERR, "Failed mbuf alloc of size %d from %s on %s.\n",
+ buf_len, mbuf_pool->name, dev->ifname);
+ allocerr_warned = true;
+ }
+ return -1;
+ }
+
+ err = desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts, mbuf_pool,
+ legacy_ol_flags, slot_idx, true);
+ if (unlikely(err)) {
+ rte_pktmbuf_free(pkts);
+ if (!allocerr_warned) {
+ VHOST_LOG_DATA(ERR, "Failed to copy desc to mbuf on %s.\n", dev->ifname);
+ allocerr_warned = true;
+ }
+ return -1;
+ }
+
+ /* update async shadow packed ring */
+ vhost_async_shadow_dequeue_single_packed(vq, buf_id);
+
+ return err;
+}
+
+static __rte_always_inline uint16_t
+virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, uint16_t queue_id,
+ struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
+ uint16_t count, uint16_t dma_id, uint16_t vchan_id, bool legacy_ol_flags)
+{
+ uint16_t pkt_idx;
+ uint16_t slot_idx = 0;
+ uint16_t nr_done_pkts = 0;
+ uint16_t pkt_err = 0;
+ uint32_t n_xfer;
+ struct vhost_async *async = vq->async;
+ struct async_inflight_info *pkts_info = async->pkts_info;
+ struct rte_mbuf *pkts_prealloc[MAX_PKT_BURST];
+
+ VHOST_LOG_DATA(DEBUG, "(%d) about to dequeue %u buffers\n", dev->vid, count);
+
+ async_iter_reset(async);
+
+ if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts_prealloc, count))
+ goto out;
+
+ for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
+ struct rte_mbuf *pkt = pkts_prealloc[pkt_idx];
+
+ rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
+
+ slot_idx = (async->pkts_idx + pkt_idx) % vq->size;
+ if (unlikely(virtio_dev_tx_async_single_packed(dev, vq, mbuf_pool, pkt,
+ slot_idx, legacy_ol_flags))) {
+ rte_pktmbuf_free_bulk(&pkts_prealloc[pkt_idx], count - pkt_idx);
+ break;
+ }
+
+ pkts_info[slot_idx].mbuf = pkt;
+
+ vq_inc_last_avail_packed(vq, 1);
+
+ }
+
+ n_xfer = vhost_async_dma_transfer(dev, vq, dma_id, vchan_id, async->pkts_idx,
+ async->iov_iter, pkt_idx);
+
+ async->pkts_inflight_n += n_xfer;
+
+ pkt_err = pkt_idx - n_xfer;
+
+ if (unlikely(pkt_err)) {
+ pkt_idx -= pkt_err;
+
+ /**
+ * recover DMA-copy related structures and free pktmbufs for DMA-error pkts.
+ */
+ if (async->buffer_idx_packed >= pkt_err)
+ async->buffer_idx_packed -= pkt_err;
+ else
+ async->buffer_idx_packed += vq->size - pkt_err;
+
+ /* recover available ring; do it before the loop below consumes pkt_err */
+ if (vq->last_avail_idx >= pkt_err) {
+ vq->last_avail_idx -= pkt_err;
+ } else {
+ vq->last_avail_idx += vq->size - pkt_err;
+ vq->avail_wrap_counter ^= 1;
+ }
+
+ while (pkt_err-- > 0) {
+ rte_pktmbuf_free(pkts_info[slot_idx % vq->size].mbuf);
+ slot_idx--;
+ }
+ }
+
+ async->pkts_idx += pkt_idx;
+ if (async->pkts_idx >= vq->size)
+ async->pkts_idx -= vq->size;
+
+out:
+ nr_done_pkts = async_poll_dequeue_completed(dev, queue_id, pkts, count,
+ dma_id, vchan_id, legacy_ol_flags);
+
+ return nr_done_pkts;
+}
+
+__rte_noinline
+static uint16_t
+virtio_dev_tx_async_packed_legacy(struct virtio_net *dev,
+ struct vhost_virtqueue *vq, uint16_t queue_id,
+ struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
+ uint16_t count, uint16_t dma_id, uint16_t vchan_id)
+{
+ return virtio_dev_tx_async_packed(dev, vq, queue_id, mbuf_pool,
+ pkts, count, dma_id, vchan_id, true);
+}
+
+__rte_noinline
+static uint16_t
+virtio_dev_tx_async_packed_compliant(struct virtio_net *dev,
+ struct vhost_virtqueue *vq, uint16_t queue_id,
+ struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
+ uint16_t count, uint16_t dma_id, uint16_t vchan_id)
+{
+ return virtio_dev_tx_async_packed(dev, vq, queue_id, mbuf_pool,
+ pkts, count, dma_id, vchan_id, false);
+}
+
uint16_t
rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
@@ -3468,25 +3638,22 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
count -= 1;
}
- if (unlikely(vq_is_packed(dev))) {
- static bool not_support_pack_log;
- if (!not_support_pack_log) {
- VHOST_LOG_DATA(ERR,
- "(%s) %s: async dequeue does not support packed ring.\n",
- dev->ifname, __func__);
- not_support_pack_log = true;
- }
- count = 0;
- goto out;
+ if (vq_is_packed(dev)) {
+ if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
+ count = virtio_dev_tx_async_packed_legacy(dev, vq, queue_id,
+ mbuf_pool, pkts, count, dma_id, vchan_id);
+ else
+ count = virtio_dev_tx_async_packed_compliant(dev, vq, queue_id,
+ mbuf_pool, pkts, count, dma_id, vchan_id);
+ } else {
+ if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
+ count = virtio_dev_tx_async_split_legacy(dev, vq, queue_id,
+ mbuf_pool, pkts, count, dma_id, vchan_id);
+ else
+ count = virtio_dev_tx_async_split_compliant(dev, vq, queue_id,
+ mbuf_pool, pkts, count, dma_id, vchan_id);
}
- if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
- count = virtio_dev_tx_async_split_legacy(dev, vq, queue_id,
- mbuf_pool, pkts, count, dma_id, vchan_id);
- else
- count = virtio_dev_tx_async_split_compliant(dev, vq, queue_id,
- mbuf_pool, pkts, count, dma_id, vchan_id);
-
*nr_inflight = vq->async->pkts_inflight_n;
out:
--
2.35.1