From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, david.marchand@redhat.com, chenbox@nvidia.com
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [PATCH v2 3/4] vhost: rework async dequeue path error handling
Date: Wed, 15 Jan 2025 13:59:37 +0100 [thread overview]
Message-ID: <20250115125938.2699577-4-maxime.coquelin@redhat.com> (raw)
In-Reply-To: <20250115125938.2699577-1-maxime.coquelin@redhat.com>
This patch refactors the error handling in the Vhost async
dequeue path to ease its maintenance and improve its readability.
Suggested-by: David Marchand <david.marchand@redhat.com>
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
lib/vhost/virtio_net.c | 31 ++++++++++++++-----------------
1 file changed, 14 insertions(+), 17 deletions(-)
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 3a4955fd30..59ea2d16a5 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -4197,52 +4197,51 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
struct rte_mbuf *rarp_mbuf = NULL;
struct vhost_virtqueue *vq;
int16_t success = 1;
+ uint16_t nb_rx = 0;
dev = get_device(vid);
if (!dev || !nr_inflight)
- return 0;
+ goto out_no_unlock;
*nr_inflight = -1;
if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
VHOST_DATA_LOG(dev->ifname, ERR, "%s: built-in vhost net backend is disabled.",
__func__);
- return 0;
+ goto out_no_unlock;
}
if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
VHOST_DATA_LOG(dev->ifname, ERR, "%s: invalid virtqueue idx %d.",
__func__, queue_id);
- return 0;
+ goto out_no_unlock;
}
if (unlikely(dma_id < 0 || dma_id >= RTE_DMADEV_DEFAULT_MAX)) {
VHOST_DATA_LOG(dev->ifname, ERR, "%s: invalid dma id %d.",
__func__, dma_id);
- return 0;
+ goto out_no_unlock;
}
if (unlikely(!dma_copy_track[dma_id].vchans ||
!dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {
VHOST_DATA_LOG(dev->ifname, ERR, "%s: invalid channel %d:%u.",
__func__, dma_id, vchan_id);
- return 0;
+ goto out_no_unlock;
}
vq = dev->virtqueue[queue_id];
if (unlikely(rte_rwlock_read_trylock(&vq->access_lock) != 0))
- return 0;
+ goto out_no_unlock;
if (unlikely(vq->enabled == 0)) {
- count = 0;
goto out_access_unlock;
}
if (unlikely(!vq->async)) {
VHOST_DATA_LOG(dev->ifname, ERR, "%s: async not registered for queue id %d.",
__func__, queue_id);
- count = 0;
goto out_access_unlock;
}
@@ -4253,7 +4252,6 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
rte_rwlock_read_unlock(&vq->access_lock);
virtio_dev_vring_translate(dev, vq);
- count = 0;
goto out_no_unlock;
}
@@ -4280,7 +4278,6 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
if (rarp_mbuf == NULL) {
VHOST_DATA_LOG(dev->ifname, ERR, "failed to make RARP packet.");
- count = 0;
goto out;
}
/*
@@ -4295,22 +4292,22 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
if (vq_is_packed(dev)) {
if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
- count = virtio_dev_tx_async_packed_legacy(dev, vq, mbuf_pool,
+ nb_rx = virtio_dev_tx_async_packed_legacy(dev, vq, mbuf_pool,
pkts, count, dma_id, vchan_id);
else
- count = virtio_dev_tx_async_packed_compliant(dev, vq, mbuf_pool,
+ nb_rx = virtio_dev_tx_async_packed_compliant(dev, vq, mbuf_pool,
pkts, count, dma_id, vchan_id);
} else {
if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
- count = virtio_dev_tx_async_split_legacy(dev, vq, mbuf_pool,
+ nb_rx = virtio_dev_tx_async_split_legacy(dev, vq, mbuf_pool,
pkts, count, dma_id, vchan_id);
else
- count = virtio_dev_tx_async_split_compliant(dev, vq, mbuf_pool,
+ nb_rx = virtio_dev_tx_async_split_compliant(dev, vq, mbuf_pool,
pkts, count, dma_id, vchan_id);
}
*nr_inflight = vq->async->pkts_inflight_n;
- vhost_queue_stats_update(dev, vq, pkts, count);
+ vhost_queue_stats_update(dev, vq, pkts, nb_rx);
out:
vhost_user_iotlb_rd_unlock(vq);
@@ -4319,8 +4316,8 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
rte_rwlock_read_unlock(&vq->access_lock);
if (unlikely(rarp_mbuf != NULL))
- count += 1;
+ nb_rx += 1;
out_no_unlock:
- return count;
+ return nb_rx;
}
--
2.47.1
next prev parent reply other threads:[~2025-01-15 13:00 UTC|newest]
Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-01-15 12:59 [PATCH v2 0/4] vhost: fix and improve dequeue error path Maxime Coquelin
2025-01-15 12:59 ` [PATCH v2 1/4] vhost: fix missing packets count reset when not ready Maxime Coquelin
2025-01-15 16:41 ` David Marchand
2025-01-15 12:59 ` [PATCH v2 2/4] vhost: rework dequeue path error handling Maxime Coquelin
2025-01-15 16:42 ` David Marchand
2025-01-15 12:59 ` Maxime Coquelin [this message]
2025-01-15 16:42 ` [PATCH v2 3/4] vhost: rework async " David Marchand
2025-01-15 16:49 ` David Marchand
2025-01-15 12:59 ` [PATCH v2 4/4] vhost: improve RARP handling in dequeue paths Maxime Coquelin
2025-01-15 16:46 ` David Marchand
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250115125938.2699577-4-maxime.coquelin@redhat.com \
--to=maxime.coquelin@redhat.com \
--cc=chenbox@nvidia.com \
--cc=david.marchand@redhat.com \
--cc=dev@dpdk.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).