From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, david.marchand@redhat.com, chenbox@nvidia.com
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [PATCH v3 3/3] vhost: improve RARP handling in dequeue paths
Date: Thu, 16 Jan 2025 10:54:16 +0100 [thread overview]
Message-ID: <20250116095416.3655699-4-maxime.coquelin@redhat.com> (raw)
In-Reply-To: <20250116095416.3655699-1-maxime.coquelin@redhat.com>
With the previous refactoring in place, we can now simplify
the RARP packet injection handling in both the sync and async
dequeue paths.
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
lib/vhost/virtio_net.c | 72 ++++++++++++++++++------------------------
1 file changed, 30 insertions(+), 42 deletions(-)
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 59ea2d16a5..c5de2d7a28 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -3590,7 +3590,6 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
struct virtio_net *dev;
- struct rte_mbuf *rarp_mbuf = NULL;
struct vhost_virtqueue *vq;
int16_t success = 1;
uint16_t nb_rx = 0;
@@ -3651,32 +3650,32 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
if (unlikely(rte_atomic_load_explicit(&dev->broadcast_rarp, rte_memory_order_acquire) &&
rte_atomic_compare_exchange_strong_explicit(&dev->broadcast_rarp,
&success, 0, rte_memory_order_release, rte_memory_order_relaxed))) {
-
- rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
- if (rarp_mbuf == NULL) {
+ /*
+ * Inject the RARP packet to the head of "pkts" array,
+ * so that switch's mac learning table will get updated first.
+ */
+ pkts[nb_rx] = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
+ if (pkts[nb_rx] == NULL) {
VHOST_DATA_LOG(dev->ifname, ERR, "failed to make RARP packet.");
goto out;
}
- /*
- * Inject it to the head of "pkts" array, so that switch's mac
- * learning table will get updated first.
- */
- pkts[0] = rarp_mbuf;
- vhost_queue_stats_update(dev, vq, pkts, 1);
- pkts++;
- count -= 1;
+ nb_rx += 1;
}
if (vq_is_packed(dev)) {
if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
- nb_rx = virtio_dev_tx_packed_legacy(dev, vq, mbuf_pool, pkts, count);
+ nb_rx += virtio_dev_tx_packed_legacy(dev, vq, mbuf_pool,
+ pkts + nb_rx, count - nb_rx);
else
- nb_rx = virtio_dev_tx_packed_compliant(dev, vq, mbuf_pool, pkts, count);
+ nb_rx += virtio_dev_tx_packed_compliant(dev, vq, mbuf_pool,
+ pkts + nb_rx, count - nb_rx);
} else {
if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
- nb_rx = virtio_dev_tx_split_legacy(dev, vq, mbuf_pool, pkts, count);
+ nb_rx += virtio_dev_tx_split_legacy(dev, vq, mbuf_pool,
+ pkts + nb_rx, count - nb_rx);
else
- nb_rx = virtio_dev_tx_split_compliant(dev, vq, mbuf_pool, pkts, count);
+ nb_rx += virtio_dev_tx_split_compliant(dev, vq, mbuf_pool,
+ pkts + nb_rx, count - nb_rx);
}
vhost_queue_stats_update(dev, vq, pkts, nb_rx);
@@ -3687,9 +3686,6 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
out_access_unlock:
rte_rwlock_read_unlock(&vq->access_lock);
- if (unlikely(rarp_mbuf != NULL))
- nb_rx += 1;
-
out_no_unlock:
return nb_rx;
}
@@ -4194,7 +4190,6 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
int *nr_inflight, int16_t dma_id, uint16_t vchan_id)
{
struct virtio_net *dev;
- struct rte_mbuf *rarp_mbuf = NULL;
struct vhost_virtqueue *vq;
int16_t success = 1;
uint16_t nb_rx = 0;
@@ -4274,36 +4269,32 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
if (unlikely(rte_atomic_load_explicit(&dev->broadcast_rarp, rte_memory_order_acquire) &&
rte_atomic_compare_exchange_strong_explicit(&dev->broadcast_rarp,
&success, 0, rte_memory_order_release, rte_memory_order_relaxed))) {
-
- rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
- if (rarp_mbuf == NULL) {
+ /*
+ * Inject the RARP packet to the head of "pkts" array,
+ * so that switch's mac learning table will get updated first.
+ */
+ pkts[nb_rx] = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
+ if (pkts[nb_rx] == NULL) {
VHOST_DATA_LOG(dev->ifname, ERR, "failed to make RARP packet.");
goto out;
}
- /*
- * Inject it to the head of "pkts" array, so that switch's mac
- * learning table will get updated first.
- */
- pkts[0] = rarp_mbuf;
- vhost_queue_stats_update(dev, vq, pkts, 1);
- pkts++;
- count -= 1;
+ nb_rx += 1;
}
if (vq_is_packed(dev)) {
if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
- nb_rx = virtio_dev_tx_async_packed_legacy(dev, vq, mbuf_pool,
- pkts, count, dma_id, vchan_id);
+ nb_rx += virtio_dev_tx_async_packed_legacy(dev, vq, mbuf_pool,
+ pkts + nb_rx, count - nb_rx, dma_id, vchan_id);
else
- nb_rx = virtio_dev_tx_async_packed_compliant(dev, vq, mbuf_pool,
- pkts, count, dma_id, vchan_id);
+ nb_rx += virtio_dev_tx_async_packed_compliant(dev, vq, mbuf_pool,
+ pkts + nb_rx, count - nb_rx, dma_id, vchan_id);
} else {
if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
- nb_rx = virtio_dev_tx_async_split_legacy(dev, vq, mbuf_pool,
- pkts, count, dma_id, vchan_id);
+ nb_rx += virtio_dev_tx_async_split_legacy(dev, vq, mbuf_pool,
+ pkts + nb_rx, count - nb_rx, dma_id, vchan_id);
else
- nb_rx = virtio_dev_tx_async_split_compliant(dev, vq, mbuf_pool,
- pkts, count, dma_id, vchan_id);
+ nb_rx += virtio_dev_tx_async_split_compliant(dev, vq, mbuf_pool,
+ pkts + nb_rx, count - nb_rx, dma_id, vchan_id);
}
*nr_inflight = vq->async->pkts_inflight_n;
@@ -4315,9 +4306,6 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
out_access_unlock:
rte_rwlock_read_unlock(&vq->access_lock);
- if (unlikely(rarp_mbuf != NULL))
- nb_rx += 1;
-
out_no_unlock:
return nb_rx;
}
--
2.47.1
next prev parent reply other threads:[~2025-01-16 9:54 UTC|newest]
Thread overview: 11+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-01-16 9:54 [PATCH v3 0/3] vhost: fix and improve dequeue error path Maxime Coquelin
2025-01-16 9:54 ` [PATCH v3 1/3] vhost: fix missing packets count reset when not ready Maxime Coquelin
2025-01-16 10:10 ` David Marchand
2025-01-16 12:14 ` Chenbo Xia
2025-01-16 9:54 ` [PATCH v3 2/3] vhost: rework dequeue paths error handling Maxime Coquelin
2025-01-16 10:10 ` David Marchand
2025-01-16 12:15 ` Chenbo Xia
2025-01-16 9:54 ` Maxime Coquelin [this message]
2025-01-16 10:12 ` [PATCH v3 3/3] vhost: improve RARP handling in dequeue paths David Marchand
2025-01-16 12:15 ` Chenbo Xia
2025-01-16 13:08 ` David Marchand
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250116095416.3655699-4-maxime.coquelin@redhat.com \
--to=maxime.coquelin@redhat.com \
--cc=chenbox@nvidia.com \
--cc=david.marchand@redhat.com \
--cc=dev@dpdk.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).