From: Yuan Wang <yuanx.wang@intel.com>
To: maxime.coquelin@redhat.com, chenbo.xia@intel.com
Cc: dev@dpdk.org, jiayu.hu@intel.com, xingguang.he@intel.com,
weix.ling@intel.com, yuanx.wang@intel.com, stable@dpdk.org
Subject: [PATCH] examples/vhost: fix retry logic on Rx
Date: Thu, 19 May 2022 00:25:05 +0800
Message-ID: <20220518162505.1691401-1-yuanx.wang@intel.com>
drain_eth_rx() uses rte_vhost_avail_entries() to query the number
of available entries and decide whether a retry is required.
However, this function only works with split rings: for packed
rings it returns a wrong value, causing unnecessary retries that
result in a significant performance penalty.

This patch drops the availability check and retries based on the
enqueue result instead: if fewer packets were enqueued than were
received, the not-yet-enqueued remainder is retried. It also
introduces enqueue_pkts() to remove the duplicated enqueue code.
(A simplified sketch of the new flow follows the diffstat below.)
Fixes: 4ecf22e356 ("vhost: export device id as the interface to applications")
Cc: stable@dpdk.org
Signed-off-by: Yuan Wang <yuanx.wang@intel.com>
---
examples/vhost/main.c | 78 ++++++++++++++++++-------------------------
1 file changed, 32 insertions(+), 46 deletions(-)
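
For clarity, here is a minimal sketch of the Rx flow this diff
introduces, reusing the patch's enqueue_pkts() and the example's
existing globals and helpers (ports[], vmdq_rx_q, free_pkts());
stats and the async completion path are left out, and the final
free of leftover packets is a simplification of the sync path:

    static __rte_always_inline void
    drain_eth_rx_sketch(struct vhost_dev *vdev)
    {
        struct rte_mbuf *pkts[MAX_PKT_BURST];
        uint16_t rx_count, enqueue_count;
        uint32_t retry = 0;

        rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
                                    pkts, MAX_PKT_BURST);
        if (!rx_count)
            return;

        /* The enqueue result itself reports a full ring, so this
         * works for both split and packed virtqueue layouts.
         */
        enqueue_count = enqueue_pkts(vdev, pkts, rx_count);

        while (enable_retry && enqueue_count < rx_count &&
               retry++ < burst_rx_retry_num) {
            rte_delay_us(burst_rx_delay_time);
            /* Retry only the not-yet-enqueued tail of the burst. */
            enqueue_count += enqueue_pkts(vdev, &pkts[enqueue_count],
                                          rx_count - enqueue_count);
        }

        /* Sync path: drop whatever still did not fit. */
        if (enqueue_count < rx_count)
            free_pkts(&pkts[enqueue_count], rx_count - enqueue_count);
    }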
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index c4d46de1c5..198bf9dc4a 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -599,7 +599,7 @@ us_vhost_usage(const char *prgname)
{
RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
" --vm2vm [0|1|2]\n"
- " --rx_retry [0|1] --mergeable [0|1] --stats [0-N]\n"
+ " --rx-retry [0|1] --mergeable [0|1] --stats [0-N]\n"
" --socket-file <path>\n"
" --nb-devices ND\n"
" -p PORTMASK: Set mask for ports to be used by application\n"
@@ -1021,31 +1021,43 @@ sync_virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
}
}
-static __rte_always_inline void
-drain_vhost(struct vhost_dev *vdev)
+static __rte_always_inline uint16_t
+enqueue_pkts(struct vhost_dev *vdev, struct rte_mbuf **pkts, uint16_t rx_count)
{
- uint16_t ret;
- uint32_t buff_idx = rte_lcore_id() * RTE_MAX_VHOST_DEVICE + vdev->vid;
- uint16_t nr_xmit = vhost_txbuff[buff_idx]->len;
- struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;
+ uint16_t enqueue_count;
if (builtin_net_driver) {
- ret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit);
+ enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ, pkts, rx_count);
} else if (dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled) {
uint16_t enqueue_fail = 0;
int16_t dma_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id;
complete_async_pkts(vdev);
- ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ, m, nr_xmit, dma_id, 0);
+ enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
+ VIRTIO_RXQ, pkts, rx_count, dma_id, 0);
- enqueue_fail = nr_xmit - ret;
+ enqueue_fail = rx_count - enqueue_count;
if (enqueue_fail)
- free_pkts(&m[ret], nr_xmit - ret);
+ free_pkts(&pkts[enqueue_count], enqueue_fail);
+
} else {
- ret = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
- m, nr_xmit);
+ enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
+ pkts, rx_count);
}
+ return enqueue_count;
+}
+
+static __rte_always_inline void
+drain_vhost(struct vhost_dev *vdev)
+{
+ uint16_t ret;
+ uint32_t buff_idx = rte_lcore_id() * RTE_MAX_VHOST_DEVICE + vdev->vid;
+ uint16_t nr_xmit = vhost_txbuff[buff_idx]->len;
+ struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;
+
+ ret = enqueue_pkts(vdev, m, nr_xmit);
+
if (enable_stats) {
__atomic_add_fetch(&vdev->stats.rx_total_atomic, nr_xmit,
__ATOMIC_SEQ_CST);
@@ -1337,44 +1349,18 @@ drain_eth_rx(struct vhost_dev *vdev)
if (!rx_count)
return;
- /*
- * When "enable_retry" is set, here we wait and retry when there
- * is no enough free slots in the queue to hold @rx_count packets,
- * to diminish packet loss.
- */
- if (enable_retry &&
- unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
- VIRTIO_RXQ))) {
- uint32_t retry;
+ enqueue_count = enqueue_pkts(vdev, pkts, rx_count);
- for (retry = 0; retry < burst_rx_retry_num; retry++) {
+ /* Retry if the guest did not accept the whole burst */
+ if (enable_retry && unlikely(enqueue_count < rx_count)) {
+ uint32_t retry = 0;
+
+ while (enqueue_count < rx_count && retry++ < burst_rx_retry_num) {
rte_delay_us(burst_rx_delay_time);
- if (rx_count <= rte_vhost_avail_entries(vdev->vid,
- VIRTIO_RXQ))
- break;
+ enqueue_count += enqueue_pkts(vdev, &pkts[enqueue_count], rx_count - enqueue_count);
}
}
- if (builtin_net_driver) {
- enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
- pkts, rx_count);
- } else if (dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled) {
- uint16_t enqueue_fail = 0;
- int16_t dma_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id;
-
- complete_async_pkts(vdev);
- enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
- VIRTIO_RXQ, pkts, rx_count, dma_id, 0);
-
- enqueue_fail = rx_count - enqueue_count;
- if (enqueue_fail)
- free_pkts(&pkts[enqueue_count], enqueue_fail);
-
- } else {
- enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
- pkts, rx_count);
- }
-
if (enable_stats) {
__atomic_add_fetch(&vdev->stats.rx_total_atomic, rx_count,
__ATOMIC_SEQ_CST);
--
2.25.1
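
For reference, the retry path can be exercised through the example's
existing options (see the usage string updated above); an illustrative
invocation, with the core list, portmask and socket path chosen
arbitrarily:

    ./dpdk-vhost -l 1-2 -n 4 -- -p 0x1 \
        --socket-file /tmp/sock0 --mergeable 1 --rx-retry 1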