patches for DPDK stable branches
From: Yuan Wang <yuanx.wang@intel.com>
To: luca.boccassi@gmail.com, stable@dpdk.org
Cc: maxime.coquelin@redhat.com, chenbo.xia@intel.com,
	jiayu.hu@intel.com, cheng1.jiang@intel.com, weix.ling@intel.com,
	Yuan Wang <yuanx.wang@intel.com>
Subject: [PATCH 21.11] examples/vhost: fix retry logic on Rx path
Date: Sat,  9 Jul 2022 01:14:35 +0800
Message-ID: <20220708171435.60845-1-yuanx.wang@intel.com>

[ upstream commit 1907ce4baec392a750fbeba5e946920b2f00ae73 ]

drain_eth_rx() uses rte_vhost_avail_entries() to calculate the
number of available entries and decide whether a retry is
required. However, this function only works with split rings:
for packed rings it returns a wrong value, causing unnecessary
retries and a significant performance penalty.
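
For reference, a simplified sketch of why the split-ring
computation does not carry over (illustrative only; the field
names are simplified from the virtio spec, not taken from this
patch):

	/* Split ring: free entries follow from index arithmetic. */
	uint16_t avail = vq->avail->idx - vq->last_avail_idx;

	/* Packed ring: availability is signalled per descriptor via
	 * the AVAIL/USED flag bits and a wrap counter, so no single
	 * index subtraction yields the number of free entries. */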

This patch fixes that by using the difference between the Rx
burst size and the number of packets actually enqueued as the
retry condition.
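
The resulting retry loop, condensed from the diff below (a
sketch only; the authoritative code is in drain_eth_rx()):

	enqueue_count = enqueue_pkts(vdev, pkts, rx_count);

	/* The enqueue return value is ring-agnostic, so the
	 * shortfall check works for split and packed rings alike. */
	if (enable_retry && unlikely(enqueue_count < rx_count)) {
		uint32_t retry = 0;

		while (enqueue_count < rx_count &&
				retry++ < burst_rx_retry_num) {
			rte_delay_us(burst_rx_delay_time);
			enqueue_count += enqueue_pkts(vdev,
					&pkts[enqueue_count],
					rx_count - enqueue_count);
		}
	}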

Fixes: be800696c26e ("examples/vhost: use burst enqueue and dequeue from lib")

Signed-off-by: Yuan Wang <yuanx.wang@intel.com>
Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
---
 examples/vhost/main.c | 79 ++++++++++++++++++-------------------------
 1 file changed, 33 insertions(+), 46 deletions(-)

diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 84844da68f..f9e932061f 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -900,31 +900,43 @@ sync_virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
 	}
 }
 
-static __rte_always_inline void
-drain_vhost(struct vhost_dev *vdev)
+static __rte_always_inline uint16_t
+enqueue_pkts(struct vhost_dev *vdev, struct rte_mbuf **pkts, uint16_t rx_count)
 {
-	uint16_t ret;
-	uint32_t buff_idx = rte_lcore_id() * MAX_VHOST_DEVICE + vdev->vid;
-	uint16_t nr_xmit = vhost_txbuff[buff_idx]->len;
-	struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;
+	uint16_t enqueue_count;
 
 	if (builtin_net_driver) {
-		ret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit);
+		enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ, pkts, rx_count);
 	} else if (async_vhost_driver) {
 		uint16_t enqueue_fail = 0;
 
 		complete_async_pkts(vdev);
-		ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ, m, nr_xmit);
-		__atomic_add_fetch(&vdev->pkts_inflight, ret, __ATOMIC_SEQ_CST);
+		enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
+					VIRTIO_RXQ, pkts, rx_count);
+		__atomic_add_fetch(&vdev->pkts_inflight, enqueue_count, __ATOMIC_SEQ_CST);
 
-		enqueue_fail = nr_xmit - ret;
+		enqueue_fail = rx_count - enqueue_count;
 		if (enqueue_fail)
-			free_pkts(&m[ret], nr_xmit - ret);
+			free_pkts(&pkts[enqueue_count], enqueue_fail);
+
 	} else {
-		ret = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
-						m, nr_xmit);
+		enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
+						pkts, rx_count);
 	}
 
+	return enqueue_count;
+}
+
+static __rte_always_inline void
+drain_vhost(struct vhost_dev *vdev)
+{
+	uint16_t ret;
+	uint32_t buff_idx = rte_lcore_id() * MAX_VHOST_DEVICE + vdev->vid;
+	uint16_t nr_xmit = vhost_txbuff[buff_idx]->len;
+	struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;
+
+	ret = enqueue_pkts(vdev, m, nr_xmit);
+
 	if (enable_stats) {
 		__atomic_add_fetch(&vdev->stats.rx_total_atomic, nr_xmit,
 				__ATOMIC_SEQ_CST);
@@ -1217,44 +1229,19 @@ drain_eth_rx(struct vhost_dev *vdev)
 	if (!rx_count)
 		return;
 
-	/*
-	 * When "enable_retry" is set, here we wait and retry when there
-	 * is no enough free slots in the queue to hold @rx_count packets,
-	 * to diminish packet loss.
-	 */
-	if (enable_retry &&
-	    unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
-			VIRTIO_RXQ))) {
-		uint32_t retry;
+	enqueue_count = enqueue_pkts(vdev, pkts, rx_count);
 
-		for (retry = 0; retry < burst_rx_retry_num; retry++) {
+	/* Retry if necessary */
+	if (enable_retry && unlikely(enqueue_count < rx_count)) {
+		uint32_t retry = 0;
+
+		while (enqueue_count < rx_count && retry++ < burst_rx_retry_num) {
 			rte_delay_us(burst_rx_delay_time);
-			if (rx_count <= rte_vhost_avail_entries(vdev->vid,
-					VIRTIO_RXQ))
-				break;
+			enqueue_count += enqueue_pkts(vdev, &pkts[enqueue_count],
+						rx_count - enqueue_count);
 		}
 	}
 
-	if (builtin_net_driver) {
-		enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
-						pkts, rx_count);
-	} else if (async_vhost_driver) {
-		uint16_t enqueue_fail = 0;
-
-		complete_async_pkts(vdev);
-		enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
-					VIRTIO_RXQ, pkts, rx_count);
-		__atomic_add_fetch(&vdev->pkts_inflight, enqueue_count, __ATOMIC_SEQ_CST);
-
-		enqueue_fail = rx_count - enqueue_count;
-		if (enqueue_fail)
-			free_pkts(&pkts[enqueue_count], enqueue_fail);
-
-	} else {
-		enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
-						pkts, rx_count);
-	}
-
 	if (enable_stats) {
 		__atomic_add_fetch(&vdev->stats.rx_total_atomic, rx_count,
 				__ATOMIC_SEQ_CST);
-- 
2.25.1

