From: Yuan Wang <yuanx.wang@intel.com>
To: maxime.coquelin@redhat.com,
	chenbo.xia@intel.com
Cc: dev@dpdk.org, jiayu.hu@intel.com, xingguang.he@intel.com,
 weix.ling@intel.com, yuanx.wang@intel.com, stable@dpdk.org
Subject: [PATCH] examples/vhost: fix retry logic on Rx path
Date: Thu, 19 May 2022 00:25:05 +0800
Message-Id: <20220518162505.1691401-1-yuanx.wang@intel.com>
X-Mailer: git-send-email 2.25.1
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit

drain_eth_rx() uses rte_vhost_avail_entries() to check how many
descriptors are available and thus decide whether a retry is needed.
However, this API only works for split rings; for packed rings it
returns an incorrect value, triggering unnecessary retries and a
significant performance penalty.

This patch instead retries based on the difference between the Rx
burst count and the number of packets actually enqueued, and
introduces enqueue_pkts() to reduce code duplication.

Fixes: 4ecf22e356 ("vhost: export device id as the interface to applications")
Cc: stable@dpdk.org

Signed-off-by: Yuan Wang <yuanx.wang@intel.com>
---
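For reviewers: a minimal standalone sketch of the new retry flow, for
illustration only. demo_enqueue() and the DEMO_* constants are
hypothetical stand-ins for enqueue_pkts(), burst_rx_retry_num and
burst_rx_delay_time; they are not part of this patch.

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define DEMO_RETRY_NUM  4	/* stand-in for burst_rx_retry_num */
#define DEMO_DELAY_US  15	/* stand-in for burst_rx_delay_time */

/* Pretend enqueue: accepts at most 8 packets per call. */
static uint16_t
demo_enqueue(uint16_t count)
{
	return count > 8 ? 8 : count;
}

int
main(void)
{
	uint16_t rx_count = 32;	/* packets received in this burst */
	uint16_t enqueue_count = demo_enqueue(rx_count);
	uint32_t retry = 0;

	/* Retry only on a shortfall, until the burst is drained
	 * or the retry budget is exhausted.
	 */
	while (enqueue_count < rx_count && retry++ < DEMO_RETRY_NUM) {
		usleep(DEMO_DELAY_US);
		enqueue_count += demo_enqueue(rx_count - enqueue_count);
	}

	printf("enqueued %u of %u packets after %u retries\n",
	       enqueue_count, rx_count, retry);
	return 0;
}
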
 examples/vhost/main.c | 78 ++++++++++++++++++-------------------------
 1 file changed, 32 insertions(+), 46 deletions(-)

diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index c4d46de1c5..198bf9dc4a 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -599,7 +599,7 @@ us_vhost_usage(const char *prgname)
 {
 	RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
 	"		--vm2vm [0|1|2]\n"
-	"		--rx_retry [0|1] --mergeable [0|1] --stats [0-N]\n"
+	"		--rx-retry [0|1] --mergeable [0|1] --stats [0-N]\n"
 	"		--socket-file <path>\n"
 	"		--nb-devices ND\n"
 	"		-p PORTMASK: Set mask for ports to be used by application\n"
@@ -1021,31 +1021,43 @@ sync_virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
 	}
 }
 
-static __rte_always_inline void
-drain_vhost(struct vhost_dev *vdev)
+static __rte_always_inline uint16_t
+enqueue_pkts(struct vhost_dev *vdev, struct rte_mbuf **pkts, uint16_t rx_count)
 {
-	uint16_t ret;
-	uint32_t buff_idx = rte_lcore_id() * RTE_MAX_VHOST_DEVICE + vdev->vid;
-	uint16_t nr_xmit = vhost_txbuff[buff_idx]->len;
-	struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;
+	uint16_t enqueue_count;
 
 	if (builtin_net_driver) {
-		ret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit);
+		enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ, pkts, rx_count);
 	} else if (dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled) {
 		uint16_t enqueue_fail = 0;
 		int16_t dma_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id;
 
 		complete_async_pkts(vdev);
-		ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ, m, nr_xmit, dma_id, 0);
+		enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
+					VIRTIO_RXQ, pkts, rx_count, dma_id, 0);
 
-		enqueue_fail = nr_xmit - ret;
+		enqueue_fail = rx_count - enqueue_count;
 		if (enqueue_fail)
-			free_pkts(&m[ret], nr_xmit - ret);
+			free_pkts(&pkts[enqueue_count], enqueue_fail);
+
 	} else {
-		ret = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
-						m, nr_xmit);
+		enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
+						pkts, rx_count);
 	}
 
+	return enqueue_count;
+}
+
+static __rte_always_inline void
+drain_vhost(struct vhost_dev *vdev)
+{
+	uint16_t ret;
+	uint32_t buff_idx = rte_lcore_id() * RTE_MAX_VHOST_DEVICE + vdev->vid;
+	uint16_t nr_xmit = vhost_txbuff[buff_idx]->len;
+	struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;
+
+	ret = enqueue_pkts(vdev, m, nr_xmit);
+
 	if (enable_stats) {
 		__atomic_add_fetch(&vdev->stats.rx_total_atomic, nr_xmit,
 				__ATOMIC_SEQ_CST);
@@ -1337,44 +1349,18 @@ drain_eth_rx(struct vhost_dev *vdev)
 	if (!rx_count)
 		return;
 
-	/*
-	 * When "enable_retry" is set, here we wait and retry when there
-	 * is no enough free slots in the queue to hold @rx_count packets,
-	 * to diminish packet loss.
-	 */
-	if (enable_retry &&
-	    unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
-			VIRTIO_RXQ))) {
-		uint32_t retry;
+	enqueue_count = enqueue_pkts(vdev, pkts, rx_count);
 
-		for (retry = 0; retry < burst_rx_retry_num; retry++) {
+	/* Retry if necessary */
+	if (unlikely(enqueue_count < rx_count) && enable_retry) {
+		uint32_t retry = 0;
+
+		while (enqueue_count < rx_count && retry++ < burst_rx_retry_num) {
 			rte_delay_us(burst_rx_delay_time);
-			if (rx_count <= rte_vhost_avail_entries(vdev->vid,
-					VIRTIO_RXQ))
-				break;
+			enqueue_count += enqueue_pkts(vdev, pkts, rx_count - enqueue_count);
 		}
 	}
 
-	if (builtin_net_driver) {
-		enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
-						pkts, rx_count);
-	} else if (dma_bind[vdev->vid].dmas[VIRTIO_RXQ].async_enabled) {
-		uint16_t enqueue_fail = 0;
-		int16_t dma_id = dma_bind[vdev->vid].dmas[VIRTIO_RXQ].dev_id;
-
-		complete_async_pkts(vdev);
-		enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
-					VIRTIO_RXQ, pkts, rx_count, dma_id, 0);
-
-		enqueue_fail = rx_count - enqueue_count;
-		if (enqueue_fail)
-			free_pkts(&pkts[enqueue_count], enqueue_fail);
-
-	} else {
-		enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
-						pkts, rx_count);
-	}
-
 	if (enable_stats) {
 		__atomic_add_fetch(&vdev->stats.rx_total_atomic, rx_count,
 				__ATOMIC_SEQ_CST);
-- 
2.25.1