From: Ciara Loftus <ciara.loftus@intel.com>
To: dev@dpdk.org
Cc: bruce.richardson@intel.com, Ciara Loftus <ciara.loftus@intel.com>
Subject: [PATCH v2 04/15] net/i40e: use the same Rx path across process types
Date: Thu,  7 Aug 2025 12:39:38 +0000	[thread overview]
Message-ID: <20250807123949.4063416-5-ciara.loftus@intel.com> (raw)
In-Reply-To: <20250807123949.4063416-1-ciara.loftus@intel.com>

In the interest of simplicity, let the primary process select the Rx
path to be used by all processes using the given device.

The many logs that reported individual Rx path selections have been
consolidated into a single log.
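
To illustrate the resulting control flow for readers less familiar with
the DPDK multi-process model, here is a minimal sketch (not the literal
driver code in the diff below). pick_best_rx_path() and
rx_path_selection_sketch() are hypothetical names used only for this
illustration, standing in for the per-architecture selection logic in
i40e_set_rx_function(); the sketch assumes the i40e_rxtx.c context where
struct i40e_adapter, enum i40e_rx_func_type and the i40e_rx_burst_infos
table are visible:

    #include <rte_eal.h>	/* rte_eal_process_type(), RTE_PROC_PRIMARY */

    /* Hypothetical stand-in for the per-architecture selection logic;
     * the real checks live in i40e_set_rx_function() below.
     */
    static enum i40e_rx_func_type
    pick_best_rx_path(struct rte_eth_dev *dev)
    {
    	(void)dev;
    	return I40E_RX_DEFAULT;
    }

    static void
    rx_path_selection_sketch(struct rte_eth_dev *dev, struct i40e_adapter *ad)
    {
    	/* Only the primary process evaluates device/CPU capabilities.
    	 * Its choice is stored in the adapter private data, which is
    	 * allocated from shared memory and therefore visible to
    	 * secondary processes.
    	 */
    	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
    		ad->rx_func_type = pick_best_rx_path(dev);

    	/* Every process, primary or secondary, resolves its local burst
    	 * function pointer from that shared selection, so all processes
    	 * use the same Rx path for the device.
    	 */
    	dev->rx_pkt_burst = i40e_rx_burst_infos[ad->rx_func_type].pkt_burst;
    }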

Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
---
v2:
* reordered i40e_rx_func_type and i40e_rx_burst_infos elements
---
 drivers/net/intel/i40e/i40e_ethdev.h |  20 +++-
 drivers/net/intel/i40e/i40e_rxtx.c   | 168 ++++++++++++---------------
 2 files changed, 93 insertions(+), 95 deletions(-)

diff --git a/drivers/net/intel/i40e/i40e_ethdev.h b/drivers/net/intel/i40e/i40e_ethdev.h
index 44864292d0..c38cb5f340 100644
--- a/drivers/net/intel/i40e/i40e_ethdev.h
+++ b/drivers/net/intel/i40e/i40e_ethdev.h
@@ -1226,6 +1226,22 @@ struct i40e_vsi_vlan_pvid_info {
 #define I40E_MBUF_CHECK_F_TX_SEGMENT     (1ULL << 2)
 #define I40E_MBUF_CHECK_F_TX_OFFLOAD     (1ULL << 3)
 
+enum i40e_rx_func_type {
+	I40E_RX_DEFAULT,
+	I40E_RX_SCATTERED,
+	I40E_RX_BULK_ALLOC,
+	I40E_RX_SSE,
+	I40E_RX_SSE_SCATTERED,
+	I40E_RX_AVX2,
+	I40E_RX_AVX2_SCATTERED,
+	I40E_RX_AVX512,
+	I40E_RX_AVX512_SCATTERED,
+	I40E_RX_NEON,
+	I40E_RX_NEON_SCATTERED,
+	I40E_RX_ALTIVEC,
+	I40E_RX_ALTIVEC_SCATTERED,
+};
+
 /*
  * Structure to store private data for each PF/VF instance.
  */
@@ -1242,6 +1258,8 @@ struct i40e_adapter {
 	bool tx_simple_allowed;
 	bool tx_vec_allowed;
 
+	enum i40e_rx_func_type rx_func_type;
+
 	uint64_t mbuf_check; /* mbuf check flags. */
 	uint16_t max_pkt_len; /* Maximum packet length */
 	eth_tx_burst_t tx_pkt_burst;
@@ -1262,8 +1280,6 @@ struct i40e_adapter {
 	uint8_t rss_reta_updated;
 
 	/* used only on x86, zero on other architectures */
-	bool rx_use_avx2;
-	bool rx_use_avx512;
 	bool tx_use_avx2;
 	bool tx_use_avx512;
 };
diff --git a/drivers/net/intel/i40e/i40e_rxtx.c b/drivers/net/intel/i40e/i40e_rxtx.c
index aba3c11ee5..c89359e625 100644
--- a/drivers/net/intel/i40e/i40e_rxtx.c
+++ b/drivers/net/intel/i40e/i40e_rxtx.c
@@ -3310,6 +3310,31 @@ get_avx_supported(bool request_avx512)
 }
 #endif /* RTE_ARCH_X86 */
 
+static const struct {
+	eth_rx_burst_t pkt_burst;
+	const char *info;
+} i40e_rx_burst_infos[] = {
+	[I40E_RX_DEFAULT] = { i40e_recv_pkts, "Scalar" },
+	[I40E_RX_SCATTERED] = { i40e_recv_scattered_pkts, "Scalar Scattered" },
+	[I40E_RX_BULK_ALLOC] = { i40e_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" },
+#ifdef RTE_ARCH_X86
+	[I40E_RX_SSE] = { i40e_recv_pkts_vec, "Vector SSE" },
+	[I40E_RX_SSE_SCATTERED] = { i40e_recv_scattered_pkts_vec, "Vector SSE Scattered" },
+	[I40E_RX_AVX2] = { i40e_recv_pkts_vec_avx2, "Vector AVX2" },
+	[I40E_RX_AVX2_SCATTERED] = { i40e_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
+#ifdef CC_AVX512_SUPPORT
+	[I40E_RX_AVX512] = { i40e_recv_pkts_vec_avx512, "Vector AVX512" },
+	[I40E_RX_AVX512_SCATTERED] = {
+		i40e_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
+#endif
+#elif defined(RTE_ARCH_ARM64)
+	[I40E_RX_NEON] = { i40e_recv_pkts_vec, "Vector Neon" },
+	[I40E_RX_NEON_SCATTERED] = { i40e_recv_scattered_pkts_vec, "Vector Neon Scattered" },
+#elif defined(RTE_ARCH_PPC_64)
+	[I40E_RX_ALTIVEC] = { i40e_recv_pkts_vec, "Vector AltiVec" },
+	[I40E_RX_ALTIVEC_SCATTERED] = { i40e_recv_scattered_pkts_vec, "Vector AltiVec Scattered" },
+#endif
+};
 
 void __rte_cold
 i40e_set_rx_function(struct rte_eth_dev *dev)
@@ -3317,109 +3342,86 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
 	struct i40e_adapter *ad =
 		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	uint16_t vector_rx, i;
+
+	/* The primary process selects the rx path for all processes. */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		goto out;
+
 	/* In order to allow Vector Rx there are a few configuration
 	 * conditions to be met and Rx Bulk Allocation should be allowed.
 	 */
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
 #ifdef RTE_ARCH_X86
-		ad->rx_use_avx512 = false;
-		ad->rx_use_avx2 = false;
+	bool rx_use_avx512 = false, rx_use_avx2 = false;
 #endif
-		if (i40e_rx_vec_dev_conf_condition_check(dev) ||
-		    !ad->rx_bulk_alloc_allowed) {
-			PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet"
-				     " Vector Rx preconditions",
-				     dev->data->port_id);
+	if (i40e_rx_vec_dev_conf_condition_check(dev) || !ad->rx_bulk_alloc_allowed) {
+		PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet"
+				" Vector Rx preconditions",
+				dev->data->port_id);
 
-			ad->rx_vec_allowed = false;
-		}
-		if (ad->rx_vec_allowed) {
-			for (i = 0; i < dev->data->nb_rx_queues; i++) {
-				struct ci_rx_queue *rxq =
-					dev->data->rx_queues[i];
+		ad->rx_vec_allowed = false;
+	}
+	if (ad->rx_vec_allowed) {
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			struct ci_rx_queue *rxq =
+				dev->data->rx_queues[i];
 
-				if (rxq && i40e_rxq_vec_setup(rxq)) {
-					ad->rx_vec_allowed = false;
-					break;
-				}
+			if (rxq && i40e_rxq_vec_setup(rxq)) {
+				ad->rx_vec_allowed = false;
+				break;
 			}
+		}
 #ifdef RTE_ARCH_X86
-			ad->rx_use_avx512 = get_avx_supported(1);
+		rx_use_avx512 = get_avx_supported(1);
 
-			if (!ad->rx_use_avx512)
-				ad->rx_use_avx2 = get_avx_supported(0);
+		if (!rx_use_avx512)
+			rx_use_avx2 = get_avx_supported(0);
 #endif
-		}
 	}
 
-	if (ad->rx_vec_allowed  &&
-	    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
+	if (ad->rx_vec_allowed && rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
 #ifdef RTE_ARCH_X86
 		if (dev->data->scattered_rx) {
-			if (ad->rx_use_avx512) {
+			if (rx_use_avx512) {
 #ifdef CC_AVX512_SUPPORT
-				PMD_DRV_LOG(NOTICE,
-					"Using AVX512 Vector Scattered Rx (port %d).",
-					dev->data->port_id);
-				dev->rx_pkt_burst =
-					i40e_recv_scattered_pkts_vec_avx512;
+				ad->rx_func_type = I40E_RX_AVX512_SCATTERED;
 #endif
 			} else {
-				PMD_INIT_LOG(DEBUG,
-					"Using %sVector Scattered Rx (port %d).",
-					ad->rx_use_avx2 ? "avx2 " : "",
-					dev->data->port_id);
-				dev->rx_pkt_burst = ad->rx_use_avx2 ?
-					i40e_recv_scattered_pkts_vec_avx2 :
-					i40e_recv_scattered_pkts_vec;
+				ad->rx_func_type = rx_use_avx2 ?
+					I40E_RX_AVX2_SCATTERED :
+					I40E_RX_SCATTERED;
 				dev->recycle_rx_descriptors_refill =
 					i40e_recycle_rx_descriptors_refill_vec;
 			}
 		} else {
-			if (ad->rx_use_avx512) {
+			if (rx_use_avx512) {
 #ifdef CC_AVX512_SUPPORT
-				PMD_DRV_LOG(NOTICE,
-					"Using AVX512 Vector Rx (port %d).",
-					dev->data->port_id);
-				dev->rx_pkt_burst =
-					i40e_recv_pkts_vec_avx512;
+				ad->rx_func_type = I40E_RX_AVX512;
 #endif
 			} else {
-				PMD_INIT_LOG(DEBUG,
-					"Using %sVector Rx (port %d).",
-					ad->rx_use_avx2 ? "avx2 " : "",
-					dev->data->port_id);
-				dev->rx_pkt_burst = ad->rx_use_avx2 ?
-					i40e_recv_pkts_vec_avx2 :
-					i40e_recv_pkts_vec;
+				ad->rx_func_type = rx_use_avx2 ?
+					I40E_RX_AVX2 :
+					I40E_RX_SSE;
 				dev->recycle_rx_descriptors_refill =
 					i40e_recycle_rx_descriptors_refill_vec;
 			}
 		}
-#else /* RTE_ARCH_X86 */
+#elif defined(RTE_ARCH_ARM64)
 		dev->recycle_rx_descriptors_refill = i40e_recycle_rx_descriptors_refill_vec;
-		if (dev->data->scattered_rx) {
-			PMD_INIT_LOG(DEBUG,
-				     "Using Vector Scattered Rx (port %d).",
-				     dev->data->port_id);
-			dev->rx_pkt_burst = i40e_recv_scattered_pkts_vec;
-		} else {
-			PMD_INIT_LOG(DEBUG, "Using Vector Rx (port %d).",
-				     dev->data->port_id);
-			dev->rx_pkt_burst = i40e_recv_pkts_vec;
-		}
+		if (dev->data->scattered_rx)
+			ad->rx_func_type = I40E_RX_NEON_SCATTERED;
+		else
+			ad->rx_func_type = I40E_RX_NEON;
+#elif defined(RTE_ARCH_PPC_64)
+		dev->recycle_rx_descriptors_refill = i40e_recycle_rx_descriptors_refill_vec;
+		if (dev->data->scattered_rx)
+			ad->rx_func_type = I40E_RX_ALTIVEC_SCATTERED;
+		else
+			ad->rx_func_type = I40E_RX_ALTIVEC;
 #endif /* RTE_ARCH_X86 */
 	} else if (!dev->data->scattered_rx && ad->rx_bulk_alloc_allowed) {
-		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
-				    "satisfied. Rx Burst Bulk Alloc function "
-				    "will be used on port=%d.",
-			     dev->data->port_id);
-
 		dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
 	} else {
 		/* Simple Rx Path. */
-		PMD_INIT_LOG(DEBUG, "Simple Rx path will be used on port=%d.",
-			     dev->data->port_id);
 		dev->rx_pkt_burst = dev->data->scattered_rx ?
 					i40e_recv_scattered_pkts :
 					i40e_recv_pkts;
@@ -3444,32 +3446,12 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
 				rxq->vector_rx = vector_rx;
 		}
 	}
-}
 
-static const struct {
-	eth_rx_burst_t pkt_burst;
-	const char *info;
-} i40e_rx_burst_infos[] = {
-	{ i40e_recv_scattered_pkts,          "Scalar Scattered" },
-	{ i40e_recv_pkts_bulk_alloc,         "Scalar Bulk Alloc" },
-	{ i40e_recv_pkts,                    "Scalar" },
-#ifdef RTE_ARCH_X86
-#ifdef CC_AVX512_SUPPORT
-	{ i40e_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
-	{ i40e_recv_pkts_vec_avx512,           "Vector AVX512" },
-#endif
-	{ i40e_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
-	{ i40e_recv_pkts_vec_avx2,           "Vector AVX2" },
-	{ i40e_recv_scattered_pkts_vec,      "Vector SSE Scattered" },
-	{ i40e_recv_pkts_vec,                "Vector SSE" },
-#elif defined(RTE_ARCH_ARM64)
-	{ i40e_recv_scattered_pkts_vec,      "Vector Neon Scattered" },
-	{ i40e_recv_pkts_vec,                "Vector Neon" },
-#elif defined(RTE_ARCH_PPC_64)
-	{ i40e_recv_scattered_pkts_vec,      "Vector AltiVec Scattered" },
-	{ i40e_recv_pkts_vec,                "Vector AltiVec" },
-#endif
-};
+out:
+	dev->rx_pkt_burst = i40e_rx_burst_infos[ad->rx_func_type].pkt_burst;
+	PMD_DRV_LOG(NOTICE, "Using %s Rx burst function (port %d).",
+		i40e_rx_burst_infos[ad->rx_func_type].info, dev->data->port_id);
+}
 
 int
 i40e_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
-- 
2.34.1



Thread overview: 43+ messages
2025-07-25 12:49 [RFC PATCH 00/14] net/intel: rx path selection simplification Ciara Loftus
2025-07-25 12:49 ` [RFC PATCH 01/14] net/ice: use the same Rx path across process types Ciara Loftus
2025-07-25 13:40   ` Bruce Richardson
2025-07-25 12:49 ` [RFC PATCH 02/14] net/iavf: rename Rx/Tx function type variables Ciara Loftus
2025-07-25 13:40   ` Bruce Richardson
2025-07-25 12:49 ` [RFC PATCH 03/14] net/iavf: use the same Rx path across process types Ciara Loftus
2025-07-25 13:41   ` Bruce Richardson
2025-07-25 12:49 ` [RFC PATCH 04/14] net/i40e: " Ciara Loftus
2025-07-25 13:43   ` Bruce Richardson
2025-07-25 12:49 ` [RFC PATCH 05/14] net/intel: introduce common vector capability function Ciara Loftus
2025-07-25 13:45   ` Bruce Richardson
2025-07-25 12:49 ` [RFC PATCH 06/14] net/ice: use the new " Ciara Loftus
2025-07-25 13:56   ` Bruce Richardson
2025-08-06 14:46     ` Loftus, Ciara
2025-07-25 12:49 ` [RFC PATCH 07/14] net/iavf: " Ciara Loftus
2025-07-25 12:49 ` [RFC PATCH 08/14] net/i40e: " Ciara Loftus
2025-07-25 12:49 ` [RFC PATCH 09/14] net/iavf: remove redundant field from iavf adapter struct Ciara Loftus
2025-07-25 14:51   ` Bruce Richardson
2025-07-25 12:49 ` [RFC PATCH 10/14] net/intel: introduce infrastructure for Rx path selection Ciara Loftus
2025-07-25 15:21   ` Bruce Richardson
2025-08-06 10:14     ` Loftus, Ciara
2025-08-06 10:36       ` Bruce Richardson
2025-07-25 12:49 ` [RFC PATCH 11/14] net/ice: remove unsupported Rx offload Ciara Loftus
2025-07-25 15:22   ` Bruce Richardson
2025-07-25 12:49 ` [RFC PATCH 12/14] net/ice: use the common Rx path selection infrastructure Ciara Loftus
2025-07-25 12:49 ` [RFC PATCH 13/14] net/iavf: " Ciara Loftus
2025-07-25 12:49 ` [RFC PATCH 14/14] net/i40e: " Ciara Loftus
2025-08-07 12:39 ` [PATCH v2 00/15] net/intel: rx path selection simplification Ciara Loftus
2025-08-07 12:39   ` [PATCH v2 01/15] net/ice: use the same Rx path across process types Ciara Loftus
2025-08-07 12:39   ` [PATCH v2 02/15] net/iavf: rename Rx/Tx function type variables Ciara Loftus
2025-08-07 12:39   ` [PATCH v2 03/15] net/iavf: use the same Rx path across process types Ciara Loftus
2025-08-07 12:39   ` Ciara Loftus [this message]
2025-08-07 12:39   ` [PATCH v2 05/15] net/intel: introduce common vector capability function Ciara Loftus
2025-08-07 12:39   ` [PATCH v2 06/15] net/ice: use the new " Ciara Loftus
2025-08-07 12:39   ` [PATCH v2 07/15] net/iavf: " Ciara Loftus
2025-08-07 12:39   ` [PATCH v2 08/15] net/i40e: " Ciara Loftus
2025-08-07 12:39   ` [PATCH v2 09/15] net/iavf: remove redundant field from iavf adapter struct Ciara Loftus
2025-08-07 12:39   ` [PATCH v2 10/15] net/ice: remove unsupported Rx offload Ciara Loftus
2025-08-07 12:39   ` [PATCH v2 11/15] net/iavf: reorder enum of Rx function types Ciara Loftus
2025-08-07 12:39   ` [PATCH v2 12/15] net/intel: introduce infrastructure for Rx path selection Ciara Loftus
2025-08-07 12:39   ` [PATCH v2 13/15] net/ice: use the common Rx path selection infrastructure Ciara Loftus
2025-08-07 12:39   ` [PATCH v2 14/15] net/iavf: " Ciara Loftus
2025-08-07 12:39   ` [PATCH v2 15/15] net/i40e: " Ciara Loftus
