DPDK patches and discussions
 help / color / mirror / Atom feed
From: Ciara Loftus <ciara.loftus@intel.com>
To: dev@dpdk.org
Cc: Ciara Loftus <ciara.loftus@intel.com>
Subject: [PATCH 07/13] net/i40e: use common Tx path selection infrastructure
Date: Tue,  9 Dec 2025 11:26:46 +0000	[thread overview]
Message-ID: <20251209112652.963981-8-ciara.loftus@intel.com> (raw)
In-Reply-To: <20251209112652.963981-1-ciara.loftus@intel.com>

Replace the existing complicated Tx path selection logic with the use
of the common infrastructure function.

Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
---
 drivers/net/intel/i40e/i40e_ethdev.h          |   2 -
 drivers/net/intel/i40e/i40e_rxtx.c            | 112 +++++++++---------
 drivers/net/intel/i40e/i40e_rxtx.h            |  20 +++-
 .../net/intel/i40e/i40e_rxtx_vec_altivec.c    |   6 -
 drivers/net/intel/i40e/i40e_rxtx_vec_neon.c   |   6 -
 drivers/net/intel/i40e/i40e_rxtx_vec_sse.c    |   6 -
 6 files changed, 78 insertions(+), 74 deletions(-)

diff --git a/drivers/net/intel/i40e/i40e_ethdev.h b/drivers/net/intel/i40e/i40e_ethdev.h
index 9a89f94f0e..8a86e26858 100644
--- a/drivers/net/intel/i40e/i40e_ethdev.h
+++ b/drivers/net/intel/i40e/i40e_ethdev.h
@@ -1290,8 +1290,6 @@ struct i40e_adapter {
 
 	/* For RSS reta table update */
 	uint8_t rss_reta_updated;
-
-	enum rte_vect_max_simd tx_simd_width;
 };
 
 /**
diff --git a/drivers/net/intel/i40e/i40e_rxtx.c b/drivers/net/intel/i40e/i40e_rxtx.c
index 04c3a6c311..1f9ccd2aa7 100644
--- a/drivers/net/intel/i40e/i40e_rxtx.c
+++ b/drivers/net/intel/i40e/i40e_rxtx.c
@@ -2375,8 +2375,7 @@ i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev,
 
 	/* check vector conflict */
 	if (ad->tx_vec_allowed) {
-		if (txq->tx_rs_thresh > I40E_TX_MAX_FREE_BUF_SZ ||
-		    i40e_txq_vec_setup(txq)) {
+		if (txq->tx_rs_thresh > I40E_TX_MAX_FREE_BUF_SZ) {
 			PMD_DRV_LOG(ERR, "Failed vector tx setup.");
 			return -EINVAL;
 		}
@@ -3519,42 +3518,73 @@ i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct ci_tx_queue *txq)
 				txq->queue_id);
 }
 
-static const struct {
-	eth_tx_burst_t pkt_burst;
-	const char *info;
-} i40e_tx_burst_infos[] = {
+static const struct ci_tx_path_info i40e_tx_path_infos[] = {
 	[I40E_TX_DEFAULT] = {
 		.pkt_burst = i40e_xmit_pkts,
 		.info = "Scalar",
+		.features = {
+			.tx_offloads = I40E_TX_SCALAR_OFFLOADS,
+		},
+		.pkt_prep = i40e_prep_pkts,
 	},
 	[I40E_TX_SCALAR_SIMPLE] = {
 		.pkt_burst = i40e_xmit_pkts_simple,
 		.info = "Scalar Simple",
+		.features = {
+			.tx_offloads = I40E_TX_SCALAR_OFFLOADS,
+			.extra.simple_tx = true
+		},
+		.pkt_prep = i40e_simple_prep_pkts,
 	},
 #ifdef RTE_ARCH_X86
 	[I40E_TX_SSE] = {
 		.pkt_burst = i40e_xmit_pkts_vec,
 		.info = "Vector SSE",
+		.features = {
+			.tx_offloads = I40E_TX_VECTOR_OFFLOADS,
+			.simd_width = RTE_VECT_SIMD_128,
+		},
+		.pkt_prep = i40e_simple_prep_pkts,
 	},
 	[I40E_TX_AVX2] = {
 		.pkt_burst = i40e_xmit_pkts_vec_avx2,
 		.info = "Vector AVX2",
+		.features = {
+			.tx_offloads = I40E_TX_VECTOR_OFFLOADS,
+			.simd_width = RTE_VECT_SIMD_256,
+		},
+		.pkt_prep = i40e_simple_prep_pkts,
 	},
 #ifdef CC_AVX512_SUPPORT
 	[I40E_TX_AVX512] = {
 		.pkt_burst = i40e_xmit_pkts_vec_avx512,
 		.info = "Vector AVX512",
+		.features = {
+			.tx_offloads = I40E_TX_VECTOR_OFFLOADS,
+			.simd_width = RTE_VECT_SIMD_512,
+		},
+		.pkt_prep = i40e_simple_prep_pkts,
 	},
 #endif
 #elif defined(RTE_ARCH_ARM64)
 	[I40E_TX_NEON] = {
 		.pkt_burst = i40e_xmit_pkts_vec,
 		.info = "Vector Neon",
+		.features = {
+			.tx_offloads = I40E_TX_VECTOR_OFFLOADS,
+			.simd_width = RTE_VECT_SIMD_128,
+		},
+		.pkt_prep = i40e_simple_prep_pkts,
 	},
 #elif defined(RTE_ARCH_PPC_64)
 	[I40E_TX_ALTIVEC] = {
 		.pkt_burst = i40e_xmit_pkts_vec,
 		.info = "Vector AltiVec",
+		.features = {
+			.tx_offloads = I40E_TX_VECTOR_OFFLOADS,
+			.simd_width = RTE_VECT_SIMD_128,
+		},
+		.pkt_prep = i40e_simple_prep_pkts,
 	},
 #endif
 };
@@ -3565,64 +3595,40 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
 	struct i40e_adapter *ad =
 		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	uint64_t mbuf_check = ad->mbuf_check;
-	int i;
+	struct ci_tx_path_features req_features = {
+		.tx_offloads = dev->data->dev_conf.txmode.offloads,
+		.simd_width = RTE_VECT_SIMD_DISABLED,
+		.extra.simple_tx = ad->tx_simple_allowed
+	};
 
 	/* The primary process selects the tx path for all processes. */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		goto out;
-#ifdef RTE_ARCH_X86
-	ad->tx_simd_width = i40e_get_max_simd_bitwidth();
-#endif
-	if (ad->tx_vec_allowed) {
-		for (i = 0; i < dev->data->nb_tx_queues; i++) {
-			struct ci_tx_queue *txq =
-				dev->data->tx_queues[i];
-
-			if (txq && i40e_txq_vec_setup(txq)) {
-				ad->tx_vec_allowed = false;
-				break;
-			}
-		}
-	}
 
-	if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
-		ad->tx_vec_allowed = false;
-
-	if (ad->tx_simple_allowed) {
-		if (ad->tx_vec_allowed) {
+	if (ad->tx_vec_allowed) {
 #ifdef RTE_ARCH_X86
-			if (ad->tx_simd_width == RTE_VECT_SIMD_512) {
-#ifdef CC_AVX512_SUPPORT
-				ad->tx_func_type = I40E_TX_AVX512;
+		req_features.simd_width = i40e_get_max_simd_bitwidth();
 #else
-				ad->tx_func_type = I40E_TX_DEFAULT;
+		req_features.simd_width = rte_vect_get_max_simd_bitwidth();
 #endif
-			} else {
-				ad->tx_func_type = ad->tx_simd_width == RTE_VECT_SIMD_256 ?
-						    I40E_TX_AVX2 :
-						    I40E_TX_SSE;
-				dev->recycle_tx_mbufs_reuse = i40e_recycle_tx_mbufs_reuse_vec;
-			}
-#else /* RTE_ARCH_X86 */
-			ad->tx_func_type = I40E_TX_SSE;
-			dev->recycle_tx_mbufs_reuse = i40e_recycle_tx_mbufs_reuse_vec;
-#endif /* RTE_ARCH_X86 */
-		} else {
-			ad->tx_func_type = I40E_TX_SCALAR_SIMPLE;
-			dev->recycle_tx_mbufs_reuse = i40e_recycle_tx_mbufs_reuse_vec;
-		}
-		dev->tx_pkt_prepare = i40e_simple_prep_pkts;
-	} else {
-		ad->tx_func_type = I40E_TX_DEFAULT;
-		dev->tx_pkt_prepare = i40e_prep_pkts;
 	}
 
+	ad->tx_func_type = ci_tx_path_select(req_features, &i40e_tx_path_infos[0],
+						RTE_DIM(i40e_tx_path_infos), I40E_TX_DEFAULT);
+
 out:
 	dev->tx_pkt_burst = mbuf_check ? i40e_xmit_pkts_check :
-					i40e_tx_burst_infos[ad->tx_func_type].pkt_burst;
+					i40e_tx_path_infos[ad->tx_func_type].pkt_burst;
 
 	PMD_DRV_LOG(NOTICE, "Using %s (port %d).",
-		i40e_tx_burst_infos[ad->tx_func_type].info, dev->data->port_id);
+		i40e_tx_path_infos[ad->tx_func_type].info, dev->data->port_id);
+
+	if (ad->tx_func_type == I40E_TX_SCALAR_SIMPLE ||
+			ad->tx_func_type == I40E_TX_SSE ||
+			ad->tx_func_type == I40E_TX_NEON ||
+			ad->tx_func_type == I40E_TX_ALTIVEC ||
+			ad->tx_func_type == I40E_TX_AVX2)
+		dev->recycle_tx_mbufs_reuse = i40e_recycle_tx_mbufs_reuse_vec;
 }
 
 int
@@ -3633,10 +3639,10 @@ i40e_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
 	int ret = -EINVAL;
 	unsigned int i;
 
-	for (i = 0; i < RTE_DIM(i40e_tx_burst_infos); ++i) {
-		if (pkt_burst == i40e_tx_burst_infos[i].pkt_burst) {
+	for (i = 0; i < RTE_DIM(i40e_tx_path_infos); ++i) {
+		if (pkt_burst == i40e_tx_path_infos[i].pkt_burst) {
 			snprintf(mode->info, sizeof(mode->info), "%s",
-				 i40e_tx_burst_infos[i].info);
+				 i40e_tx_path_infos[i].info);
 			ret = 0;
 			break;
 		}
diff --git a/drivers/net/intel/i40e/i40e_rxtx.h b/drivers/net/intel/i40e/i40e_rxtx.h
index b5a901794f..ed173d8f17 100644
--- a/drivers/net/intel/i40e/i40e_rxtx.h
+++ b/drivers/net/intel/i40e/i40e_rxtx.h
@@ -91,6 +91,25 @@ enum i40e_header_split_mode {
 		RTE_ETH_RX_OFFLOAD_VLAN_FILTER |	\
 		RTE_ETH_RX_OFFLOAD_RSS_HASH)
 
+#define I40E_TX_SCALAR_OFFLOADS (			\
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |	\
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |	\
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |		\
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |		\
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |		\
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |		\
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |	\
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |		\
+		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |	\
+		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |	\
+		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |	\
+		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |	\
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		\
+		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |	\
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+
+#define I40E_TX_VECTOR_OFFLOADS RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
+
 /** Offload features */
 union i40e_tx_offload {
 	uint64_t data;
@@ -165,7 +184,6 @@ uint16_t i40e_recv_scattered_pkts_vec(void *rx_queue,
 				      uint16_t nb_pkts);
 int i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
 int i40e_rxq_vec_setup(struct ci_rx_queue *rxq);
-int i40e_txq_vec_setup(struct ci_tx_queue *txq);
 void i40e_rx_queue_release_mbufs_vec(struct ci_rx_queue *rxq);
 uint16_t i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 				   uint16_t nb_pkts);
diff --git a/drivers/net/intel/i40e/i40e_rxtx_vec_altivec.c b/drivers/net/intel/i40e/i40e_rxtx_vec_altivec.c
index 87a57e7520..bbb6d907cf 100644
--- a/drivers/net/intel/i40e/i40e_rxtx_vec_altivec.c
+++ b/drivers/net/intel/i40e/i40e_rxtx_vec_altivec.c
@@ -547,12 +547,6 @@ i40e_rxq_vec_setup(struct ci_rx_queue *rxq)
 	return 0;
 }
 
-int __rte_cold
-i40e_txq_vec_setup(struct ci_tx_queue __rte_unused * txq)
-{
-	return 0;
-}
-
 int __rte_cold
 i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
 {
diff --git a/drivers/net/intel/i40e/i40e_rxtx_vec_neon.c b/drivers/net/intel/i40e/i40e_rxtx_vec_neon.c
index c9098e4c1a..b5be0c1b59 100644
--- a/drivers/net/intel/i40e/i40e_rxtx_vec_neon.c
+++ b/drivers/net/intel/i40e/i40e_rxtx_vec_neon.c
@@ -697,12 +697,6 @@ i40e_rxq_vec_setup(struct ci_rx_queue *rxq)
 	return 0;
 }
 
-int __rte_cold
-i40e_txq_vec_setup(struct ci_tx_queue *txq __rte_unused)
-{
-	return 0;
-}
-
 int __rte_cold
 i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
 {
diff --git a/drivers/net/intel/i40e/i40e_rxtx_vec_sse.c b/drivers/net/intel/i40e/i40e_rxtx_vec_sse.c
index c035408dcc..c209135890 100644
--- a/drivers/net/intel/i40e/i40e_rxtx_vec_sse.c
+++ b/drivers/net/intel/i40e/i40e_rxtx_vec_sse.c
@@ -704,12 +704,6 @@ i40e_rxq_vec_setup(struct ci_rx_queue *rxq)
 	return 0;
 }
 
-int __rte_cold
-i40e_txq_vec_setup(struct ci_tx_queue *txq __rte_unused)
-{
-	return 0;
-}
-
 int __rte_cold
 i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
 {
-- 
2.43.0


  parent reply	other threads:[~2025-12-09 11:27 UTC|newest]

Thread overview: 45+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-12-09 11:26 [PATCH 00/13] net/intel: tx path selection simplification Ciara Loftus
2025-12-09 11:26 ` [PATCH 01/13] net/intel: introduce infrastructure for Tx path selection Ciara Loftus
2025-12-11 10:25   ` Bruce Richardson
2025-12-09 11:26 ` [PATCH 02/13] net/ice: use same Tx path across processes Ciara Loftus
2025-12-11 11:39   ` Bruce Richardson
2025-12-12 10:39     ` Loftus, Ciara
2025-12-09 11:26 ` [PATCH 03/13] net/ice: use common Tx path selection infrastructure Ciara Loftus
2025-12-11 11:56   ` Bruce Richardson
2025-12-11 12:02     ` Bruce Richardson
2025-12-09 11:26 ` [PATCH 04/13] net/iavf: use same Tx path across processes Ciara Loftus
2025-12-09 11:26 ` [PATCH 05/13] net/iavf: use common Tx path selection infrastructure Ciara Loftus
2025-12-09 11:26 ` [PATCH 06/13] net/i40e: use same Tx path across processes Ciara Loftus
2025-12-09 11:26 ` Ciara Loftus [this message]
2025-12-09 11:26 ` [PATCH 08/13] net/idpf: use common Tx path selection infrastructure Ciara Loftus
2025-12-09 11:26 ` [PATCH 09/13] net/cpfl: " Ciara Loftus
2025-12-09 11:26 ` [PATCH 10/13] docs: fix TSO and checksum offload feature status in ice doc Ciara Loftus
2025-12-11 11:58   ` Bruce Richardson
2025-12-09 11:26 ` [PATCH 11/13] docs: fix TSO feature status in iavf driver documentation Ciara Loftus
2025-12-11 11:58   ` Bruce Richardson
2025-12-09 11:26 ` [PATCH 12/13] docs: fix inline crypto feature status in iavf driver doc Ciara Loftus
2025-12-11 11:59   ` Bruce Richardson
2025-12-09 11:26 ` [PATCH 13/13] docs: fix TSO feature status in i40e driver documentation Ciara Loftus
2025-12-11 11:59   ` Bruce Richardson
2025-12-12 10:33 ` [PATCH v2 00/10] net/intel: tx path selection simplification Ciara Loftus
2025-12-12 10:33   ` [PATCH v2 01/10] net/intel: introduce infrastructure for Tx path selection Ciara Loftus
2025-12-12 10:33   ` [PATCH v2 02/10] net/ice: use common Tx path selection infrastructure Ciara Loftus
2025-12-12 10:33   ` [PATCH v2 03/10] net/iavf: " Ciara Loftus
2025-12-12 10:33   ` [PATCH v2 04/10] net/i40e: " Ciara Loftus
2025-12-12 10:33   ` [PATCH v2 05/10] net/idpf: " Ciara Loftus
2025-12-12 10:33   ` [PATCH v2 06/10] net/cpfl: " Ciara Loftus
2025-12-12 10:33   ` [PATCH v2 07/10] docs: fix TSO and checksum offload feature status in ice doc Ciara Loftus
2025-12-12 10:33   ` [PATCH v2 08/10] docs: fix TSO feature status in iavf driver documentation Ciara Loftus
2025-12-12 10:33   ` [PATCH v2 09/10] docs: fix inline crypto feature status in iavf driver doc Ciara Loftus
2025-12-12 10:33   ` [PATCH v2 10/10] docs: fix TSO feature status in i40e driver documentation Ciara Loftus
2025-12-12 11:06   ` [PATCH v3 00/10] net/intel: tx path selection simplification Ciara Loftus
2025-12-12 11:06     ` [PATCH v3 01/10] net/intel: introduce infrastructure for Tx path selection Ciara Loftus
2025-12-12 11:06     ` [PATCH v3 02/10] net/ice: use common Tx path selection infrastructure Ciara Loftus
2025-12-12 11:06     ` [PATCH v3 03/10] net/iavf: " Ciara Loftus
2025-12-12 11:06     ` [PATCH v3 04/10] net/i40e: " Ciara Loftus
2025-12-12 11:06     ` [PATCH v3 05/10] net/idpf: " Ciara Loftus
2025-12-12 11:06     ` [PATCH v3 06/10] net/cpfl: " Ciara Loftus
2025-12-12 11:06     ` [PATCH v3 07/10] docs: fix TSO and checksum offload feature status in ice doc Ciara Loftus
2025-12-12 11:06     ` [PATCH v3 08/10] docs: fix TSO feature status in iavf driver documentation Ciara Loftus
2025-12-12 11:06     ` [PATCH v3 09/10] docs: fix inline crypto feature status in iavf driver doc Ciara Loftus
2025-12-12 11:06     ` [PATCH v3 10/10] docs: fix TSO feature status in i40e driver documentation Ciara Loftus

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20251209112652.963981-8-ciara.loftus@intel.com \
    --to=ciara.loftus@intel.com \
    --cc=dev@dpdk.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).