From: Ciara Loftus <ciara.loftus@intel.com>
To: dev@dpdk.org
Cc: Ciara Loftus <ciara.loftus@intel.com>
Subject: [PATCH 08/13] net/idpf: use common Tx path selection infrastructure
Date: Tue, 9 Dec 2025 11:26:47 +0000 [thread overview]
Message-ID: <20251209112652.963981-9-ciara.loftus@intel.com> (raw)
In-Reply-To: <20251209112652.963981-1-ciara.loftus@intel.com>
Replace the existing complicated selection logic with a call to the
common function. Introduce a new "single queue" feature in the common
infrastructure, which indicates whether single or split queue mode is
used by the given path.
Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
---
drivers/net/intel/common/tx.h | 5 +
drivers/net/intel/idpf/idpf_common_device.h | 10 ++
drivers/net/intel/idpf/idpf_common_rxtx.c | 49 ++++++++
drivers/net/intel/idpf/idpf_common_rxtx.h | 12 ++
drivers/net/intel/idpf/idpf_rxtx.c | 112 +++++-------------
drivers/net/intel/idpf/idpf_rxtx_vec_common.h | 10 --
6 files changed, 107 insertions(+), 91 deletions(-)
diff --git a/drivers/net/intel/common/tx.h b/drivers/net/intel/common/tx.h
index 5d965a86c9..32cee09e8f 100644
--- a/drivers/net/intel/common/tx.h
+++ b/drivers/net/intel/common/tx.h
@@ -122,6 +122,7 @@ struct ci_tx_path_features_extra {
bool simple_tx;
bool ctx_desc;
bool disabled;
+ bool single_queue;
};
struct ci_tx_path_features {
@@ -318,6 +319,10 @@ ci_tx_path_select(struct ci_tx_path_features req_features,
if (path_features->extra.simple_tx && !req_features.extra.simple_tx)
continue;
+ /* If requested, ensure the path supports single queue TX. */
+ if (path_features->extra.single_queue != req_features.extra.single_queue)
+ continue;
+
/* Ensure the path supports the requested TX offloads. */
if ((path_features->tx_offloads & req_features.tx_offloads) !=
req_features.tx_offloads)
diff --git a/drivers/net/intel/idpf/idpf_common_device.h b/drivers/net/intel/idpf/idpf_common_device.h
index c32dcfbb12..eff04a83eb 100644
--- a/drivers/net/intel/idpf/idpf_common_device.h
+++ b/drivers/net/intel/idpf/idpf_common_device.h
@@ -75,6 +75,15 @@ enum idpf_rx_func_type {
IDPF_RX_MAX
};
+enum idpf_tx_func_type {
+ IDPF_TX_DEFAULT,
+ IDPF_TX_SINGLEQ,
+ IDPF_TX_SINGLEQ_AVX2,
+ IDPF_TX_AVX512,
+ IDPF_TX_SINGLEQ_AVX512,
+ IDPF_TX_MAX
+};
+
struct idpf_adapter {
struct idpf_hw hw;
struct virtchnl2_version_info virtchnl_version;
@@ -92,6 +101,7 @@ struct idpf_adapter {
uint64_t time_hw;
enum idpf_rx_func_type rx_func_type;
+ enum idpf_tx_func_type tx_func_type;
};
struct idpf_chunks_info {
diff --git a/drivers/net/intel/idpf/idpf_common_rxtx.c b/drivers/net/intel/idpf/idpf_common_rxtx.c
index a5d0795057..2d926ee939 100644
--- a/drivers/net/intel/idpf/idpf_common_rxtx.c
+++ b/drivers/net/intel/idpf/idpf_common_rxtx.c
@@ -1701,3 +1701,52 @@ const struct ci_rx_path_info idpf_rx_path_infos[] = {
#endif /* CC_AVX512_SUPPORT */
#endif /* RTE_ARCH_X86 */
};
+
+RTE_EXPORT_INTERNAL_SYMBOL(idpf_tx_path_infos)
+const struct ci_tx_path_info idpf_tx_path_infos[] = {
+ [IDPF_TX_DEFAULT] = {
+ .pkt_burst = idpf_dp_splitq_xmit_pkts,
+ .info = "Split Scalar",
+ .features = {
+ .tx_offloads = IDPF_TX_SCALAR_OFFLOADS
+ }
+ },
+ [IDPF_TX_SINGLEQ] = {
+ .pkt_burst = idpf_dp_singleq_xmit_pkts,
+ .info = "Single Scalar",
+ .features = {
+ .tx_offloads = IDPF_TX_SCALAR_OFFLOADS,
+ .extra.single_queue = true
+ }
+ },
+#ifdef RTE_ARCH_X86
+ [IDPF_TX_SINGLEQ_AVX2] = {
+ .pkt_burst = idpf_dp_singleq_xmit_pkts_avx2,
+ .info = "Single AVX2",
+ .features = {
+ .tx_offloads = IDPF_TX_VECTOR_OFFLOADS,
+ .simd_width = RTE_VECT_SIMD_256,
+ .extra.single_queue = true
+ }
+ },
+#ifdef CC_AVX512_SUPPORT
+ [IDPF_TX_AVX512] = {
+ .pkt_burst = idpf_dp_splitq_xmit_pkts_avx512,
+ .info = "Split AVX512",
+ .features = {
+ .tx_offloads = IDPF_TX_VECTOR_OFFLOADS,
+ .simd_width = RTE_VECT_SIMD_512
+ }
+ },
+ [IDPF_TX_SINGLEQ_AVX512] = {
+ .pkt_burst = idpf_dp_singleq_xmit_pkts_avx512,
+ .info = "Single AVX512",
+ .features = {
+ .tx_offloads = IDPF_TX_VECTOR_OFFLOADS,
+ .simd_width = RTE_VECT_SIMD_512,
+ .extra.single_queue = true
+ }
+ },
+#endif /* CC_AVX512_SUPPORT */
+#endif /* RTE_ARCH_X86 */
+};
diff --git a/drivers/net/intel/idpf/idpf_common_rxtx.h b/drivers/net/intel/idpf/idpf_common_rxtx.h
index 3bc3323af4..7c6ff5d047 100644
--- a/drivers/net/intel/idpf/idpf_common_rxtx.h
+++ b/drivers/net/intel/idpf/idpf_common_rxtx.h
@@ -106,6 +106,17 @@
RTE_ETH_RX_OFFLOAD_SCATTER)
#define IDPF_RX_VECTOR_OFFLOADS 0
+#define IDPF_TX_SCALAR_OFFLOADS ( \
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+
+#define IDPF_TX_VECTOR_OFFLOADS RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE
+
struct idpf_rx_stats {
RTE_ATOMIC(uint64_t) mbuf_alloc_failed;
};
@@ -264,5 +275,6 @@ uint16_t idpf_dp_singleq_xmit_pkts_avx2(void *tx_queue,
uint16_t nb_pkts);
extern const struct ci_rx_path_info idpf_rx_path_infos[IDPF_RX_MAX];
+extern const struct ci_tx_path_info idpf_tx_path_infos[IDPF_TX_MAX];
#endif /* _IDPF_COMMON_RXTX_H_ */
diff --git a/drivers/net/intel/idpf/idpf_rxtx.c b/drivers/net/intel/idpf/idpf_rxtx.c
index 4796d8b862..1fd55de9ab 100644
--- a/drivers/net/intel/idpf/idpf_rxtx.c
+++ b/drivers/net/intel/idpf/idpf_rxtx.c
@@ -813,97 +813,47 @@ idpf_set_tx_function(struct rte_eth_dev *dev)
{
struct idpf_vport *vport = dev->data->dev_private;
#ifdef RTE_ARCH_X86
- enum rte_vect_max_simd tx_simd_width = RTE_VECT_SIMD_DISABLED;
#ifdef CC_AVX512_SUPPORT
struct ci_tx_queue *txq;
int i;
#endif /* CC_AVX512_SUPPORT */
-
- if (idpf_tx_vec_dev_check_default(dev) == IDPF_VECTOR_PATH &&
- rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
- vport->tx_vec_allowed = true;
- tx_simd_width = idpf_get_max_simd_bitwidth();
-#ifdef CC_AVX512_SUPPORT
- if (tx_simd_width == RTE_VECT_SIMD_512) {
- for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- idpf_qc_tx_vec_avx512_setup(txq);
- }
- }
-#else
- PMD_DRV_LOG(NOTICE,
- "AVX512 is not supported in build env");
-#endif /* CC_AVX512_SUPPORT */
- } else {
- vport->tx_vec_allowed = false;
- }
#endif /* RTE_ARCH_X86 */
+ struct idpf_adapter *ad = vport->adapter;
+ struct ci_tx_path_features req_features = {
+ .tx_offloads = dev->data->dev_conf.txmode.offloads,
+ .simd_width = RTE_VECT_SIMD_DISABLED,
+ .extra.single_queue = (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+ };
+
+#ifdef RTE_ARCH_X86
+ if (idpf_tx_vec_dev_check_default(dev) == IDPF_VECTOR_PATH)
+ req_features.simd_width = idpf_get_max_simd_bitwidth();
+#endif
+
+ ad->tx_func_type = ci_tx_path_select(req_features,
+ &idpf_tx_path_infos[0],
+ IDPF_TX_MAX,
+ IDPF_TX_DEFAULT);
+
+ dev->tx_pkt_burst = idpf_tx_path_infos[ad->tx_func_type].pkt_burst;
+ dev->tx_pkt_prepare = idpf_dp_prep_pkts;
+ PMD_DRV_LOG(NOTICE, "Using %s Tx (port %d).",
+ idpf_tx_path_infos[ad->tx_func_type].info, dev->data->port_id);
#ifdef RTE_ARCH_X86
- if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
- if (vport->tx_vec_allowed) {
+ if (idpf_tx_path_infos[ad->tx_func_type].features.simd_width >= RTE_VECT_SIMD_256 &&
+ idpf_tx_path_infos[ad->tx_func_type].features.extra.single_queue) {
#ifdef CC_AVX512_SUPPORT
- if (tx_simd_width == RTE_VECT_SIMD_512) {
- PMD_DRV_LOG(NOTICE,
- "Using Split AVX512 Vector Tx (port %d).",
- dev->data->port_id);
- dev->tx_pkt_burst = idpf_dp_splitq_xmit_pkts_avx512;
- dev->tx_pkt_prepare = idpf_dp_prep_pkts;
- return;
- }
-#endif /* CC_AVX512_SUPPORT */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (txq == NULL)
+ continue;
+ if (idpf_tx_path_infos[ad->tx_func_type].features.simd_width ==
+ RTE_VECT_SIMD_512)
+ idpf_qc_tx_vec_avx512_setup(txq);
}
- PMD_DRV_LOG(NOTICE,
- "Using Split Scalar Tx (port %d).",
- dev->data->port_id);
- dev->tx_pkt_burst = idpf_dp_splitq_xmit_pkts;
- dev->tx_pkt_prepare = idpf_dp_prep_pkts;
- } else {
- if (vport->tx_vec_allowed) {
-#ifdef CC_AVX512_SUPPORT
- if (tx_simd_width == RTE_VECT_SIMD_512) {
- for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL)
- continue;
- idpf_qc_tx_vec_avx512_setup(txq);
- }
- PMD_DRV_LOG(NOTICE,
- "Using Single AVX512 Vector Tx (port %d).",
- dev->data->port_id);
- dev->tx_pkt_burst = idpf_dp_singleq_xmit_pkts_avx512;
- dev->tx_pkt_prepare = idpf_dp_prep_pkts;
- return;
- }
#endif /* CC_AVX512_SUPPORT */
- if (tx_simd_width == RTE_VECT_SIMD_256) {
- PMD_DRV_LOG(NOTICE,
- "Using Single AVX2 Vector Tx (port %d).",
- dev->data->port_id);
- dev->tx_pkt_burst = idpf_dp_singleq_xmit_pkts_avx2;
- dev->tx_pkt_prepare = idpf_dp_prep_pkts;
- return;
- }
- }
- PMD_DRV_LOG(NOTICE,
- "Using Single Scalar Tx (port %d).",
- dev->data->port_id);
- dev->tx_pkt_burst = idpf_dp_singleq_xmit_pkts;
- dev->tx_pkt_prepare = idpf_dp_prep_pkts;
- }
-#else
- if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
- PMD_DRV_LOG(NOTICE,
- "Using Split Scalar Tx (port %d).",
- dev->data->port_id);
- dev->tx_pkt_burst = idpf_dp_splitq_xmit_pkts;
- dev->tx_pkt_prepare = idpf_dp_prep_pkts;
- } else {
- PMD_DRV_LOG(NOTICE,
- "Using Single Scalar Tx (port %d).",
- dev->data->port_id);
- dev->tx_pkt_burst = idpf_dp_singleq_xmit_pkts;
- dev->tx_pkt_prepare = idpf_dp_prep_pkts;
+ vport->tx_vec_allowed = true;
}
#endif /* RTE_ARCH_X86 */
}
diff --git a/drivers/net/intel/idpf/idpf_rxtx_vec_common.h b/drivers/net/intel/idpf/idpf_rxtx_vec_common.h
index ecdf2f0e23..425f0792a1 100644
--- a/drivers/net/intel/idpf/idpf_rxtx_vec_common.h
+++ b/drivers/net/intel/idpf/idpf_rxtx_vec_common.h
@@ -23,13 +23,6 @@
RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
RTE_ETH_RX_OFFLOAD_TIMESTAMP)
-#define IDPF_TX_NO_VECTOR_FLAGS ( \
- RTE_ETH_TX_OFFLOAD_TCP_TSO | \
- RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
- RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
- RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
- RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
- RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
static inline int
idpf_tx_desc_done(struct ci_tx_queue *txq, uint16_t idx)
@@ -74,9 +67,6 @@ idpf_tx_vec_queue_default(struct ci_tx_queue *txq)
(txq->tx_rs_thresh & 3) != 0)
return IDPF_SCALAR_PATH;
- if ((txq->offloads & IDPF_TX_NO_VECTOR_FLAGS) != 0)
- return IDPF_SCALAR_PATH;
-
return IDPF_VECTOR_PATH;
}
--
2.43.0
next prev parent reply other threads:[~2025-12-09 11:28 UTC|newest]
Thread overview: 45+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-12-09 11:26 [PATCH 00/13] net/intel: tx path selection simplification Ciara Loftus
2025-12-09 11:26 ` [PATCH 01/13] net/intel: introduce infrastructure for Tx path selection Ciara Loftus
2025-12-11 10:25 ` Bruce Richardson
2025-12-09 11:26 ` [PATCH 02/13] net/ice: use same Tx path across processes Ciara Loftus
2025-12-11 11:39 ` Bruce Richardson
2025-12-12 10:39 ` Loftus, Ciara
2025-12-09 11:26 ` [PATCH 03/13] net/ice: use common Tx path selection infrastructure Ciara Loftus
2025-12-11 11:56 ` Bruce Richardson
2025-12-11 12:02 ` Bruce Richardson
2025-12-09 11:26 ` [PATCH 04/13] net/iavf: use same Tx path across processes Ciara Loftus
2025-12-09 11:26 ` [PATCH 05/13] net/iavf: use common Tx path selection infrastructure Ciara Loftus
2025-12-09 11:26 ` [PATCH 06/13] net/i40e: use same Tx path across processes Ciara Loftus
2025-12-09 11:26 ` [PATCH 07/13] net/i40e: use common Tx path selection infrastructure Ciara Loftus
2025-12-09 11:26 ` Ciara Loftus [this message]
2025-12-09 11:26 ` [PATCH 09/13] net/cpfl: " Ciara Loftus
2025-12-09 11:26 ` [PATCH 10/13] docs: fix TSO and checksum offload feature status in ice doc Ciara Loftus
2025-12-11 11:58 ` Bruce Richardson
2025-12-09 11:26 ` [PATCH 11/13] docs: fix TSO feature status in iavf driver documentation Ciara Loftus
2025-12-11 11:58 ` Bruce Richardson
2025-12-09 11:26 ` [PATCH 12/13] docs: fix inline crypto feature status in iavf driver doc Ciara Loftus
2025-12-11 11:59 ` Bruce Richardson
2025-12-09 11:26 ` [PATCH 13/13] docs: fix TSO feature status in i40e driver documentation Ciara Loftus
2025-12-11 11:59 ` Bruce Richardson
2025-12-12 10:33 ` [PATCH v2 00/10] net/intel: tx path selection simplification Ciara Loftus
2025-12-12 10:33 ` [PATCH v2 01/10] net/intel: introduce infrastructure for Tx path selection Ciara Loftus
2025-12-12 10:33 ` [PATCH v2 02/10] net/ice: use common Tx path selection infrastructure Ciara Loftus
2025-12-12 10:33 ` [PATCH v2 03/10] net/iavf: " Ciara Loftus
2025-12-12 10:33 ` [PATCH v2 04/10] net/i40e: " Ciara Loftus
2025-12-12 10:33 ` [PATCH v2 05/10] net/idpf: " Ciara Loftus
2025-12-12 10:33 ` [PATCH v2 06/10] net/cpfl: " Ciara Loftus
2025-12-12 10:33 ` [PATCH v2 07/10] docs: fix TSO and checksum offload feature status in ice doc Ciara Loftus
2025-12-12 10:33 ` [PATCH v2 08/10] docs: fix TSO feature status in iavf driver documentation Ciara Loftus
2025-12-12 10:33 ` [PATCH v2 09/10] docs: fix inline crypto feature status in iavf driver doc Ciara Loftus
2025-12-12 10:33 ` [PATCH v2 10/10] docs: fix TSO feature status in i40e driver documentation Ciara Loftus
2025-12-12 11:06 ` [PATCH v3 00/10] net/intel: tx path selection simplification Ciara Loftus
2025-12-12 11:06 ` [PATCH v3 01/10] net/intel: introduce infrastructure for Tx path selection Ciara Loftus
2025-12-12 11:06 ` [PATCH v3 02/10] net/ice: use common Tx path selection infrastructure Ciara Loftus
2025-12-12 11:06 ` [PATCH v3 03/10] net/iavf: " Ciara Loftus
2025-12-12 11:06 ` [PATCH v3 04/10] net/i40e: " Ciara Loftus
2025-12-12 11:06 ` [PATCH v3 05/10] net/idpf: " Ciara Loftus
2025-12-12 11:06 ` [PATCH v3 06/10] net/cpfl: " Ciara Loftus
2025-12-12 11:06 ` [PATCH v3 07/10] docs: fix TSO and checksum offload feature status in ice doc Ciara Loftus
2025-12-12 11:06 ` [PATCH v3 08/10] docs: fix TSO feature status in iavf driver documentation Ciara Loftus
2025-12-12 11:06 ` [PATCH v3 09/10] docs: fix inline crypto feature status in iavf driver doc Ciara Loftus
2025-12-12 11:06 ` [PATCH v3 10/10] docs: fix TSO feature status in i40e driver documentation Ciara Loftus
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20251209112652.963981-9-ciara.loftus@intel.com \
--to=ciara.loftus@intel.com \
--cc=dev@dpdk.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).