From: Ciara Loftus <ciara.loftus@intel.com>
To: dev@dpdk.org
Cc: Ciara Loftus <ciara.loftus@intel.com>
Subject: [PATCH 02/13] net/ice: use same Tx path across processes
Date: Tue, 9 Dec 2025 11:26:41 +0000 [thread overview]
Message-ID: <20251209112652.963981-3-ciara.loftus@intel.com> (raw)
In-Reply-To: <20251209112652.963981-1-ciara.loftus@intel.com>
In the interest of simplicity, let the primary process select the Tx
path to be used by all processes using the given device.
The many logs that report individual Tx path selections have been
consolidated into a single log.
Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
---
drivers/net/intel/ice/ice_ethdev.c | 1 +
drivers/net/intel/ice/ice_ethdev.h | 12 ++-
drivers/net/intel/ice/ice_rxtx.c | 139 ++++++++++++++++-------------
3 files changed, 87 insertions(+), 65 deletions(-)
diff --git a/drivers/net/intel/ice/ice_ethdev.c b/drivers/net/intel/ice/ice_ethdev.c
index c721d135f5..a805e78d03 100644
--- a/drivers/net/intel/ice/ice_ethdev.c
+++ b/drivers/net/intel/ice/ice_ethdev.c
@@ -3900,6 +3900,7 @@ ice_dev_configure(struct rte_eth_dev *dev)
ad->tx_simple_allowed = true;
ad->rx_func_type = ICE_RX_DEFAULT;
+ ad->tx_func_type = ICE_TX_DEFAULT;
if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
diff --git a/drivers/net/intel/ice/ice_ethdev.h b/drivers/net/intel/ice/ice_ethdev.h
index 72ed65f13b..0b8af339d1 100644
--- a/drivers/net/intel/ice/ice_ethdev.h
+++ b/drivers/net/intel/ice/ice_ethdev.h
@@ -208,6 +208,16 @@ enum ice_rx_func_type {
ICE_RX_AVX512_SCATTERED_OFFLOAD,
};
+enum ice_tx_func_type {
+ ICE_TX_DEFAULT,
+ ICE_TX_SIMPLE,
+ ICE_TX_SSE,
+ ICE_TX_AVX2,
+ ICE_TX_AVX2_OFFLOAD,
+ ICE_TX_AVX512,
+ ICE_TX_AVX512_OFFLOAD,
+};
+
struct ice_adapter;
/**
@@ -658,6 +668,7 @@ struct ice_adapter {
bool tx_vec_allowed;
bool tx_simple_allowed;
enum ice_rx_func_type rx_func_type;
+ enum ice_tx_func_type tx_func_type;
/* ptype mapping table */
alignas(RTE_CACHE_LINE_MIN_SIZE) uint32_t ptype_tbl[ICE_MAX_PKT_TYPE];
bool is_safe_mode;
@@ -679,7 +690,6 @@ struct ice_adapter {
/* Set bit if the engine is disabled */
unsigned long disabled_engine_mask;
struct ice_parser *psr;
- enum rte_vect_max_simd tx_simd_width;
bool rx_vec_offload_support;
};
diff --git a/drivers/net/intel/ice/ice_rxtx.c b/drivers/net/intel/ice/ice_rxtx.c
index 74db0fbec9..f05ca83e5b 100644
--- a/drivers/net/intel/ice/ice_rxtx.c
+++ b/drivers/net/intel/ice/ice_rxtx.c
@@ -4091,6 +4091,44 @@ ice_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
return i;
}
+static const struct {
+ eth_tx_burst_t pkt_burst;
+ const char *info;
+} ice_tx_burst_infos[] = {
+ [ICE_TX_DEFAULT] = {
+ .pkt_burst = ice_xmit_pkts,
+ .info = "Scalar"
+ },
+ [ICE_TX_SIMPLE] = {
+ .pkt_burst = ice_xmit_pkts_simple,
+ .info = "Scalar Simple"
+ },
+#ifdef RTE_ARCH_X86
+ [ICE_TX_SSE] = {
+ .pkt_burst = ice_xmit_pkts_vec,
+ .info = "Vector SSE"
+ },
+ [ICE_TX_AVX2] = {
+ .pkt_burst = ice_xmit_pkts_vec_avx2,
+ .info = "Vector AVX2"
+ },
+ [ICE_TX_AVX2_OFFLOAD] = {
+ .pkt_burst = ice_xmit_pkts_vec_avx2_offload,
+ .info = "Offload Vector AVX2"
+ },
+#ifdef CC_AVX512_SUPPORT
+ [ICE_TX_AVX512] = {
+ .pkt_burst = ice_xmit_pkts_vec_avx512,
+ .info = "Vector AVX512"
+ },
+ [ICE_TX_AVX512_OFFLOAD] = {
+ .pkt_burst = ice_xmit_pkts_vec_avx512_offload,
+ .info = "Offload Vector AVX512"
+ },
+#endif
+#endif
+};
+
void __rte_cold
ice_set_tx_function(struct rte_eth_dev *dev)
{
@@ -4101,74 +4139,58 @@ ice_set_tx_function(struct rte_eth_dev *dev)
struct ci_tx_queue *txq;
int i;
int tx_check_ret = -1;
+ enum rte_vect_max_simd tx_simd_width = RTE_VECT_SIMD_DISABLED;
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- ad->tx_simd_width = RTE_VECT_SIMD_DISABLED;
- tx_check_ret = ice_tx_vec_dev_check(dev);
- ad->tx_simd_width = ice_get_max_simd_bitwidth();
- if (tx_check_ret >= 0 &&
- rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
- ad->tx_vec_allowed = true;
-
- if (ad->tx_simd_width < RTE_VECT_SIMD_256 &&
- tx_check_ret == ICE_VECTOR_OFFLOAD_PATH)
- ad->tx_vec_allowed = false;
-
- if (ad->tx_vec_allowed) {
- for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq && ice_txq_vec_setup(txq)) {
- ad->tx_vec_allowed = false;
- break;
- }
+ /* The primary process selects the tx path for all processes. */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ goto out;
+
+ tx_check_ret = ice_tx_vec_dev_check(dev);
+ tx_simd_width = ice_get_max_simd_bitwidth();
+ if (tx_check_ret >= 0 &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
+ ad->tx_vec_allowed = true;
+
+ if (tx_simd_width < RTE_VECT_SIMD_256 &&
+ tx_check_ret == ICE_VECTOR_OFFLOAD_PATH)
+ ad->tx_vec_allowed = false;
+
+ if (ad->tx_vec_allowed) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (txq && ice_txq_vec_setup(txq)) {
+ ad->tx_vec_allowed = false;
+ break;
}
}
- } else {
- ad->tx_vec_allowed = false;
}
+ } else {
+ ad->tx_vec_allowed = false;
}
if (ad->tx_vec_allowed) {
dev->tx_pkt_prepare = rte_eth_tx_pkt_prepare_dummy;
- if (ad->tx_simd_width == RTE_VECT_SIMD_512) {
+ if (tx_simd_width == RTE_VECT_SIMD_512) {
#ifdef CC_AVX512_SUPPORT
if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
- PMD_DRV_LOG(NOTICE,
- "Using AVX512 OFFLOAD Vector Tx (port %d).",
- dev->data->port_id);
- dev->tx_pkt_burst =
- ice_xmit_pkts_vec_avx512_offload;
+ ad->tx_func_type = ICE_TX_AVX512_OFFLOAD;
dev->tx_pkt_prepare = ice_prep_pkts;
} else {
- PMD_DRV_LOG(NOTICE,
- "Using AVX512 Vector Tx (port %d).",
- dev->data->port_id);
- dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
+ ad->tx_func_type = ICE_TX_AVX512;
}
#endif
} else {
if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
- PMD_DRV_LOG(NOTICE,
- "Using AVX2 OFFLOAD Vector Tx (port %d).",
- dev->data->port_id);
- dev->tx_pkt_burst =
- ice_xmit_pkts_vec_avx2_offload;
+ ad->tx_func_type = ICE_TX_AVX2_OFFLOAD;
dev->tx_pkt_prepare = ice_prep_pkts;
} else {
- PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
- ad->tx_simd_width == RTE_VECT_SIMD_256 ? "avx2 " : "",
- dev->data->port_id);
- dev->tx_pkt_burst = ad->tx_simd_width == RTE_VECT_SIMD_256 ?
- ice_xmit_pkts_vec_avx2 :
- ice_xmit_pkts_vec;
+ ad->tx_func_type = tx_simd_width == RTE_VECT_SIMD_256 ?
+ ICE_TX_AVX2 :
+ ICE_TX_SSE;
}
}
- if (mbuf_check) {
- ad->tx_pkt_burst = dev->tx_pkt_burst;
- dev->tx_pkt_burst = ice_xmit_pkts_check;
- }
- return;
+ goto out;
}
#endif
@@ -4186,24 +4208,13 @@ ice_set_tx_function(struct rte_eth_dev *dev)
ad->tx_pkt_burst = dev->tx_pkt_burst;
dev->tx_pkt_burst = ice_xmit_pkts_check;
}
-}
-static const struct {
- eth_tx_burst_t pkt_burst;
- const char *info;
-} ice_tx_burst_infos[] = {
- { ice_xmit_pkts_simple, "Scalar Simple" },
- { ice_xmit_pkts, "Scalar" },
-#ifdef RTE_ARCH_X86
-#ifdef CC_AVX512_SUPPORT
- { ice_xmit_pkts_vec_avx512, "Vector AVX512" },
- { ice_xmit_pkts_vec_avx512_offload, "Offload Vector AVX512" },
-#endif
- { ice_xmit_pkts_vec_avx2, "Vector AVX2" },
- { ice_xmit_pkts_vec_avx2_offload, "Offload Vector AVX2" },
- { ice_xmit_pkts_vec, "Vector SSE" },
-#endif
-};
+out:
+ dev->tx_pkt_burst = mbuf_check ? ice_xmit_pkts_check :
+ ice_tx_burst_infos[ad->tx_func_type].pkt_burst;
+ PMD_DRV_LOG(NOTICE, "Using %s (port %d).",
+ ice_tx_burst_infos[ad->tx_func_type].info, dev->data->port_id);
+}
int
ice_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
--
2.43.0
next prev parent reply other threads:[~2025-12-09 11:27 UTC|newest]
Thread overview: 45+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-12-09 11:26 [PATCH 00/13] net/intel: tx path selection simplification Ciara Loftus
2025-12-09 11:26 ` [PATCH 01/13] net/intel: introduce infrastructure for Tx path selection Ciara Loftus
2025-12-11 10:25 ` Bruce Richardson
2025-12-09 11:26 ` Ciara Loftus [this message]
2025-12-11 11:39 ` [PATCH 02/13] net/ice: use same Tx path across processes Bruce Richardson
2025-12-12 10:39 ` Loftus, Ciara
2025-12-09 11:26 ` [PATCH 03/13] net/ice: use common Tx path selection infrastructure Ciara Loftus
2025-12-11 11:56 ` Bruce Richardson
2025-12-11 12:02 ` Bruce Richardson
2025-12-09 11:26 ` [PATCH 04/13] net/iavf: use same Tx path across processes Ciara Loftus
2025-12-09 11:26 ` [PATCH 05/13] net/iavf: use common Tx path selection infrastructure Ciara Loftus
2025-12-09 11:26 ` [PATCH 06/13] net/i40e: use same Tx path across processes Ciara Loftus
2025-12-09 11:26 ` [PATCH 07/13] net/i40e: use common Tx path selection infrastructure Ciara Loftus
2025-12-09 11:26 ` [PATCH 08/13] net/idpf: " Ciara Loftus
2025-12-09 11:26 ` [PATCH 09/13] net/cpfl: " Ciara Loftus
2025-12-09 11:26 ` [PATCH 10/13] docs: fix TSO and checksum offload feature status in ice doc Ciara Loftus
2025-12-11 11:58 ` Bruce Richardson
2025-12-09 11:26 ` [PATCH 11/13] docs: fix TSO feature status in iavf driver documentation Ciara Loftus
2025-12-11 11:58 ` Bruce Richardson
2025-12-09 11:26 ` [PATCH 12/13] docs: fix inline crypto feature status in iavf driver doc Ciara Loftus
2025-12-11 11:59 ` Bruce Richardson
2025-12-09 11:26 ` [PATCH 13/13] docs: fix TSO feature status in i40e driver documentation Ciara Loftus
2025-12-11 11:59 ` Bruce Richardson
2025-12-12 10:33 ` [PATCH v2 00/10] net/intel: tx path selection simplification Ciara Loftus
2025-12-12 10:33 ` [PATCH v2 01/10] net/intel: introduce infrastructure for Tx path selection Ciara Loftus
2025-12-12 10:33 ` [PATCH v2 02/10] net/ice: use common Tx path selection infrastructure Ciara Loftus
2025-12-12 10:33 ` [PATCH v2 03/10] net/iavf: " Ciara Loftus
2025-12-12 10:33 ` [PATCH v2 04/10] net/i40e: " Ciara Loftus
2025-12-12 10:33 ` [PATCH v2 05/10] net/idpf: " Ciara Loftus
2025-12-12 10:33 ` [PATCH v2 06/10] net/cpfl: " Ciara Loftus
2025-12-12 10:33 ` [PATCH v2 07/10] docs: fix TSO and checksum offload feature status in ice doc Ciara Loftus
2025-12-12 10:33 ` [PATCH v2 08/10] docs: fix TSO feature status in iavf driver documentation Ciara Loftus
2025-12-12 10:33 ` [PATCH v2 09/10] docs: fix inline crypto feature status in iavf driver doc Ciara Loftus
2025-12-12 10:33 ` [PATCH v2 10/10] docs: fix TSO feature status in i40e driver documentation Ciara Loftus
2025-12-12 11:06 ` [PATCH v3 00/10] net/intel: tx path selection simplification Ciara Loftus
2025-12-12 11:06 ` [PATCH v3 01/10] net/intel: introduce infrastructure for Tx path selection Ciara Loftus
2025-12-12 11:06 ` [PATCH v3 02/10] net/ice: use common Tx path selection infrastructure Ciara Loftus
2025-12-12 11:06 ` [PATCH v3 03/10] net/iavf: " Ciara Loftus
2025-12-12 11:06 ` [PATCH v3 04/10] net/i40e: " Ciara Loftus
2025-12-12 11:06 ` [PATCH v3 05/10] net/idpf: " Ciara Loftus
2025-12-12 11:06 ` [PATCH v3 06/10] net/cpfl: " Ciara Loftus
2025-12-12 11:06 ` [PATCH v3 07/10] docs: fix TSO and checksum offload feature status in ice doc Ciara Loftus
2025-12-12 11:06 ` [PATCH v3 08/10] docs: fix TSO feature status in iavf driver documentation Ciara Loftus
2025-12-12 11:06 ` [PATCH v3 09/10] docs: fix inline crypto feature status in iavf driver doc Ciara Loftus
2025-12-12 11:06 ` [PATCH v3 10/10] docs: fix TSO feature status in i40e driver documentation Ciara Loftus
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20251209112652.963981-3-ciara.loftus@intel.com \
--to=ciara.loftus@intel.com \
--cc=dev@dpdk.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).