From: Bruce Richardson <bruce.richardson@intel.com>
To: Ciara Loftus <ciara.loftus@intel.com>
Cc: <dev@dpdk.org>
Subject: Re: [PATCH v2 13/15] net/ice: use the common Rx path selection infrastructure
Date: Mon, 11 Aug 2025 18:03:54 +0100
Message-ID: <aJoies3-W51XiRx6@bricha3-mobl1.ger.corp.intel.com>
In-Reply-To: <20250807123949.4063416-14-ciara.loftus@intel.com>
On Thu, Aug 07, 2025 at 12:39:47PM +0000, Ciara Loftus wrote:
> Replace the existing complicated logic with the use of the common
> function.
>
> Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>
> ---
> v2:
> * use the new names for the renamed structs and functions
> ---
> drivers/net/intel/ice/ice_ethdev.h | 1 -
> drivers/net/intel/ice/ice_rxtx.c | 162 +++++++++-----------
> drivers/net/intel/ice/ice_rxtx.h | 28 ++++
> drivers/net/intel/ice/ice_rxtx_vec_common.h | 17 +-
> 4 files changed, 102 insertions(+), 106 deletions(-)
>
> diff --git a/drivers/net/intel/ice/ice_ethdev.h b/drivers/net/intel/ice/ice_ethdev.h
> index 8d975c23de..c9b0b86836 100644
> --- a/drivers/net/intel/ice/ice_ethdev.h
> +++ b/drivers/net/intel/ice/ice_ethdev.h
> @@ -651,7 +651,6 @@ struct ice_adapter {
> struct ice_hw hw;
> struct ice_pf pf;
> bool rx_bulk_alloc_allowed;
> - bool rx_vec_allowed;
> bool tx_vec_allowed;
> bool tx_simple_allowed;
> enum ice_rx_func_type rx_func_type;
> diff --git a/drivers/net/intel/ice/ice_rxtx.c b/drivers/net/intel/ice/ice_rxtx.c
> index 8c197eefa9..b54edd7a6a 100644
> --- a/drivers/net/intel/ice/ice_rxtx.c
> +++ b/drivers/net/intel/ice/ice_rxtx.c
> @@ -3662,28 +3662,46 @@ ice_xmit_pkts_simple(void *tx_queue,
> return nb_tx;
> }
>
> -static const struct {
> - eth_rx_burst_t pkt_burst;
> - const char *info;
> -} ice_rx_burst_infos[] = {
> - [ICE_RX_DEFAULT] = { ice_recv_pkts, "Scalar" },
> - [ICE_RX_SCATTERED] = { ice_recv_scattered_pkts, "Scalar Scattered" },
> - [ICE_RX_BULK_ALLOC] = { ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" },
> +static const struct ci_rx_path_info ice_rx_path_infos[] = {
> + [ICE_RX_DEFAULT] = {
> + ice_recv_pkts, "Scalar",
> + {ICE_RX_SCALAR_OFFLOADS, RTE_VECT_SIMD_DISABLED, 0, 0, 0, 0}},
> + [ICE_RX_SCATTERED] = {ice_recv_scattered_pkts, "Scalar Scattered",
> + {ICE_RX_SCALAR_OFFLOADS, RTE_VECT_SIMD_DISABLED, CI_RX_PATH_SCATTERED, 0, 0, 0}},
> + [ICE_RX_BULK_ALLOC] = {ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc",
> + {ICE_RX_SCALAR_OFFLOADS, RTE_VECT_SIMD_DISABLED, 0, 0, CI_RX_PATH_BULK_ALLOC, 0}},
> #ifdef RTE_ARCH_X86
> - [ICE_RX_SSE] = { ice_recv_pkts_vec, "Vector SSE" },
> - [ICE_RX_SSE_SCATTERED] = { ice_recv_scattered_pkts_vec, "Vector SSE Scattered" },
> - [ICE_RX_AVX2] = { ice_recv_pkts_vec_avx2, "Vector AVX2" },
> - [ICE_RX_AVX2_SCATTERED] = { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
> - [ICE_RX_AVX2_OFFLOAD] = { ice_recv_pkts_vec_avx2_offload, "Offload Vector AVX2" },
> + [ICE_RX_SSE] = {ice_recv_pkts_vec, "Vector SSE",
> + {ICE_RX_VECTOR_OFFLOAD_OFFLOADS, RTE_VECT_SIMD_128,
> + 0, 0, CI_RX_PATH_BULK_ALLOC, 0}},
> + [ICE_RX_SSE_SCATTERED] = {ice_recv_scattered_pkts_vec, "Vector SSE Scattered",
> + {ICE_RX_VECTOR_OFFLOAD_OFFLOADS, RTE_VECT_SIMD_128,
> + CI_RX_PATH_SCATTERED, 0, CI_RX_PATH_BULK_ALLOC, 0}},
> + [ICE_RX_AVX2] = {ice_recv_pkts_vec_avx2, "Vector AVX2",
> + {ICE_RX_VECTOR_OFFLOADS, RTE_VECT_SIMD_256, 0, 0, CI_RX_PATH_BULK_ALLOC, 0}},
> + [ICE_RX_AVX2_SCATTERED] = {ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered",
> + {ICE_RX_VECTOR_OFFLOADS, RTE_VECT_SIMD_256,
> + CI_RX_PATH_SCATTERED, 0, CI_RX_PATH_BULK_ALLOC, 0}},
> + [ICE_RX_AVX2_OFFLOAD] = {ice_recv_pkts_vec_avx2_offload, "Offload Vector AVX2",
> + {ICE_RX_VECTOR_OFFLOAD_OFFLOADS, RTE_VECT_SIMD_256,
> + 0, 0, CI_RX_PATH_BULK_ALLOC, 0}},
> [ICE_RX_AVX2_SCATTERED_OFFLOAD] = {
> - ice_recv_scattered_pkts_vec_avx2_offload, "Offload Vector AVX2 Scattered" },
> + ice_recv_scattered_pkts_vec_avx2_offload, "Offload Vector AVX2 Scattered",
> + {ICE_RX_VECTOR_OFFLOAD_OFFLOADS, RTE_VECT_SIMD_256,
> + CI_RX_PATH_SCATTERED, 0, CI_RX_PATH_BULK_ALLOC, 0}},
> #ifdef CC_AVX512_SUPPORT
> - [ICE_RX_AVX512] = { ice_recv_pkts_vec_avx512, "Vector AVX512" },
> - [ICE_RX_AVX512_SCATTERED] = {
> - ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
> - [ICE_RX_AVX512_OFFLOAD] = { ice_recv_pkts_vec_avx512_offload, "Offload Vector AVX512" },
> + [ICE_RX_AVX512] = {ice_recv_pkts_vec_avx512, "Vector AVX512",
> + {ICE_RX_VECTOR_OFFLOADS, RTE_VECT_SIMD_512, 0, 0, CI_RX_PATH_BULK_ALLOC, 0}},
> + [ICE_RX_AVX512_SCATTERED] = {ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered",
> + {ICE_RX_VECTOR_OFFLOADS, RTE_VECT_SIMD_512,
> + CI_RX_PATH_SCATTERED, 0, CI_RX_PATH_BULK_ALLOC, 0}},
> + [ICE_RX_AVX512_OFFLOAD] = {ice_recv_pkts_vec_avx512_offload, "Offload Vector AVX512",
> + {ICE_RX_VECTOR_OFFLOAD_OFFLOADS, RTE_VECT_SIMD_512,
> + 0, 0, CI_RX_PATH_BULK_ALLOC, 0}},
> [ICE_RX_AVX512_SCATTERED_OFFLOAD] = {
> - ice_recv_scattered_pkts_vec_avx512_offload, "Offload Vector AVX512 Scattered" },
> + ice_recv_scattered_pkts_vec_avx512_offload, "Offload Vector AVX512 Scattered",
> + {ICE_RX_VECTOR_OFFLOAD_OFFLOADS, RTE_VECT_SIMD_512,
> + CI_RX_PATH_SCATTERED, 0, CI_RX_PATH_BULK_ALLOC, 0}},
> #endif
> #endif
> };
> @@ -3694,89 +3712,51 @@ ice_set_rx_function(struct rte_eth_dev *dev)
> PMD_INIT_FUNC_TRACE();
> struct ice_adapter *ad =
> ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> + enum rte_vect_max_simd rx_simd_width = RTE_VECT_SIMD_DISABLED;
> + struct ci_rx_path_features req_features = {
> + .rx_offloads = dev->data->dev_conf.rxmode.offloads,
> + .simd_width = RTE_VECT_SIMD_DISABLED,
> + };
>
> /* The primary process selects the rx path for all processes. */
> if (rte_eal_process_type() != RTE_PROC_PRIMARY)
> goto out;
>
> #ifdef RTE_ARCH_X86
> - struct ci_rx_queue *rxq;
> - int i;
> - int rx_check_ret = -1;
> - enum rte_vect_max_simd rx_simd_width = RTE_VECT_SIMD_DISABLED;
> -
> - rx_check_ret = ice_rx_vec_dev_check(dev);
> - if (ad->ptp_ena)
> - rx_check_ret = -1;
> - ad->rx_vec_offload_support =
> - (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH);
> - if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
> - rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
> - ad->rx_vec_allowed = true;
> - for (i = 0; i < dev->data->nb_rx_queues; i++) {
> - rxq = dev->data->rx_queues[i];
> - if (rxq && ice_rxq_vec_setup(rxq)) {
> - ad->rx_vec_allowed = false;
> - break;
> - }
> - }
> - rx_simd_width = ice_get_max_simd_bitwidth();
> -
> + if (ad->ptp_ena || !ad->rx_bulk_alloc_allowed) {
> + rx_simd_width = RTE_VECT_SIMD_DISABLED;
> } else {
> - ad->rx_vec_allowed = false;
> - }
> -
> - if (ad->rx_vec_allowed) {
> - if (dev->data->scattered_rx) {
> - if (rx_simd_width == RTE_VECT_SIMD_512) {
> -#ifdef CC_AVX512_SUPPORT
> - if (ad->rx_vec_offload_support)
> - ad->rx_func_type = ICE_RX_AVX512_SCATTERED_OFFLOAD;
> - else
> - ad->rx_func_type = ICE_RX_AVX512_SCATTERED;
> -#endif
> - } else if (rx_simd_width == RTE_VECT_SIMD_256) {
> - if (ad->rx_vec_offload_support)
> - ad->rx_func_type = ICE_RX_AVX2_SCATTERED_OFFLOAD;
> - else
> - ad->rx_func_type = ICE_RX_AVX2_SCATTERED;
> - } else {
> - ad->rx_func_type = ICE_RX_SSE_SCATTERED;
> - }
> - } else {
> - if (rx_simd_width == RTE_VECT_SIMD_512) {
> -#ifdef CC_AVX512_SUPPORT
> - if (ad->rx_vec_offload_support)
> - ad->rx_func_type = ICE_RX_AVX512_OFFLOAD;
> - else
> - ad->rx_func_type = ICE_RX_AVX512;
> -#endif
> - } else if (rx_simd_width == RTE_VECT_SIMD_256) {
> - if (ad->rx_vec_offload_support)
> - ad->rx_func_type = ICE_RX_AVX2_OFFLOAD;
> - else
> - ad->rx_func_type = ICE_RX_AVX2;
> - } else {
> - ad->rx_func_type = ICE_RX_SSE;
> - }
> - }
> - goto out;
> + rx_simd_width = ice_get_max_simd_bitwidth();
> + if (rx_simd_width >= RTE_VECT_SIMD_128)
> + if (ice_rx_vec_dev_check(dev) == -1)
> + rx_simd_width = RTE_VECT_SIMD_DISABLED;
> }
> -
> #endif
>
> + req_features.simd_width = rx_simd_width;
> if (dev->data->scattered_rx)
> - /* Set the non-LRO scattered function */
> - ad->rx_func_type = ICE_RX_SCATTERED;
> - else if (ad->rx_bulk_alloc_allowed)
> - ad->rx_func_type = ICE_RX_BULK_ALLOC;
> - else
> - ad->rx_func_type = ICE_RX_DEFAULT;
> + req_features.scattered = CI_RX_PATH_SCATTERED;
> + if (ad->rx_bulk_alloc_allowed)
> + req_features.bulk_alloc = CI_RX_PATH_BULK_ALLOC;
> +
> + ad->rx_func_type = ci_rx_path_select(req_features,
> + &ice_rx_path_infos[0],
> + RTE_DIM(ice_rx_path_infos),
> + ICE_RX_DEFAULT);
> +#ifdef RTE_ARCH_X86
> + int i;
> +
> + if (ice_rx_path_infos[ad->rx_func_type].features.simd_width >= RTE_VECT_SIMD_128)
> + /* Vector function selected. Prepare the rxq accordingly. */
> + for (i = 0; i < dev->data->nb_rx_queues; i++)
> + if (dev->data->rx_queues[i])
> + ice_rxq_vec_setup(dev->data->rx_queues[i]);
> +#endif
>
> out:
> - dev->rx_pkt_burst = ice_rx_burst_infos[ad->rx_func_type].pkt_burst;
> - PMD_DRV_LOG(NOTICE, "Using %s Rx burst function (port %d).",
> - ice_rx_burst_infos[ad->rx_func_type].info, dev->data->port_id);
> + dev->rx_pkt_burst = ice_rx_path_infos[ad->rx_func_type].pkt_burst;
> + PMD_DRV_LOG(NOTICE, "Using %s (port %d).",
> + ice_rx_path_infos[ad->rx_func_type].info, dev->data->port_id);
> }
>
> int
> @@ -3787,10 +3767,10 @@ ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
> int ret = -EINVAL;
> unsigned int i;
>
> - for (i = 0; i < RTE_DIM(ice_rx_burst_infos); ++i) {
> - if (pkt_burst == ice_rx_burst_infos[i].pkt_burst) {
> + for (i = 0; i < RTE_DIM(ice_rx_path_infos); ++i) {
> + if (pkt_burst == ice_rx_path_infos[i].pkt_burst) {
> snprintf(mode->info, sizeof(mode->info), "%s",
> - ice_rx_burst_infos[i].info);
> + ice_rx_path_infos[i].info);
> ret = 0;
> break;
> }
> diff --git a/drivers/net/intel/ice/ice_rxtx.h b/drivers/net/intel/ice/ice_rxtx.h
> index 8c3d6c413a..e6a18310a0 100644
> --- a/drivers/net/intel/ice/ice_rxtx.h
> +++ b/drivers/net/intel/ice/ice_rxtx.h
> @@ -80,6 +80,34 @@
> #define ICE_TX_OFFLOAD_NOTSUP_MASK \
> (RTE_MBUF_F_TX_OFFLOAD_MASK ^ ICE_TX_OFFLOAD_MASK)
>
> +#define ICE_RX_NO_OFFLOADS 0
> +/* basic scalar path */
> +#define ICE_RX_SCALAR_OFFLOADS ( \
> + RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
> + RTE_ETH_RX_OFFLOAD_KEEP_CRC | \
> + RTE_ETH_RX_OFFLOAD_SCATTER | \
> + RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
> + RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
> + RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
> + RTE_ETH_RX_OFFLOAD_TCP_CKSUM | \
> + RTE_ETH_RX_OFFLOAD_QINQ_STRIP | \
> + RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
> + RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
> + RTE_ETH_RX_OFFLOAD_RSS_HASH | \
> + RTE_ETH_RX_OFFLOAD_TIMESTAMP | \
> + RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)
> +/* basic vector paths */
> +#define ICE_RX_VECTOR_OFFLOADS ( \
> + RTE_ETH_RX_OFFLOAD_KEEP_CRC | \
> + RTE_ETH_RX_OFFLOAD_SCATTER | \
> + RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM)
> +/* vector offload paths */
> +#define ICE_RX_VECTOR_OFFLOAD_OFFLOADS ( \
> + ICE_RX_VECTOR_OFFLOADS | \
> + RTE_ETH_RX_OFFLOAD_CHECKSUM | \
> + RTE_ETH_RX_OFFLOAD_VLAN | \
The RTE_ETH_RX_OFFLOAD_VLAN flag includes QINQ_STRIP, which is not
supported by the Rx vector path, so it needs to be replaced with
RTE_ETH_RX_OFFLOAD_VLAN_STRIP and RTE_ETH_RX_OFFLOAD_VLAN_FILTER.
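For reference, rte_ethdev.h defines the composite flag as:

	#define RTE_ETH_RX_OFFLOAD_VLAN \
		(RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
		 RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
		 RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
		 RTE_ETH_RX_OFFLOAD_QINQ_STRIP)

so something like the below should do it (a sketch only - the rest of
the macro stays as in your patch):

	#define ICE_RX_VECTOR_OFFLOAD_OFFLOADS ( \
		ICE_RX_VECTOR_OFFLOADS | \
		RTE_ETH_RX_OFFLOAD_CHECKSUM | \
		RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
		RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
		...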
Thread overview: 49+ messages
2025-07-25 12:49 [RFC PATCH 00/14] net/intel: rx path selection simplification Ciara Loftus
2025-07-25 12:49 ` [RFC PATCH 01/14] net/ice: use the same Rx path across process types Ciara Loftus
2025-07-25 13:40 ` Bruce Richardson
2025-07-25 12:49 ` [RFC PATCH 02/14] net/iavf: rename Rx/Tx function type variables Ciara Loftus
2025-07-25 13:40 ` Bruce Richardson
2025-07-25 12:49 ` [RFC PATCH 03/14] net/iavf: use the same Rx path across process types Ciara Loftus
2025-07-25 13:41 ` Bruce Richardson
2025-07-25 12:49 ` [RFC PATCH 04/14] net/i40e: " Ciara Loftus
2025-07-25 13:43 ` Bruce Richardson
2025-07-25 12:49 ` [RFC PATCH 05/14] net/intel: introduce common vector capability function Ciara Loftus
2025-07-25 13:45 ` Bruce Richardson
2025-07-25 12:49 ` [RFC PATCH 06/14] net/ice: use the new " Ciara Loftus
2025-07-25 13:56 ` Bruce Richardson
2025-08-06 14:46 ` Loftus, Ciara
2025-07-25 12:49 ` [RFC PATCH 07/14] net/iavf: " Ciara Loftus
2025-07-25 12:49 ` [RFC PATCH 08/14] net/i40e: " Ciara Loftus
2025-07-25 12:49 ` [RFC PATCH 09/14] net/iavf: remove redundant field from iavf adapter struct Ciara Loftus
2025-07-25 14:51 ` Bruce Richardson
2025-07-25 12:49 ` [RFC PATCH 10/14] net/intel: introduce infrastructure for Rx path selection Ciara Loftus
2025-07-25 15:21 ` Bruce Richardson
2025-08-06 10:14 ` Loftus, Ciara
2025-08-06 10:36 ` Bruce Richardson
2025-07-25 12:49 ` [RFC PATCH 11/14] net/ice: remove unsupported Rx offload Ciara Loftus
2025-07-25 15:22 ` Bruce Richardson
2025-07-25 12:49 ` [RFC PATCH 12/14] net/ice: use the common Rx path selection infrastructure Ciara Loftus
2025-07-25 12:49 ` [RFC PATCH 13/14] net/iavf: " Ciara Loftus
2025-07-25 12:49 ` [RFC PATCH 14/14] net/i40e: " Ciara Loftus
2025-08-07 12:39 ` [PATCH v2 00/15] net/intel: rx path selection simplification Ciara Loftus
2025-08-07 12:39 ` [PATCH v2 01/15] net/ice: use the same Rx path across process types Ciara Loftus
2025-08-07 12:39 ` [PATCH v2 02/15] net/iavf: rename Rx/Tx function type variables Ciara Loftus
2025-08-07 12:39 ` [PATCH v2 03/15] net/iavf: use the same Rx path across process types Ciara Loftus
2025-08-07 12:39 ` [PATCH v2 04/15] net/i40e: " Ciara Loftus
2025-08-07 12:39 ` [PATCH v2 05/15] net/intel: introduce common vector capability function Ciara Loftus
2025-08-07 12:39 ` [PATCH v2 06/15] net/ice: use the new " Ciara Loftus
2025-08-11 10:47 ` Bruce Richardson
2025-08-07 12:39 ` [PATCH v2 07/15] net/iavf: " Ciara Loftus
2025-08-11 10:48 ` Bruce Richardson
2025-08-07 12:39 ` [PATCH v2 08/15] net/i40e: " Ciara Loftus
2025-08-11 10:53 ` Bruce Richardson
2025-08-07 12:39 ` [PATCH v2 09/15] net/iavf: remove redundant field from iavf adapter struct Ciara Loftus
2025-08-07 12:39 ` [PATCH v2 10/15] net/ice: remove unsupported Rx offload Ciara Loftus
2025-08-07 12:39 ` [PATCH v2 11/15] net/iavf: reorder enum of Rx function types Ciara Loftus
2025-08-11 10:56 ` Bruce Richardson
2025-08-07 12:39 ` [PATCH v2 12/15] net/intel: introduce infrastructure for Rx path selection Ciara Loftus
2025-08-11 11:36 ` Bruce Richardson
2025-08-07 12:39 ` [PATCH v2 13/15] net/ice: use the common Rx path selection infrastructure Ciara Loftus
2025-08-11 17:03 ` Bruce Richardson [this message]
2025-08-07 12:39 ` [PATCH v2 14/15] net/iavf: " Ciara Loftus
2025-08-07 12:39 ` [PATCH v2 15/15] net/i40e: " Ciara Loftus