DPDK patches and discussions
 help / color / mirror / Atom feed
From: Bruce Richardson <bruce.richardson@intel.com>
To: Ciara Loftus <ciara.loftus@intel.com>
Cc: <dev@dpdk.org>
Subject: Re: [RFC PATCH 01/14] net/ice: use the same Rx path across process types
Date: Fri, 25 Jul 2025 14:40:04 +0100	[thread overview]
Message-ID: <aIOJNE9LCgb5QBDe@bricha3-mobl1.ger.corp.intel.com> (raw)
In-Reply-To: <20250725124919.3564890-2-ciara.loftus@intel.com>

On Fri, Jul 25, 2025 at 12:49:06PM +0000, Ciara Loftus wrote:
> In the interest of simplicity, let the primary process select the Rx
> path to be used by all processes using the given device.
> 
> The many logs which report individual Rx path selections have been
> consolidated into one single log.
> 
> Signed-off-by: Ciara Loftus <ciara.loftus@intel.com>

Couple of small comments inline below. In general LGTM,

Acked-by: Bruce Richardson <bruce.richardson@intel.com>


> ---
>  drivers/net/intel/ice/ice_ethdev.c |   2 +
>  drivers/net/intel/ice/ice_ethdev.h |  19 ++-
>  drivers/net/intel/ice/ice_rxtx.c   | 234 ++++++++++++-----------------
>  3 files changed, 113 insertions(+), 142 deletions(-)
> 
> diff --git a/drivers/net/intel/ice/ice_ethdev.c b/drivers/net/intel/ice/ice_ethdev.c
> index 513777e372..a8c570026a 100644
> --- a/drivers/net/intel/ice/ice_ethdev.c
> +++ b/drivers/net/intel/ice/ice_ethdev.c
> @@ -3684,6 +3684,8 @@ ice_dev_configure(struct rte_eth_dev *dev)
>  	ad->rx_bulk_alloc_allowed = true;
>  	ad->tx_simple_allowed = true;
>  
> +	ad->rx_func_type = ICE_RX_DEFAULT;
> +
>  	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
>  		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
>  
> diff --git a/drivers/net/intel/ice/ice_ethdev.h b/drivers/net/intel/ice/ice_ethdev.h
> index 8e5799f8b4..5fda814f06 100644
> --- a/drivers/net/intel/ice/ice_ethdev.h
> +++ b/drivers/net/intel/ice/ice_ethdev.h
> @@ -191,6 +191,22 @@ enum pps_type {
>  	PPS_MAX,
>  };
>  
> +enum ice_rx_func_type {
> +	ICE_RX_DEFAULT,
> +	ICE_RX_BULK_ALLOC,
> +	ICE_RX_SCATTERED,
> +	ICE_RX_SSE,
> +	ICE_RX_AVX2,
> +	ICE_RX_AVX2_OFFLOAD,
> +	ICE_RX_SSE_SCATTERED,

SSE_SCATTERED should be immediately after SSE, I think.

> +	ICE_RX_AVX2_SCATTERED,
> +	ICE_RX_AVX2_SCATTERED_OFFLOAD,
> +	ICE_RX_AVX512,
> +	ICE_RX_AVX512_OFFLOAD,
> +	ICE_RX_AVX512_SCATTERED,
> +	ICE_RX_AVX512_SCATTERED_OFFLOAD,
> +};
> +
>  struct ice_adapter;
>  
>  /**
> @@ -637,6 +653,7 @@ struct ice_adapter {
>  	bool rx_vec_allowed;
>  	bool tx_vec_allowed;
>  	bool tx_simple_allowed;
> +	enum ice_rx_func_type rx_func_type;
>  	/* ptype mapping table */
>  	alignas(RTE_CACHE_LINE_MIN_SIZE) uint32_t ptype_tbl[ICE_MAX_PKT_TYPE];
>  	bool is_safe_mode;
> @@ -658,8 +675,6 @@ struct ice_adapter {
>  	unsigned long disabled_engine_mask;
>  	struct ice_parser *psr;
>  	/* used only on X86, zero on other Archs */
> -	bool rx_use_avx2;
> -	bool rx_use_avx512;
>  	bool tx_use_avx2;
>  	bool tx_use_avx512;
>  	bool rx_vec_offload_support;
> diff --git a/drivers/net/intel/ice/ice_rxtx.c b/drivers/net/intel/ice/ice_rxtx.c
> index da508592aa..85832d95a3 100644
> --- a/drivers/net/intel/ice/ice_rxtx.c
> +++ b/drivers/net/intel/ice/ice_rxtx.c
> @@ -3662,181 +3662,135 @@ ice_xmit_pkts_simple(void *tx_queue,
>  	return nb_tx;
>  }
>  
> +static const struct {
> +	eth_rx_burst_t pkt_burst;
> +	const char *info;
> +} ice_rx_burst_infos[] = {
> +	[ICE_RX_SCATTERED] = { ice_recv_scattered_pkts, "Scalar Scattered" },
> +	[ICE_RX_BULK_ALLOC] = { ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" },
> +	[ICE_RX_DEFAULT] = { ice_recv_pkts, "Scalar" },
> +#ifdef RTE_ARCH_X86
> +#ifdef CC_AVX512_SUPPORT
> +	[ICE_RX_AVX512_SCATTERED] = {
> +		ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
> +	[ICE_RX_AVX512_SCATTERED_OFFLOAD] = {
> +		ice_recv_scattered_pkts_vec_avx512_offload, "Offload Vector AVX512 Scattered" },
> +	[ICE_RX_AVX512] = { ice_recv_pkts_vec_avx512, "Vector AVX512" },
> +	[ICE_RX_AVX512_OFFLOAD] = { ice_recv_pkts_vec_avx512_offload, "Offload Vector AVX512" },
> +#endif
> +	[ICE_RX_AVX2_SCATTERED] = { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
> +	[ICE_RX_AVX2_SCATTERED_OFFLOAD] = {
> +		ice_recv_scattered_pkts_vec_avx2_offload, "Offload Vector AVX2 Scattered" },
> +	[ICE_RX_AVX2] = { ice_recv_pkts_vec_avx2, "Vector AVX2" },
> +	[ICE_RX_AVX2_OFFLOAD] = { ice_recv_pkts_vec_avx2_offload, "Offload Vector AVX2" },
> +	[ICE_RX_SSE_SCATTERED] = { ice_recv_scattered_pkts_vec, "Vector SSE Scattered" },
> +	[ICE_RX_SSE] = { ice_recv_pkts_vec, "Vector SSE" },
> +#endif
> +};

Minor nit, but can we have the entries here in a defined order? Probably
best to use the order of the enum, which seems pretty logically arranged
from simplest to most complex (more or less).

> +
>  void __rte_cold
>  ice_set_rx_function(struct rte_eth_dev *dev)
>  {
>  	PMD_INIT_FUNC_TRACE();
>  	struct ice_adapter *ad =
>  		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> +
> +	/* The primary process selects the rx path for all processes. */
> +	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
> +		goto out;
> +
>  #ifdef RTE_ARCH_X86
>  	struct ci_rx_queue *rxq;
>  	int i;
>  	int rx_check_ret = -1;
> -
> -	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
> -		ad->rx_use_avx512 = false;
> -		ad->rx_use_avx2 = false;
> -		rx_check_ret = ice_rx_vec_dev_check(dev);
> -		if (ad->ptp_ena)
> -			rx_check_ret = -1;
> -		ad->rx_vec_offload_support =
> -				(rx_check_ret == ICE_VECTOR_OFFLOAD_PATH);
> -		if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
> -		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
> -			ad->rx_vec_allowed = true;
> -			for (i = 0; i < dev->data->nb_rx_queues; i++) {
> -				rxq = dev->data->rx_queues[i];
> -				if (rxq && ice_rxq_vec_setup(rxq)) {
> -					ad->rx_vec_allowed = false;
> -					break;
> -				}
> +	bool rx_use_avx512 = false, rx_use_avx2 = false;
> +
> +	rx_check_ret = ice_rx_vec_dev_check(dev);
> +	if (ad->ptp_ena)
> +		rx_check_ret = -1;
> +	ad->rx_vec_offload_support =
> +			(rx_check_ret == ICE_VECTOR_OFFLOAD_PATH);
> +	if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
> +			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
> +		ad->rx_vec_allowed = true;
> +		for (i = 0; i < dev->data->nb_rx_queues; i++) {
> +			rxq = dev->data->rx_queues[i];
> +			if (rxq && ice_rxq_vec_setup(rxq)) {
> +				ad->rx_vec_allowed = false;
> +				break;
>  			}
> +		}
>  
> -			if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
> -			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
> -			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
> +		if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
> +				rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
> +				rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
>  #ifdef CC_AVX512_SUPPORT
> -				ad->rx_use_avx512 = true;
> +			rx_use_avx512 = true;
>  #else
> -			PMD_DRV_LOG(NOTICE,
> -				"AVX512 is not supported in build env");
> +		PMD_DRV_LOG(NOTICE,
> +			"AVX512 is not supported in build env");
>  #endif
> -			if (!ad->rx_use_avx512 &&
> -			(rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
> -			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
> -			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
> -				ad->rx_use_avx2 = true;
> -
> -		} else {
> -			ad->rx_vec_allowed = false;
> -		}
> +		if (!rx_use_avx512 &&
> +				(rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
> +				rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&

Not sure if this is fixed later in the series, but should not need to check
for AVX512 when deciding whether or not to choose an AVX2 path. If AVX512
is present, AVX2 will be also, so the AVX2 check is sufficient.

> +				rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
> +			rx_use_avx2 = true;
> +	} else {
> +		ad->rx_vec_allowed = false;
>  	}
>  
>  	if (ad->rx_vec_allowed) {
>  		if (dev->data->scattered_rx) {
> -			if (ad->rx_use_avx512) {
> +			if (rx_use_avx512) {
>  #ifdef CC_AVX512_SUPPORT
> -				if (ad->rx_vec_offload_support) {
> -					PMD_DRV_LOG(NOTICE,
> -						"Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
> -						dev->data->port_id);
> -					dev->rx_pkt_burst =
> -						ice_recv_scattered_pkts_vec_avx512_offload;
> -				} else {
> -					PMD_DRV_LOG(NOTICE,
> -						"Using AVX512 Vector Scattered Rx (port %d).",
> -						dev->data->port_id);
> -					dev->rx_pkt_burst =
> -						ice_recv_scattered_pkts_vec_avx512;
> -				}
> +				if (ad->rx_vec_offload_support)
> +					ad->rx_func_type = ICE_RX_AVX512_SCATTERED_OFFLOAD;
> +				else
> +					ad->rx_func_type = ICE_RX_AVX512_SCATTERED;
>  #endif
> -			} else if (ad->rx_use_avx2) {
> -				if (ad->rx_vec_offload_support) {
> -					PMD_DRV_LOG(NOTICE,
> -						    "Using AVX2 OFFLOAD Vector Scattered Rx (port %d).",
> -						    dev->data->port_id);
> -					dev->rx_pkt_burst =
> -						ice_recv_scattered_pkts_vec_avx2_offload;
> -				} else {
> -					PMD_DRV_LOG(NOTICE,
> -						    "Using AVX2 Vector Scattered Rx (port %d).",
> -						    dev->data->port_id);
> -					dev->rx_pkt_burst =
> -						ice_recv_scattered_pkts_vec_avx2;
> -				}
> +			} else if (rx_use_avx2) {
> +				if (ad->rx_vec_offload_support)
> +					ad->rx_func_type = ICE_RX_AVX2_SCATTERED_OFFLOAD;
> +				else
> +					ad->rx_func_type = ICE_RX_AVX2_SCATTERED;
>  			} else {
> -				PMD_DRV_LOG(DEBUG,
> -					"Using Vector Scattered Rx (port %d).",
> -					dev->data->port_id);
> -				dev->rx_pkt_burst = ice_recv_scattered_pkts_vec;
> +				ad->rx_func_type = ICE_RX_SSE_SCATTERED;
>  			}
>  		} else {
> -			if (ad->rx_use_avx512) {
> +			if (rx_use_avx512) {
>  #ifdef CC_AVX512_SUPPORT
> -				if (ad->rx_vec_offload_support) {
> -					PMD_DRV_LOG(NOTICE,
> -						"Using AVX512 OFFLOAD Vector Rx (port %d).",
> -						dev->data->port_id);
> -					dev->rx_pkt_burst =
> -						ice_recv_pkts_vec_avx512_offload;
> -				} else {
> -					PMD_DRV_LOG(NOTICE,
> -						"Using AVX512 Vector Rx (port %d).",
> -						dev->data->port_id);
> -					dev->rx_pkt_burst =
> -						ice_recv_pkts_vec_avx512;
> -				}
> +				if (ad->rx_vec_offload_support)
> +					ad->rx_func_type = ICE_RX_AVX512_OFFLOAD;
> +				else
> +					ad->rx_func_type = ICE_RX_AVX512;
>  #endif
> -			} else if (ad->rx_use_avx2) {
> -				if (ad->rx_vec_offload_support) {
> -					PMD_DRV_LOG(NOTICE,
> -						    "Using AVX2 OFFLOAD Vector Rx (port %d).",
> -						    dev->data->port_id);
> -					dev->rx_pkt_burst =
> -						ice_recv_pkts_vec_avx2_offload;
> -				} else {
> -					PMD_DRV_LOG(NOTICE,
> -						    "Using AVX2 Vector Rx (port %d).",
> -						    dev->data->port_id);
> -					dev->rx_pkt_burst =
> -						ice_recv_pkts_vec_avx2;
> -				}
> +			} else if (rx_use_avx2) {
> +				if (ad->rx_vec_offload_support)
> +					ad->rx_func_type = ICE_RX_AVX2_OFFLOAD;
> +				else
> +					ad->rx_func_type = ICE_RX_AVX2;
>  			} else {
> -				PMD_DRV_LOG(DEBUG,
> -					"Using Vector Rx (port %d).",
> -					dev->data->port_id);
> -				dev->rx_pkt_burst = ice_recv_pkts_vec;
> +				ad->rx_func_type = ICE_RX_SSE;
>  			}
>  		}
> -		return;
> +		goto out;
>  	}
>  
>  #endif
>  
> -	if (dev->data->scattered_rx) {
> +	if (dev->data->scattered_rx)
>  		/* Set the non-LRO scattered function */
> -		PMD_INIT_LOG(DEBUG,
> -			     "Using a Scattered function on port %d.",
> -			     dev->data->port_id);
> -		dev->rx_pkt_burst = ice_recv_scattered_pkts;
> -	} else if (ad->rx_bulk_alloc_allowed) {
> -		PMD_INIT_LOG(DEBUG,
> -			     "Rx Burst Bulk Alloc Preconditions are "
> -			     "satisfied. Rx Burst Bulk Alloc function "
> -			     "will be used on port %d.",
> -			     dev->data->port_id);
> -		dev->rx_pkt_burst = ice_recv_pkts_bulk_alloc;
> -	} else {
> -		PMD_INIT_LOG(DEBUG,
> -			     "Rx Burst Bulk Alloc Preconditions are not "
> -			     "satisfied, Normal Rx will be used on port %d.",
> -			     dev->data->port_id);
> -		dev->rx_pkt_burst = ice_recv_pkts;
> -	}
> -}
> +		ad->rx_func_type = ICE_RX_SCATTERED;
> +	else if (ad->rx_bulk_alloc_allowed)
> +		ad->rx_func_type = ICE_RX_BULK_ALLOC;
> +	else
> +		ad->rx_func_type = ICE_RX_DEFAULT;
>  
> -static const struct {
> -	eth_rx_burst_t pkt_burst;
> -	const char *info;
> -} ice_rx_burst_infos[] = {
> -	{ ice_recv_scattered_pkts,          "Scalar Scattered" },
> -	{ ice_recv_pkts_bulk_alloc,         "Scalar Bulk Alloc" },
> -	{ ice_recv_pkts,                    "Scalar" },
> -#ifdef RTE_ARCH_X86
> -#ifdef CC_AVX512_SUPPORT
> -	{ ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
> -	{ ice_recv_scattered_pkts_vec_avx512_offload, "Offload Vector AVX512 Scattered" },
> -	{ ice_recv_pkts_vec_avx512,           "Vector AVX512" },
> -	{ ice_recv_pkts_vec_avx512_offload,   "Offload Vector AVX512" },
> -#endif
> -	{ ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
> -	{ ice_recv_scattered_pkts_vec_avx2_offload, "Offload Vector AVX2 Scattered" },
> -	{ ice_recv_pkts_vec_avx2,           "Vector AVX2" },
> -	{ ice_recv_pkts_vec_avx2_offload,   "Offload Vector AVX2" },
> -	{ ice_recv_scattered_pkts_vec,      "Vector SSE Scattered" },
> -	{ ice_recv_pkts_vec,                "Vector SSE" },
> -#endif
> -};
> +out:
> +	dev->rx_pkt_burst = ice_rx_burst_infos[ad->rx_func_type].pkt_burst;
> +	PMD_DRV_LOG(NOTICE, "Using %s Rx burst function (port %d).",
> +		ice_rx_burst_infos[ad->rx_func_type].info, dev->data->port_id);
> +}
>  
>  int
>  ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
> -- 
> 2.34.1
> 

  reply	other threads:[~2025-07-25 13:40 UTC|newest]

Thread overview: 24+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-07-25 12:49 [RFC PATCH 00/14] net/intel: rx path selection simplification Ciara Loftus
2025-07-25 12:49 ` [RFC PATCH 01/14] net/ice: use the same Rx path across process types Ciara Loftus
2025-07-25 13:40   ` Bruce Richardson [this message]
2025-07-25 12:49 ` [RFC PATCH 02/14] net/iavf: rename Rx/Tx function type variables Ciara Loftus
2025-07-25 13:40   ` Bruce Richardson
2025-07-25 12:49 ` [RFC PATCH 03/14] net/iavf: use the same Rx path across process types Ciara Loftus
2025-07-25 13:41   ` Bruce Richardson
2025-07-25 12:49 ` [RFC PATCH 04/14] net/i40e: " Ciara Loftus
2025-07-25 13:43   ` Bruce Richardson
2025-07-25 12:49 ` [RFC PATCH 05/14] net/intel: introduce common vector capability function Ciara Loftus
2025-07-25 13:45   ` Bruce Richardson
2025-07-25 12:49 ` [RFC PATCH 06/14] net/ice: use the new " Ciara Loftus
2025-07-25 13:56   ` Bruce Richardson
2025-07-25 12:49 ` [RFC PATCH 07/14] net/iavf: " Ciara Loftus
2025-07-25 12:49 ` [RFC PATCH 08/14] net/i40e: " Ciara Loftus
2025-07-25 12:49 ` [RFC PATCH 09/14] net/iavf: remove redundant field from iavf adapter struct Ciara Loftus
2025-07-25 14:51   ` Bruce Richardson
2025-07-25 12:49 ` [RFC PATCH 10/14] net/intel: introduce infrastructure for Rx path selection Ciara Loftus
2025-07-25 15:21   ` Bruce Richardson
2025-07-25 12:49 ` [RFC PATCH 11/14] net/ice: remove unsupported Rx offload Ciara Loftus
2025-07-25 15:22   ` Bruce Richardson
2025-07-25 12:49 ` [RFC PATCH 12/14] net/ice: use the common Rx path selection infrastructure Ciara Loftus
2025-07-25 12:49 ` [RFC PATCH 13/14] net/iavf: " Ciara Loftus
2025-07-25 12:49 ` [RFC PATCH 14/14] net/i40e: " Ciara Loftus

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=aIOJNE9LCgb5QBDe@bricha3-mobl1.ger.corp.intel.com \
    --to=bruce.richardson@intel.com \
    --cc=ciara.loftus@intel.com \
    --cc=dev@dpdk.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).