From: "Wang, Yixue" <yixue.wang@intel.com>
To: "Zhang, Qi Z" <qi.z.zhang@intel.com>,
"Yang, Qiming" <qiming.yang@intel.com>
Cc: "Zhang, Liheng" <liheng.zhang@intel.com>,
"Dong, Yao" <yao.dong@intel.com>, "dev@dpdk.org" <dev@dpdk.org>,
"stable@dpdk.org" <stable@dpdk.org>
Subject: Re: [dpdk-dev] [PATCH] net/ice: fix wrong data path selection in secondary process
Date: Thu, 3 Jun 2021 10:03:31 +0000 [thread overview]
Message-ID: <SJ0PR11MB4799C0568EABD014E9533E4E863C9@SJ0PR11MB4799.namprd11.prod.outlook.com> (raw)
In-Reply-To: <20210524090759.980530-1-qi.z.zhang@intel.com>
Hi, Qi
I've tested this patch and it works.
Best Regards,
Yixue.
> -----Original Message-----
> From: Zhang, Qi Z <qi.z.zhang@intel.com>
> Sent: Monday, May 24, 2021 17:08
> To: Yang, Qiming <qiming.yang@intel.com>
> Cc: Zhang, Liheng <liheng.zhang@intel.com>; Wang, Yixue
> <yixue.wang@intel.com>; Dong, Yao <yao.dong@intel.com>; dev@dpdk.org;
> Zhang, Qi Z <qi.z.zhang@intel.com>; stable@dpdk.org
> Subject: [PATCH] net/ice: fix wrong data path selection in secondary process
>
> The flags use_avx2 and use_avx512 are defined as local variables, so they are not
> visible to the secondary process, and the wrong data path is selected. Fix the issue
> by moving them into struct ice_adapter.
>
> Fixes: ae60d3c9b227 ("net/ice: support Rx AVX2 vector")
> Fixes: 2d5f6953d56d ("net/ice: support vector AVX2 in Tx")
> Fixes: 7f85d5ebcfe1 ("net/ice: add AVX512 vector path")
> Cc: stable@dpdk.org
>
> Reported-by: Yixue Wang <yixue.wang@intel.com>
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> Tested-by: Yixue Wang <yixue.wang@intel.com>
> ---
> drivers/net/ice/ice_ethdev.h | 6 +++++
> drivers/net/ice/ice_rxtx.c | 44 ++++++++++++++++++------------------
> 2 files changed, 28 insertions(+), 22 deletions(-)
>
> diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h index
> 2a8a8169d5..aebfd1b0b7 100644
> --- a/drivers/net/ice/ice_ethdev.h
> +++ b/drivers/net/ice/ice_ethdev.h
> @@ -487,6 +487,12 @@ struct ice_adapter {
> struct ice_devargs devargs;
> enum ice_pkg_type active_pkg_type; /* loaded ddp package type */
> uint16_t fdir_ref_cnt;
> +#ifdef RTE_ARCH_X86
> + bool rx_use_avx2;
> + bool rx_use_avx512;
> + bool tx_use_avx2;
> + bool tx_use_avx512;
> +#endif
> };
>
> struct ice_vsi_vlan_pvid_info {
> diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c index
> 49abcb2f5c..f4f6f48d78 100644
> --- a/drivers/net/ice/ice_rxtx.c
> +++ b/drivers/net/ice/ice_rxtx.c
> @@ -3058,11 +3058,11 @@ ice_set_rx_function(struct rte_eth_dev *dev)
> #ifdef RTE_ARCH_X86
> struct ice_rx_queue *rxq;
> int i;
> - int rx_check_ret;
> - bool use_avx512 = false;
> - bool use_avx2 = false;
> + int rx_check_ret = 0;
>
> if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
> + ad->rx_use_avx512 = false;
> + ad->rx_use_avx2 = false;
> rx_check_ret = ice_rx_vec_dev_check(dev);
> if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
> rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128)
> { @@ -3079,16 +3079,16 @@ ice_set_rx_function(struct rte_eth_dev *dev)
> rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) ==
> 1 &&
> rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW)
> == 1) #ifdef CC_AVX512_SUPPORT
> - use_avx512 = true;
> + ad->rx_use_avx512 = true;
> #else
> PMD_DRV_LOG(NOTICE,
> "AVX512 is not supported in build env"); #endif
> - if (!use_avx512 &&
> + if (!ad->rx_use_avx512 &&
> (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1
> ||
> rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) ==
> 1) &&
> rte_vect_get_max_simd_bitwidth() >=
> RTE_VECT_SIMD_256)
> - use_avx2 = true;
> + ad->rx_use_avx2 = true;
>
> } else {
> ad->rx_vec_allowed = false;
> @@ -3097,7 +3097,7 @@ ice_set_rx_function(struct rte_eth_dev *dev)
>
> if (ad->rx_vec_allowed) {
> if (dev->data->scattered_rx) {
> - if (use_avx512) {
> + if (ad->rx_use_avx512) {
> #ifdef CC_AVX512_SUPPORT
> if (rx_check_ret ==
> ICE_VECTOR_OFFLOAD_PATH) {
> PMD_DRV_LOG(NOTICE,
> @@ -3116,14 +3116,14 @@ ice_set_rx_function(struct rte_eth_dev *dev)
> } else {
> PMD_DRV_LOG(DEBUG,
> "Using %sVector Scattered Rx
> (port %d).",
> - use_avx2 ? "avx2 " : "",
> + ad->rx_use_avx2 ? "avx2 " : "",
> dev->data->port_id);
> - dev->rx_pkt_burst = use_avx2 ?
> + dev->rx_pkt_burst = ad->rx_use_avx2 ?
> ice_recv_scattered_pkts_vec_avx2 :
> ice_recv_scattered_pkts_vec;
> }
> } else {
> - if (use_avx512) {
> + if (ad->rx_use_avx512) {
> #ifdef CC_AVX512_SUPPORT
> if (rx_check_ret ==
> ICE_VECTOR_OFFLOAD_PATH) {
> PMD_DRV_LOG(NOTICE,
> @@ -3142,9 +3142,9 @@ ice_set_rx_function(struct rte_eth_dev *dev)
> } else {
> PMD_DRV_LOG(DEBUG,
> "Using %sVector Rx (port %d).",
> - use_avx2 ? "avx2 " : "",
> + ad->rx_use_avx2 ? "avx2 " : "",
> dev->data->port_id);
> - dev->rx_pkt_burst = use_avx2 ?
> + dev->rx_pkt_burst = ad->rx_use_avx2 ?
> ice_recv_pkts_vec_avx2 :
> ice_recv_pkts_vec;
> }
> @@ -3294,11 +3294,11 @@ ice_set_tx_function(struct rte_eth_dev *dev)
> #ifdef RTE_ARCH_X86
> struct ice_tx_queue *txq;
> int i;
> - int tx_check_ret;
> - bool use_avx512 = false;
> - bool use_avx2 = false;
> + int tx_check_ret = 0;
>
> if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
> + ad->tx_use_avx2 = false;
> + ad->tx_use_avx512 = false;
> tx_check_ret = ice_tx_vec_dev_check(dev);
> if (tx_check_ret >= 0 &&
> rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128)
> { @@ -3308,18 +3308,18 @@ ice_set_tx_function(struct rte_eth_dev *dev)
> rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) ==
> 1 &&
> rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW)
> == 1) #ifdef CC_AVX512_SUPPORT
> - use_avx512 = true;
> + ad->tx_use_avx512 = true;
> #else
> PMD_DRV_LOG(NOTICE,
> "AVX512 is not supported in build env"); #endif
> - if (!use_avx512 && tx_check_ret == ICE_VECTOR_PATH
> &&
> + if (!ad->tx_use_avx512 && tx_check_ret ==
> ICE_VECTOR_PATH &&
> (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1
> ||
> rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) ==
> 1) &&
> rte_vect_get_max_simd_bitwidth() >=
> RTE_VECT_SIMD_256)
> - use_avx2 = true;
> + ad->tx_use_avx2 = true;
>
> - if (!use_avx512 && tx_check_ret ==
> ICE_VECTOR_OFFLOAD_PATH)
> + if (!ad->tx_use_avx512 && tx_check_ret ==
> ICE_VECTOR_OFFLOAD_PATH)
> ad->tx_vec_allowed = false;
>
> if (ad->tx_vec_allowed) {
> @@ -3337,7 +3337,7 @@ ice_set_tx_function(struct rte_eth_dev *dev)
> }
>
> if (ad->tx_vec_allowed) {
> - if (use_avx512) {
> + if (ad->tx_use_avx512) {
> #ifdef CC_AVX512_SUPPORT
> if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
> PMD_DRV_LOG(NOTICE,
> @@ -3354,9 +3354,9 @@ ice_set_tx_function(struct rte_eth_dev *dev) #endif
> } else {
> PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
> - use_avx2 ? "avx2 " : "",
> + ad->tx_use_avx2 ? "avx2 " : "",
> dev->data->port_id);
> - dev->tx_pkt_burst = use_avx2 ?
> + dev->tx_pkt_burst = ad->tx_use_avx2 ?
> ice_xmit_pkts_vec_avx2 :
> ice_xmit_pkts_vec;
> }
> --
> 2.26.2
next prev parent reply other threads:[~2021-06-03 16:11 UTC|newest]
Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-05-24 9:07 Qi Zhang
2021-06-03 10:03 ` Wang, Yixue [this message]
2021-06-03 10:20 ` Zhang, Qi Z
2022-02-22 3:55 ` Navin Srinivas
2022-02-22 4:30 ` Zhang, Qi Z
2022-02-22 4:39 ` Navin Srinivas
2022-02-22 9:21 ` Kevin Traynor
2022-02-22 12:06 ` Dong, Yao
2022-02-23 2:32 ` Zhang, Liheng
2022-02-25 10:29 ` Navin Srinivas
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=SJ0PR11MB4799C0568EABD014E9533E4E863C9@SJ0PR11MB4799.namprd11.prod.outlook.com \
--to=yixue.wang@intel.com \
--cc=dev@dpdk.org \
--cc=liheng.zhang@intel.com \
--cc=qi.z.zhang@intel.com \
--cc=qiming.yang@intel.com \
--cc=stable@dpdk.org \
--cc=yao.dong@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).