From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <dev-bounces@dpdk.org>
Received: from dpdk.org (dpdk.org [92.243.14.124])
	by inbox.dpdk.org (Postfix) with ESMTP id 53BD1A09E9;
	Tue, 15 Dec 2020 03:41:13 +0100 (CET)
Received: from [92.243.14.124] (localhost [127.0.0.1])
	by dpdk.org (Postfix) with ESMTP id ECA75C974;
	Tue, 15 Dec 2020 03:40:58 +0100 (CET)
Received: from mga06.intel.com (mga06.intel.com [134.134.136.31])
 by dpdk.org (Postfix) with ESMTP id A5112C96E
 for <dev@dpdk.org>; Tue, 15 Dec 2020 03:40:55 +0100 (CET)
IronPort-SDR: gVv+7Jg2T9Fp881cGrJzJEW0IkFBks3I4HT62LVX4Fw2TjQ+BziSek/a0PR/KLhvhXVfOnjqfL
 Cj/899602uVw==
X-IronPort-AV: E=McAfee;i="6000,8403,9835"; a="236401088"
X-IronPort-AV: E=Sophos;i="5.78,420,1599548400"; d="scan'208";a="236401088"
Received: from fmsmga008.fm.intel.com ([10.253.24.58])
 by orsmga104.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;
 14 Dec 2020 18:40:55 -0800
IronPort-SDR: tnaOe7BPZxm/VdhTxkGu2ZEY533NdQavpQFCOvr7txifeQYGvqekUiIqT6Tty5QALvQrGHHyqV
 kYtRPHdNxO6g==
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.78,420,1599548400"; d="scan'208";a="335899907"
Received: from dpdk-lrong-srv-04.sh.intel.com ([10.67.119.221])
 by fmsmga008.fm.intel.com with ESMTP; 14 Dec 2020 18:40:52 -0800
From: Leyi Rong <leyi.rong@intel.com>
To: qi.z.zhang@intel.com,
	bruce.richardson@intel.com,
	beilei.xing@intel.com
Cc: dev@dpdk.org,
	Leyi Rong <leyi.rong@intel.com>
Date: Tue, 15 Dec 2020 10:19:43 +0800
Message-Id: <20201215021945.103396-2-leyi.rong@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20201215021945.103396-1-leyi.rong@intel.com>
References: <20201215021945.103396-1-leyi.rong@intel.com>
Subject: [dpdk-dev] [PATCH 1/3] net/i40e: remove devarg
	use-latest-supported-vec
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://mails.dpdk.org/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://mails.dpdk.org/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://mails.dpdk.org/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
Errors-To: dev-bounces@dpdk.org
Sender: "dev" <dev-bounces@dpdk.org>

Now that the EAL parameter --force-max-simd-bitwidth has been
introduced, remove support for the devarg use-latest-supported-vec
to make the Rx/Tx function selection clearer.

Signed-off-by: Leyi Rong <leyi.rong@intel.com>
---
 doc/guides/nics/i40e.rst       |   9 ---
 drivers/net/i40e/i40e_ethdev.c |  63 +------------------
 drivers/net/i40e/i40e_ethdev.h |   3 -
 drivers/net/i40e/i40e_rxtx.c   | 107 +++++++++++----------------------
 4 files changed, 35 insertions(+), 147 deletions(-)

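A minimal sketch of the check the driver now relies on (illustration
only, not part of the patch); the same condition appears in the
i40e_set_rx_function()/i40e_set_tx_function() hunks below, and the
256-bit limit is what --force-max-simd-bitwidth feeds through
rte_vect_get_max_simd_bitwidth(). The helper name is hypothetical.

	/* Illustrative helper, not driver code: the AVX2 burst paths are
	 * used only when the CPU exposes AVX2/AVX512F AND the EAL-imposed
	 * SIMD limit allows at least 256-bit vectors.
	 */
	#include <stdbool.h>
	#include <rte_vect.h>
	#include <rte_cpuflags.h>

	static bool
	i40e_simd_256_allowed(void)	/* hypothetical name */
	{
		return (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256;
	}

For example, running an application with the EAL option
--force-max-simd-bitwidth=128 should keep the driver on the SSE paths
even on AVX2-capable CPUs.
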
diff --git a/doc/guides/nics/i40e.rst b/doc/guides/nics/i40e.rst
index 4e5c4679b8..90fb8a4d6f 100644
--- a/doc/guides/nics/i40e.rst
+++ b/doc/guides/nics/i40e.rst
@@ -209,15 +209,6 @@ Runtime Config Options
   Currently hot-plugging of representor ports is not supported so all required
   representors must be specified on the creation of the PF.
 
-- ``Use latest supported vector`` (default ``disable``)
-
-  Latest supported vector path may not always get the best perf so vector path was
-  recommended to use only on later platform. But users may want the latest vector path
-  since it can get better perf in some real work loading cases. So ``devargs`` param
-  ``use-latest-supported-vec`` is introduced, for example::
-
-  -a 84:00.0,use-latest-supported-vec=1
-
 - ``Enable validation for VF message`` (default ``not enabled``)
 
   The PF counts messages from each VF. If in any period of seconds the message
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index f54769c29d..223eb9950b 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -44,7 +44,6 @@
 #define ETH_I40E_FLOATING_VEB_LIST_ARG	"floating_veb_list"
 #define ETH_I40E_SUPPORT_MULTI_DRIVER	"support-multi-driver"
 #define ETH_I40E_QUEUE_NUM_PER_VF_ARG	"queue-num-per-vf"
-#define ETH_I40E_USE_LATEST_VEC	"use-latest-supported-vec"
 #define ETH_I40E_VF_MSG_CFG		"vf_msg_cfg"
 
 #define I40E_CLEAR_PXE_WAIT_MS     200
@@ -403,7 +402,6 @@ static const char *const valid_keys[] = {
 	ETH_I40E_FLOATING_VEB_LIST_ARG,
 	ETH_I40E_SUPPORT_MULTI_DRIVER,
 	ETH_I40E_QUEUE_NUM_PER_VF_ARG,
-	ETH_I40E_USE_LATEST_VEC,
 	ETH_I40E_VF_MSG_CFG,
 	NULL};
 
@@ -1301,62 +1299,6 @@ i40e_aq_debug_write_global_register(struct i40e_hw *hw,
 	return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details);
 }
 
-static int
-i40e_parse_latest_vec_handler(__rte_unused const char *key,
-				const char *value,
-				void *opaque)
-{
-	struct i40e_adapter *ad = opaque;
-	int use_latest_vec;
-
-	use_latest_vec = atoi(value);
-
-	if (use_latest_vec != 0 && use_latest_vec != 1)
-		PMD_DRV_LOG(WARNING, "Value should be 0 or 1, set it as 1!");
-
-	ad->use_latest_vec = (uint8_t)use_latest_vec;
-
-	return 0;
-}
-
-static int
-i40e_use_latest_vec(struct rte_eth_dev *dev)
-{
-	struct i40e_adapter *ad =
-		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
-	struct rte_kvargs *kvlist;
-	int kvargs_count;
-
-	ad->use_latest_vec = false;
-
-	if (!dev->device->devargs)
-		return 0;
-
-	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
-	if (!kvlist)
-		return -EINVAL;
-
-	kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_USE_LATEST_VEC);
-	if (!kvargs_count) {
-		rte_kvargs_free(kvlist);
-		return 0;
-	}
-
-	if (kvargs_count > 1)
-		PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
-			    "the first invalid or last valid one is used !",
-			    ETH_I40E_USE_LATEST_VEC);
-
-	if (rte_kvargs_process(kvlist, ETH_I40E_USE_LATEST_VEC,
-				i40e_parse_latest_vec_handler, ad) < 0) {
-		rte_kvargs_free(kvlist);
-		return -EINVAL;
-	}
-
-	rte_kvargs_free(kvlist);
-	return 0;
-}
-
 static int
 read_vf_msg_config(__rte_unused const char *key,
 			       const char *value,
@@ -1507,8 +1449,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
 	i40e_parse_vf_msg_config(dev, &pf->vf_msg_cfg);
 	/* Check if need to support multi-driver */
 	i40e_support_multi_driver(dev);
-	/* Check if users want the latest supported vec path */
-	i40e_use_latest_vec(dev);
 
 	/* Make sure all is clean before doing PF reset */
 	i40e_clear_hw(hw);
@@ -13010,5 +12950,4 @@ RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
 			      ETH_I40E_FLOATING_VEB_ARG "=1"
 			      ETH_I40E_FLOATING_VEB_LIST_ARG "=<string>"
 			      ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
-			      ETH_I40E_SUPPORT_MULTI_DRIVER "=1"
-			      ETH_I40E_USE_LATEST_VEC "=0|1");
+			      ETH_I40E_SUPPORT_MULTI_DRIVER "=1");
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 696c5aaf7e..70e6ba610b 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -1285,9 +1285,6 @@ struct i40e_adapter {
 	uint64_t flow_types_mask;
 	uint64_t pctypes_mask;
 
-	/* For devargs */
-	uint8_t use_latest_vec;
-
 	/* For RSS reta table update */
 	uint8_t rss_reta_updated;
 };
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 5df9a9df56..2910619fa5 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -3095,43 +3095,13 @@ i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.offloads = txq->offloads;
 }
 
-static eth_rx_burst_t
-i40e_get_latest_rx_vec(bool scatter)
-{
-#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
-	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) &&
-			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
-		return scatter ? i40e_recv_scattered_pkts_vec_avx2 :
-				 i40e_recv_pkts_vec_avx2;
-#endif
-	return scatter ? i40e_recv_scattered_pkts_vec :
-			 i40e_recv_pkts_vec;
-}
-
-static eth_rx_burst_t
-i40e_get_recommend_rx_vec(bool scatter)
-{
-#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
-	/*
-	 * since AVX frequency can be different to base frequency, limit
-	 * use of AVX2 version to later plaforms, not all those that could
-	 * theoretically run it.
-	 */
-	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) &&
-			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
-		return scatter ? i40e_recv_scattered_pkts_vec_avx2 :
-				 i40e_recv_pkts_vec_avx2;
-#endif
-	return scatter ? i40e_recv_scattered_pkts_vec :
-			 i40e_recv_pkts_vec;
-}
-
 void __rte_cold
 i40e_set_rx_function(struct rte_eth_dev *dev)
 {
 	struct i40e_adapter *ad =
 		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	uint16_t rx_using_sse, i;
+	bool use_avx2 = false;
 	/* In order to allow Vector Rx there are a few configuration
 	 * conditions to be met and Rx Bulk Allocation should be allowed.
 	 */
@@ -3154,20 +3124,33 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
 					break;
 				}
 			}
+
+			if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
+			     rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
+					rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
+				use_avx2 = true;
 		}
 	}
 
 	if (ad->rx_vec_allowed  &&
 			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
-		/* Vec Rx path */
-		PMD_INIT_LOG(DEBUG, "Vector Rx path will be used on port=%d.",
+		if (dev->data->scattered_rx) {
+			PMD_INIT_LOG(DEBUG,
+				"Using %sVector Scattered Rx (port %d).",
+				use_avx2 ? "avx2 " : "",
 				dev->data->port_id);
-		if (ad->use_latest_vec)
-			dev->rx_pkt_burst =
-			i40e_get_latest_rx_vec(dev->data->scattered_rx);
-		else
-			dev->rx_pkt_burst =
-			i40e_get_recommend_rx_vec(dev->data->scattered_rx);
+			dev->rx_pkt_burst = use_avx2 ?
+				i40e_recv_scattered_pkts_vec_avx2 :
+				i40e_recv_scattered_pkts_vec;
+		} else {
+			PMD_INIT_LOG(DEBUG,
+				"Using %sVector Rx (port %d).",
+				use_avx2 ? "avx2 " : "",
+				dev->data->port_id);
+			dev->rx_pkt_burst = use_avx2 ?
+				i40e_recv_pkts_vec_avx2 :
+				i40e_recv_pkts_vec;
+		}
 	} else if (!dev->data->scattered_rx && ad->rx_bulk_alloc_allowed) {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
 				    "satisfied. Rx Burst Bulk Alloc function "
@@ -3268,39 +3251,13 @@ i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
 				txq->queue_id);
 }
 
-static eth_tx_burst_t
-i40e_get_latest_tx_vec(void)
-{
-#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
-	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) &&
-			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
-		return i40e_xmit_pkts_vec_avx2;
-#endif
-	return i40e_xmit_pkts_vec;
-}
-
-static eth_tx_burst_t
-i40e_get_recommend_tx_vec(void)
-{
-#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
-	/*
-	 * since AVX frequency can be different to base frequency, limit
-	 * use of AVX2 version to later plaforms, not all those that could
-	 * theoretically run it.
-	 */
-	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) &&
-			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
-		return i40e_xmit_pkts_vec_avx2;
-#endif
-	return i40e_xmit_pkts_vec;
-}
-
 void __rte_cold
 i40e_set_tx_function(struct rte_eth_dev *dev)
 {
 	struct i40e_adapter *ad =
 		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	int i;
+	bool use_avx2 = false;
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
 		if (ad->tx_vec_allowed) {
@@ -3313,19 +3270,23 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
 					break;
 				}
 			}
+
+			if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
+			     rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
+					rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
+				use_avx2 = true;
 		}
 	}
 
 	if (ad->tx_simple_allowed) {
 		if (ad->tx_vec_allowed &&
 				rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
-			PMD_INIT_LOG(DEBUG, "Vector tx finally be used.");
-			if (ad->use_latest_vec)
-				dev->tx_pkt_burst =
-					i40e_get_latest_tx_vec();
-			else
-				dev->tx_pkt_burst =
-					i40e_get_recommend_tx_vec();
+			PMD_INIT_LOG(DEBUG, "Using %sVector Tx (port %d).",
+				     use_avx2 ? "avx2 " : "",
+				     dev->data->port_id);
+			dev->tx_pkt_burst = use_avx2 ?
+					    i40e_xmit_pkts_vec_avx2 :
+					    i40e_xmit_pkts_vec;
 		} else {
 			PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
 			dev->tx_pkt_burst = i40e_xmit_pkts_simple;
-- 
2.17.1