From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <dev-bounces@dpdk.org>
Received: from dpdk.org (dpdk.org [92.243.14.124])
	by inbox.dpdk.org (Postfix) with ESMTP id 94BD5A00C4;
	Fri, 24 Apr 2020 03:50:48 +0200 (CEST)
Received: from [92.243.14.124] (localhost [127.0.0.1])
	by dpdk.org (Postfix) with ESMTP id D80901D41B;
	Fri, 24 Apr 2020 03:49:47 +0200 (CEST)
Received: from mga06.intel.com (mga06.intel.com [134.134.136.31])
 by dpdk.org (Postfix) with ESMTP id 8B2EC1D164
 for <dev@dpdk.org>; Fri, 24 Apr 2020 03:49:43 +0200 (CEST)
IronPort-SDR: 548V17SGM1M7TUWifdic/mCalFP59kEpn110tIbxvB2DvaP88s4malb8AJv4GYvtkDsvITGiBd
 cKCTxOu8qgbQ==
X-Amp-Result: SKIPPED(no attachment in message)
X-Amp-File-Uploaded: False
Received: from orsmga007.jf.intel.com ([10.7.209.58])
 by orsmga104.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;
 23 Apr 2020 18:49:43 -0700
IronPort-SDR: heD3//gLvbgU3F3mpo98ncdKsxTAI+1dhir4zNGO5UypTKClp5l0zDTfAYDI9EoCxoI0+QgmSv
 ND5oWVp98VSg==
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.73,310,1583222400"; d="scan'208";a="245083937"
Received: from npg-dpdk-virtual-marvin-dev.sh.intel.com ([10.67.119.56])
 by orsmga007.jf.intel.com with ESMTP; 23 Apr 2020 18:49:41 -0700
From: Marvin Liu <yong.liu@intel.com>
To: maxime.coquelin@redhat.com, xiaolong.ye@intel.com, zhihong.wang@intel.com
Cc: dev@dpdk.org, harry.van.haaren@intel.com, Marvin Liu <yong.liu@intel.com>
Date: Fri, 24 Apr 2020 17:24:44 +0800
Message-Id: <20200424092445.44693-9-yong.liu@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20200424092445.44693-1-yong.liu@intel.com>
References: <20200313174230.74661-1-yong.liu@intel.com>
 <20200424092445.44693-1-yong.liu@intel.com>
Subject: [dpdk-dev] [PATCH v9 8/9] net/virtio: add election for vectorized
	path
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://mails.dpdk.org/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://mails.dpdk.org/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://mails.dpdk.org/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
Errors-To: dev-bounces@dpdk.org
Sender: "dev" <dev-bounces@dpdk.org>

Rewrite the vectorized path selection logic. The default setting comes from
the vectorized devarg; each criterion below is then checked (a condensed
sketch follows the lists).

Packed ring vectorized path needs:
    AVX512F and required extensions are supported by compiler and host
    VERSION_1 and IN_ORDER features are negotiated
    mergeable feature is not negotiated
    LRO offload is disabled

Split ring vectorized Rx path needs:
    mergeable and IN_ORDER features are not negotiated
    LRO, checksum and VLAN strip offloads are disabled
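
A condensed sketch of these rules as hypothetical helpers (names are
illustrative only; the real checks live inline in virtio_dev_configure
below and assume the driver's usual headers):

    /* Packed ring: AVX512F plus VERSION_1/IN_ORDER, no mergeable, no LRO. */
    static bool
    packed_vec_ok(struct virtio_hw *hw, uint64_t rx_offloads)
    {
            return rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) &&
                   vtpci_with_feature(hw, VIRTIO_F_VERSION_1) &&
                   vtpci_with_feature(hw, VIRTIO_F_IN_ORDER) &&
                   !vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) &&
                   !(rx_offloads & DEV_RX_OFFLOAD_TCP_LRO);
    }

    /* Split ring Rx: no mergeable/IN_ORDER, no LRO/csum/VLAN-strip offloads. */
    static bool
    split_vec_rx_ok(struct virtio_hw *hw, uint64_t rx_offloads)
    {
            return !vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) &&
                   !vtpci_with_feature(hw, VIRTIO_F_IN_ORDER) &&
                   !(rx_offloads & (DEV_RX_OFFLOAD_TCP_LRO |
                                    DEV_RX_OFFLOAD_UDP_CKSUM |
                                    DEV_RX_OFFLOAD_TCP_CKSUM |
                                    DEV_RX_OFFLOAD_VLAN_STRIP));
    }

The election only runs when the vectorized devarg is given, e.g. something
like the following testpmd invocation (the PCI address is a placeholder and
the exact EAL options depend on the setup):

    testpmd -l 0-1 -w 0000:xx:xx.x,vectorized=1 -- -i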

Signed-off-by: Marvin Liu <yong.liu@intel.com>

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 0a69a4db1..8a9545dd8 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1523,9 +1523,12 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
 	if (vtpci_packed_queue(hw)) {
 		PMD_INIT_LOG(INFO,
 			"virtio: using packed ring %s Tx path on port %u",
-			hw->use_inorder_tx ? "inorder" : "standard",
+			hw->use_vec_tx ? "vectorized" : "standard",
 			eth_dev->data->port_id);
-		eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
+		if (hw->use_vec_tx)
+			eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed_vec;
+		else
+			eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
 	} else {
 		if (hw->use_inorder_tx) {
 			PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
@@ -1539,7 +1542,13 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
 	}
 
 	if (vtpci_packed_queue(hw)) {
-		if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+		if (hw->use_vec_rx) {
+			PMD_INIT_LOG(INFO,
+				"virtio: using packed ring vectorized Rx path on port %u",
+				eth_dev->data->port_id);
+			eth_dev->rx_pkt_burst =
+				&virtio_recv_pkts_packed_vec;
+		} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
 			PMD_INIT_LOG(INFO,
 				"virtio: using packed ring mergeable buffer Rx path on port %u",
 				eth_dev->data->port_id);
@@ -1952,8 +1961,17 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 		goto err_virtio_init;
 
 	if (vectorized) {
-		if (!vtpci_packed_queue(hw))
+		if (!vtpci_packed_queue(hw)) {
+			hw->use_vec_rx = 1;
+		} else {
+#if !defined(CC_AVX512_SUPPORT)
+			PMD_DRV_LOG(INFO,
+				"build environment does not support packed ring vectorized path");
+#else
 			hw->use_vec_rx = 1;
+			hw->use_vec_tx = 1;
+#endif
+		}
 	}
 
 	hw->opened = true;
@@ -2099,11 +2117,10 @@ virtio_dev_devargs_parse(struct rte_devargs *devargs, int *vdpa,
 		}
 	}
 
-	if (vectorized &&
-		rte_kvargs_count(kvlist, VIRTIO_ARG_VECTORIZED) == 1) {
+	if (vectorized && rte_kvargs_count(kvlist, VIRTIO_ARG_VECTORIZED) == 1) {
 		ret = rte_kvargs_process(kvlist,
-				VIRTIO_ARG_VECTORIZED,
-				vectorized_check_handler, vectorized);
+					VIRTIO_ARG_VECTORIZED,
+					vectorized_check_handler, vectorized);
 		if (ret < 0) {
 			PMD_INIT_LOG(ERR, "Failed to parse %s",
 					VIRTIO_ARG_VECTORIZED);
@@ -2288,31 +2305,61 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 			return -EBUSY;
 		}
 
-	if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
-		hw->use_inorder_tx = 1;
-		hw->use_inorder_rx = 1;
-		hw->use_vec_rx = 0;
-	}
-
 	if (vtpci_packed_queue(hw)) {
-		hw->use_vec_rx = 0;
-		hw->use_inorder_rx = 0;
-	}
+		if ((hw->use_vec_rx || hw->use_vec_tx) &&
+		    (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) ||
+		     !vtpci_with_feature(hw, VIRTIO_F_IN_ORDER) ||
+		     !vtpci_with_feature(hw, VIRTIO_F_VERSION_1))) {
+			PMD_DRV_LOG(INFO,
+				"disabled packed ring vectorized path for requirements not met");
+			hw->use_vec_rx = 0;
+			hw->use_vec_tx = 0;
+		}
 
+		if (hw->use_vec_rx) {
+			if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+				PMD_DRV_LOG(INFO,
+					"disabled packed ring vectorized rx for mrg_rxbuf enabled");
+				hw->use_vec_rx = 0;
+			}
+
+			if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+				PMD_DRV_LOG(INFO,
+					"disabled packed ring vectorized rx for TCP_LRO enabled");
+				hw->use_vec_rx = 0;
+			}
+		}
+	} else {
+		if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
+			hw->use_inorder_tx = 1;
+			hw->use_inorder_rx = 1;
+			hw->use_vec_rx = 0;
+		}
+
+		if (hw->use_vec_rx) {
 #if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
-	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
-		hw->use_vec_rx = 0;
-	}
+			if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
+				PMD_DRV_LOG(INFO,
+					"disabled split ring vectorized path for requirement not met");
+				hw->use_vec_rx = 0;
+			}
 #endif
-	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
-		hw->use_vec_rx = 0;
-	}
+			if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+				PMD_DRV_LOG(INFO,
+					"disabled split ring vectorized rx for mrg_rxbuf enabled");
+				hw->use_vec_rx = 0;
+			}
 
-	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
-			   DEV_RX_OFFLOAD_TCP_CKSUM |
-			   DEV_RX_OFFLOAD_TCP_LRO |
-			   DEV_RX_OFFLOAD_VLAN_STRIP))
-		hw->use_vec_rx = 0;
+			if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
+					   DEV_RX_OFFLOAD_TCP_CKSUM |
+					   DEV_RX_OFFLOAD_TCP_LRO |
+					   DEV_RX_OFFLOAD_VLAN_STRIP)) {
+				PMD_DRV_LOG(INFO,
+					"disabled split ring vectorized rx for offloading enabled");
+				hw->use_vec_rx = 0;
+			}
+		}
+	}
 
 	return 0;
 }
-- 
2.17.1