From: Marvin Liu <yong.liu@intel.com>
To: maxime.coquelin@redhat.com, xiaolong.ye@intel.com, zhihong.wang@intel.com
Cc: dev@dpdk.org, Marvin Liu <yong.liu@intel.com>
Date: Fri, 17 Apr 2020 06:24:30 +0800
Message-Id: <20200416222431.114184-9-yong.liu@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20200416222431.114184-1-yong.liu@intel.com>
References: <20200313174230.74661-1-yong.liu@intel.com> <20200416222431.114184-1-yong.liu@intel.com>
Subject: [dpdk-dev] [PATCH v6 8/9] net/virtio: add election for vectorized path

Rewrite the vectorized path selection logic. The default setting comes
from the RTE_LIBRTE_VIRTIO_INC_VECTOR option. The criteria for each path
are checked as listed below.

The packed ring vectorized path will be selected when:
    the vectorized option is enabled
    AVX512F and the required extensions are supported by compiler and host
    the virtio VERSION_1 and IN_ORDER features are negotiated
    the virtio mergeable feature is not negotiated
    LRO offloading is disabled

The split ring vectorized Rx path will be selected when:
    the vectorized option is enabled
    the virtio mergeable and IN_ORDER features are not negotiated
    LRO, checksum and VLAN strip offloading are disabled

Signed-off-by: Marvin Liu <yong.liu@intel.com>
---
A condensed, self-contained sketch of the resulting election logic is
appended after the diff, for illustration only.

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 4c7d60ca0..de4cef843 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1518,9 +1518,12 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
 	if (vtpci_packed_queue(hw)) {
 		PMD_INIT_LOG(INFO,
 			"virtio: using packed ring %s Tx path on port %u",
-			hw->use_inorder_tx ? "inorder" : "standard",
+			hw->use_vec_tx ? "vectorized" : "standard",
 			eth_dev->data->port_id);
-		eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
+		if (hw->use_vec_tx)
+			eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed_vec;
+		else
+			eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
 	} else {
 		if (hw->use_inorder_tx) {
 			PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
@@ -1534,7 +1537,13 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
 	}
 
 	if (vtpci_packed_queue(hw)) {
-		if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+		if (hw->use_vec_rx) {
+			PMD_INIT_LOG(INFO,
+				"virtio: using packed ring vectorized Rx path on port %u",
+				eth_dev->data->port_id);
+			eth_dev->rx_pkt_burst =
+				&virtio_recv_pkts_packed_vec;
+		} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
 			PMD_INIT_LOG(INFO,
 				"virtio: using packed ring mergeable buffer Rx path on port %u",
 				eth_dev->data->port_id);
@@ -1548,7 +1557,7 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
 		}
 	} else {
 		if (hw->use_vec_rx) {
-			PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
+			PMD_INIT_LOG(INFO, "virtio: using vectorized Rx path on port %u",
 				eth_dev->data->port_id);
 			eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
 		} else if (hw->use_inorder_rx) {
@@ -1921,6 +1930,10 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 		goto err_virtio_init;
 
 	hw->opened = true;
+#ifdef RTE_LIBRTE_VIRTIO_INC_VECTOR
+	hw->use_vec_rx = 1;
+	hw->use_vec_tx = 1;
+#endif
 
 	return 0;
 
@@ -2157,31 +2170,63 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 		return -EBUSY;
 	}
 
-	if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
-		hw->use_inorder_tx = 1;
-		hw->use_inorder_rx = 1;
-		hw->use_vec_rx = 0;
-	}
-
 	if (vtpci_packed_queue(hw)) {
-		hw->use_vec_rx = 0;
-		hw->use_inorder_rx = 0;
-	}
+#if defined RTE_ARCH_X86
+		if ((hw->use_vec_rx || hw->use_vec_tx) &&
+		    (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) ||
+		     !vtpci_with_feature(hw, VIRTIO_F_IN_ORDER) ||
+		     !vtpci_with_feature(hw, VIRTIO_F_VERSION_1))) {
+			PMD_DRV_LOG(INFO,
+				"disabled packed ring vectorization for requirements are not met");
+			hw->use_vec_rx = 0;
+			hw->use_vec_tx = 0;
+		}
+#endif
+
+		if (hw->use_vec_rx) {
+			if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+				PMD_DRV_LOG(INFO,
+					"disabled packed ring vectorized rx for mrg_rxbuf enabled");
+				hw->use_vec_rx = 0;
+			}
 
+			if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+				PMD_DRV_LOG(INFO,
+					"disabled packed ring vectorized rx for TCP_LRO enabled");
+				hw->use_vec_rx = 0;
+			}
+		}
+	} else {
+		if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
+			hw->use_inorder_tx = 1;
+			hw->use_inorder_rx = 1;
+			hw->use_vec_rx = 0;
+		}
+
+		if (hw->use_vec_rx) {
 #if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
-	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
-		hw->use_vec_rx = 0;
-	}
+			if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
+				PMD_DRV_LOG(INFO,
+					"disabled split ring vectorization for requirements are not met");
+				hw->use_vec_rx = 0;
+			}
 #endif
-	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
-		hw->use_vec_rx = 0;
-	}
+			if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+				PMD_DRV_LOG(INFO,
+					"disabled split ring vectorized rx for mrg_rxbuf enabled");
+				hw->use_vec_rx = 0;
+			}
 
-	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
-			   DEV_RX_OFFLOAD_TCP_CKSUM |
-			   DEV_RX_OFFLOAD_TCP_LRO |
-			   DEV_RX_OFFLOAD_VLAN_STRIP))
-		hw->use_vec_rx = 0;
+			if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
+					   DEV_RX_OFFLOAD_TCP_CKSUM |
+					   DEV_RX_OFFLOAD_TCP_LRO |
+					   DEV_RX_OFFLOAD_VLAN_STRIP)) {
+				PMD_DRV_LOG(INFO,
+					"disabled split ring vectorized rx for offloading enabled");
+				hw->use_vec_rx = 0;
+			}
+		}
+	}
 
 	return 0;
 }
-- 
2.17.1