From mboxrd@z Thu Jan 1 00:00:00 1970
From: Marvin Liu
To: maxime.coquelin@redhat.com, xiaolong.ye@intel.com, zhihong.wang@intel.com
Cc: dev@dpdk.org, Marvin Liu
Date: Wed, 29 Apr 2020 15:28:16 +0800
Message-Id: <20200429072822.102745-4-yong.liu@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20200429072822.102745-1-yong.liu@intel.com>
References: <20200313174230.74661-1-yong.liu@intel.com>
 <20200429072822.102745-1-yong.liu@intel.com>
Subject: [dpdk-dev] [PATCH v12 3/9] net/virtio: add vectorized devarg

Previously, the virtio split ring vectorized path was enabled by default.
This is not suitable for everyone, because that path does not follow the
virtio spec.

Add a new devarg for virtio vectorized path selection. By default the
vectorized path is disabled.

Signed-off-by: Marvin Liu
Reviewed-by: Maxime Coquelin

diff --git a/doc/guides/nics/virtio.rst b/doc/guides/nics/virtio.rst
index 6286286db..a67774e91 100644
--- a/doc/guides/nics/virtio.rst
+++ b/doc/guides/nics/virtio.rst
@@ -363,6 +363,13 @@ Below devargs are supported by the PCI virtio driver:
     rte_eth_link_get_nowait function.
     (Default: 10000 (10G))
 
+#. ``vectorized``:
+
+    It is used to specify whether the virtio device prefers to use the
+    vectorized path. Afterwards, dependencies of the vectorized path
+    will be checked during path selection.
+    (Default: 0 (disabled))
+
 Below devargs are supported by the virtio-user vdev:
 
 #. ``path``:
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 37766cbb6..0a69a4db1 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -48,7 +48,8 @@ static int virtio_dev_allmulticast_disable(struct rte_eth_dev *dev);
 static uint32_t virtio_dev_speed_capa_get(uint32_t speed);
 static int virtio_dev_devargs_parse(struct rte_devargs *devargs,
 	int *vdpa,
-	uint32_t *speed);
+	uint32_t *speed,
+	int *vectorized);
 static int virtio_dev_info_get(struct rte_eth_dev *dev,
 				struct rte_eth_dev_info *dev_info);
 static int virtio_dev_link_update(struct rte_eth_dev *dev,
@@ -1551,8 +1552,8 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
 			eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
 		}
 	} else {
-		if (hw->use_simple_rx) {
-			PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
+		if (hw->use_vec_rx) {
+			PMD_INIT_LOG(INFO, "virtio: using vectorized Rx path on port %u",
 				eth_dev->data->port_id);
 			eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
 		} else if (hw->use_inorder_rx) {
@@ -1886,6 +1887,7 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 {
 	struct virtio_hw *hw = eth_dev->data->dev_private;
 	uint32_t speed = SPEED_UNKNOWN;
+	int vectorized = 0;
 	int ret;
 
 	if (sizeof(struct virtio_net_hdr_mrg_rxbuf) > RTE_PKTMBUF_HEADROOM) {
@@ -1912,7 +1914,7 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 		return 0;
 	}
 	ret = virtio_dev_devargs_parse(eth_dev->device->devargs,
-		NULL, &speed);
+		NULL, &speed, &vectorized);
 	if (ret < 0)
 		return ret;
 	hw->speed = speed;
@@ -1949,6 +1951,11 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 	if (ret < 0)
 		goto err_virtio_init;
 
+	if (vectorized) {
+		if (!vtpci_packed_queue(hw))
+			hw->use_vec_rx = 1;
+	}
+
 	hw->opened = true;
 
 	return 0;
@@ -2021,9 +2028,20 @@ virtio_dev_speed_capa_get(uint32_t speed)
 	}
 }
 
+static int vectorized_check_handler(__rte_unused const char *key,
+		const char *value, void *ret_val)
+{
+	if (strcmp(value, "1") == 0)
+		*(int *)ret_val = 1;
+	else
+		*(int *)ret_val = 0;
+
+	return 0;
+}
+
 #define VIRTIO_ARG_SPEED "speed"
 #define VIRTIO_ARG_VDPA "vdpa"
+#define VIRTIO_ARG_VECTORIZED "vectorized"
 
 static int
@@ -2045,7 +2063,7 @@ link_speed_handler(const char *key __rte_unused,
 
 static int
 virtio_dev_devargs_parse(struct rte_devargs *devargs, int *vdpa,
-	uint32_t *speed)
+	uint32_t *speed, int *vectorized)
 {
 	struct rte_kvargs *kvlist;
 	int ret = 0;
@@ -2081,6 +2099,18 @@ virtio_dev_devargs_parse(struct rte_devargs *devargs, int *vdpa,
 		}
 	}
 
+	if (vectorized &&
+	    rte_kvargs_count(kvlist, VIRTIO_ARG_VECTORIZED) == 1) {
+		ret = rte_kvargs_process(kvlist,
+				VIRTIO_ARG_VECTORIZED,
+				vectorized_check_handler, vectorized);
+		if (ret < 0) {
+			PMD_INIT_LOG(ERR, "Failed to parse %s",
+					VIRTIO_ARG_VECTORIZED);
+			goto exit;
+		}
+	}
+
 exit:
 	rte_kvargs_free(kvlist);
 	return ret;
@@ -2092,7 +2122,8 @@ static int eth_virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	int vdpa = 0;
 	int ret = 0;
 
-	ret = virtio_dev_devargs_parse(pci_dev->device.devargs, &vdpa, NULL);
+	ret = virtio_dev_devargs_parse(pci_dev->device.devargs, &vdpa, NULL,
+		NULL);
 	if (ret < 0) {
 		PMD_INIT_LOG(ERR, "devargs parsing is failed");
 		return ret;
@@ -2257,33 +2288,31 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 			return -EBUSY;
 		}
 
-	hw->use_simple_rx = 1;
-
 	if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
 		hw->use_inorder_tx = 1;
 		hw->use_inorder_rx = 1;
-		hw->use_simple_rx = 0;
+		hw->use_vec_rx = 0;
 	}
 
 	if (vtpci_packed_queue(hw)) {
-		hw->use_simple_rx = 0;
+		hw->use_vec_rx = 0;
 		hw->use_inorder_rx = 0;
 	}
 
 #if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
 	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
-		hw->use_simple_rx = 0;
+		hw->use_vec_rx = 0;
 	}
 #endif
 	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
-		hw->use_simple_rx = 0;
+		hw->use_vec_rx = 0;
 	}
 
 	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
 			   DEV_RX_OFFLOAD_TCP_CKSUM |
 			   DEV_RX_OFFLOAD_TCP_LRO |
 			   DEV_RX_OFFLOAD_VLAN_STRIP))
-		hw->use_simple_rx = 0;
+		hw->use_vec_rx = 0;
 
 	return 0;
 }
diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
index bd89357e4..668e688e1 100644
--- a/drivers/net/virtio/virtio_pci.h
+++ b/drivers/net/virtio/virtio_pci.h
@@ -253,7 +253,8 @@ struct virtio_hw {
 	uint8_t vlan_strip;
 	uint8_t use_msix;
 	uint8_t modern;
-	uint8_t use_simple_rx;
+	uint8_t use_vec_rx;
+	uint8_t use_vec_tx;
 	uint8_t use_inorder_rx;
 	uint8_t use_inorder_tx;
 	uint8_t weak_barriers;
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index e450477e8..84f4cf946 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -996,7 +996,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
 	/* Allocate blank mbufs for the each rx descriptor */
 	nbufs = 0;
 
-	if (hw->use_simple_rx) {
+	if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
 		for (desc_idx = 0; desc_idx < vq->vq_nentries;
 		     desc_idx++) {
 			vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
@@ -1014,7 +1014,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
 			&rxvq->fake_mbuf;
 	}
 
-	if (hw->use_simple_rx) {
+	if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
 		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
 			virtio_rxq_rearm_vec(rxvq);
 			nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 953f00d72..150a8d987 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -525,7 +525,7 @@ virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev)
 	 */
 	hw->use_msix = 1;
 	hw->modern = 0;
-	hw->use_simple_rx = 0;
+	hw->use_vec_rx = 0;
 	hw->use_inorder_rx = 0;
 	hw->use_inorder_tx = 0;
 	hw->virtio_user_dev = dev;
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index 0b4e3bf3e..ca23180de 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -32,7 +32,8 @@ virtqueue_detach_unused(struct virtqueue *vq)
 	end = (vq->vq_avail_idx + vq->vq_free_cnt) & (vq->vq_nentries - 1);
 
 	for (idx = 0; idx < vq->vq_nentries; idx++) {
-		if (hw->use_simple_rx && type == VTNET_RQ) {
+		if (hw->use_vec_rx && !vtpci_packed_queue(hw) &&
+				type == VTNET_RQ) {
 			if (start <= end && idx >= start && idx < end)
 				continue;
 			if (start > end && (idx >= start || idx < end))
@@ -97,7 +98,7 @@ virtqueue_rxvq_flush_split(struct virtqueue *vq)
 	for (i = 0; i < nb_used; i++) {
 		used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
 		uep = &vq->vq_split.ring.used->ring[used_idx];
-		if (hw->use_simple_rx) {
+		if (hw->use_vec_rx) {
 			desc_idx = used_idx;
 			rte_pktmbuf_free(vq->sw_ring[desc_idx]);
 			vq->vq_free_cnt++;
@@ -121,7 +122,7 @@ virtqueue_rxvq_flush_split(struct virtqueue *vq)
 		vq->vq_used_cons_idx++;
 	}
 
-	if (hw->use_simple_rx) {
+	if (hw->use_vec_rx) {
 		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
 			virtio_rxq_rearm_vec(rxq);
 			if (virtqueue_kick_prepare(vq))
-- 
2.17.1
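
Note for readers less familiar with DPDK devargs handling: the sketch below
shows how a "vectorized=1" devargs string is parsed with librte_kvargs,
mirroring the vectorized_check_handler() flow added in virtio_ethdev.c above.
It is a minimal standalone illustration, not part of the patch; the hard-coded
devargs string, the ARG_VECTORIZED macro name, the vectorized_handler() name
and the main() wrapper are assumptions made for the example only.

/*
 * Standalone sketch (not part of the patch): parsing a "vectorized=1"
 * devargs string with librte_kvargs, following the same pattern as
 * virtio_dev_devargs_parse() in this patch.
 */
#include <stdio.h>
#include <string.h>

#include <rte_common.h>
#include <rte_kvargs.h>

#define ARG_VECTORIZED "vectorized"	/* same key string as the new devarg */

/* Same shape as the handler added in virtio_ethdev.c. */
static int
vectorized_handler(__rte_unused const char *key, const char *value,
		void *ret_val)
{
	*(int *)ret_val = (strcmp(value, "1") == 0);
	return 0;
}

int
main(void)
{
	/* Hypothetical devargs string; the PMD gets it from the device. */
	const char *devargs = "vectorized=1";
	int vectorized = 0;
	struct rte_kvargs *kvlist;

	/* NULL valid_keys means every key in the string is accepted. */
	kvlist = rte_kvargs_parse(devargs, NULL);
	if (kvlist == NULL)
		return -1;

	/* Only invoke the handler when the key was given exactly once. */
	if (rte_kvargs_count(kvlist, ARG_VECTORIZED) == 1)
		rte_kvargs_process(kvlist, ARG_VECTORIZED,
				vectorized_handler, &vectorized);

	rte_kvargs_free(kvlist);
	printf("vectorized path requested: %d\n", vectorized);
	return 0;
}

On the EAL command line of this DPDK release, the devarg would typically be
appended to the PCI device specification, along the lines of
"-w 0000:00:04.0,vectorized=1" (the PCI address here is only a placeholder).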