From: Marvin Liu
To: maxime.coquelin@redhat.com, tiwei.bie@intel.com
Cc: zhihong.wang@intel.com, dev@dpdk.org, Marvin Liu
Date: Fri, 8 Jun 2018 17:07:23 +0800
Message-Id: <20180608090724.20855-7-yong.liu@intel.com>
In-Reply-To: <20180608090724.20855-1-yong.liu@intel.com>
References: <20180608090724.20855-1-yong.liu@intel.com>
Subject: [dpdk-dev] [PATCH 6/7] net/virtio: add IN_ORDER Rx/Tx into selection

Now that the IN_ORDER Rx/Tx paths have been added, the Rx/Tx path
selection logic needs to be updated. The Rx queue descriptor flush
performed at device start also needs to be handled separately.

Rx path selection logic: if IN_ORDER is disabled, the normal Rx path is
selected. If IN_ORDER is enabled and both Rx offload and mergeable
buffers are disabled, the simple Rx path is selected. Otherwise, the
IN_ORDER Rx path is selected.

Tx path selection logic: if IN_ORDER is disabled, the normal Tx path is
selected. If IN_ORDER is enabled and mergeable buffers are disabled, the
simple Tx path is selected. Otherwise, the IN_ORDER Tx path is selected.
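For illustration only (not part of the patch), the selection rules
described above can be reduced to a small standalone sketch. It uses
plain booleans in place of the negotiated feature bits
(VIRTIO_F_IN_ORDER, VIRTIO_NET_F_MRG_RXBUF) and the Rx checksum offload
flags checked in virtio_dev_configure() in the diff below:

/*
 * Illustrative sketch of the intended path selection rules; plain
 * booleans stand in for the negotiated virtio feature bits and the
 * requested Rx checksum offloads.
 */
#include <stdbool.h>
#include <stdio.h>

static const char *
rx_path(bool in_order, bool mrg_rxbuf, bool rx_cksum_offload)
{
	if (!in_order)
		return "normal Rx path";
	/* IN_ORDER negotiated */
	if (!mrg_rxbuf && !rx_cksum_offload)
		return "simple Rx path";
	return "IN_ORDER Rx path";
}

static const char *
tx_path(bool in_order, bool mrg_rxbuf)
{
	if (!in_order)
		return "normal Tx path";
	if (!mrg_rxbuf)
		return "simple Tx path";
	return "IN_ORDER Tx path";
}

int
main(void)
{
	/* e.g. IN_ORDER + mergeable buffers -> IN_ORDER Rx/Tx paths */
	printf("Rx: %s\n", rx_path(true, true, false));
	printf("Tx: %s\n", tx_path(true, true));
	return 0;
}

In other words, the simple paths are only chosen when IN_ORDER is
negotiated without mergeable buffers (and, for Rx, without checksum
offload); without IN_ORDER the existing normal paths remain in use.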
Signed-off-by: Marvin Liu

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index df50a571a..af5eba655 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1320,6 +1320,11 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
 		PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
 			eth_dev->data->port_id);
 		eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
+	} else if (hw->use_inorder_rx) {
+		PMD_INIT_LOG(INFO,
+			"virtio: using inorder mergeable buffer Rx path on port %u",
+			eth_dev->data->port_id);
+		eth_dev->rx_pkt_burst = &virtio_recv_inorder_pkts;
 	} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
 		PMD_INIT_LOG(INFO,
 			"virtio: using mergeable buffer Rx path on port %u",
@@ -1335,6 +1340,10 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
 		PMD_INIT_LOG(INFO, "virtio: using simple Tx path on port %u",
 			eth_dev->data->port_id);
 		eth_dev->tx_pkt_burst = virtio_xmit_pkts_simple;
+	} else if (hw->use_inorder_tx) {
+		PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
+			eth_dev->data->port_id);
+		eth_dev->tx_pkt_burst = virtio_xmit_inorder_pkts;
 	} else {
 		PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
 			eth_dev->data->port_id);
@@ -1871,23 +1880,25 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 
 	rte_spinlock_init(&hw->state_lock);
 
-	hw->use_simple_rx = 1;
-	hw->use_simple_tx = 1;
-
 #if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
 	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
 		hw->use_simple_rx = 0;
 		hw->use_simple_tx = 0;
 	}
 #endif
-	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
-		hw->use_simple_rx = 0;
-		hw->use_simple_tx = 0;
-	}
+	if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
+		if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+			hw->use_inorder_rx = 1;
+			hw->use_inorder_tx = 1;
+		} else {
+			hw->use_simple_rx = 1;
+			hw->use_simple_tx = 1;
+		}
 
-	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
-			   DEV_RX_OFFLOAD_TCP_CKSUM))
-		hw->use_simple_rx = 0;
+		if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
+				   DEV_RX_OFFLOAD_TCP_CKSUM))
+			hw->use_inorder_rx = 1;
+	}
 
 	return 0;
 }
diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
index 049344383..77f805df6 100644
--- a/drivers/net/virtio/virtio_pci.h
+++ b/drivers/net/virtio/virtio_pci.h
@@ -239,6 +239,8 @@ struct virtio_hw {
 	uint8_t modern;
 	uint8_t use_simple_rx;
 	uint8_t use_simple_tx;
+	uint8_t use_inorder_rx;
+	uint8_t use_inorder_tx;
 	uint16_t port_id;
 	uint8_t mac_addr[ETHER_ADDR_LEN];
 	uint32_t notify_off_multiplier;
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index d0473d6b4..b0852b721 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -604,7 +604,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
 	struct virtnet_rx *rxvq = &vq->rxq;
 	struct rte_mbuf *m;
 	uint16_t desc_idx;
-	int error, nbufs;
+	int error, nbufs, i;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -634,6 +634,24 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
 			virtio_rxq_rearm_vec(rxvq);
 			nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
 		}
+	} else if (hw->use_inorder_rx) {
+		if ((!virtqueue_full(vq))) {
+			uint16_t free_cnt = vq->vq_free_cnt;
+			struct rte_mbuf *pkts[free_cnt];
+
+			if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts, free_cnt)) {
+				error = virtqueue_enqueue_inorder_refill(vq,
+						pkts,
+						free_cnt);
+				if (unlikely(error)) {
+					for (i = 0; i < free_cnt; i++)
+						rte_pktmbuf_free(pkts[i]);
+				}
+			}
+
+			nbufs += free_cnt;
+			vq_update_avail_idx(vq);
+		}
 	} else {
 		while (!virtqueue_full(vq)) {
 			m = rte_mbuf_raw_alloc(rxvq->mpool);
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index a7d0a9cbe..56a77cc71 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -74,6 +74,14 @@ virtqueue_rxvq_flush(struct virtqueue *vq)
 			desc_idx = used_idx;
 			rte_pktmbuf_free(vq->sw_ring[desc_idx]);
 			vq->vq_free_cnt++;
+		} else if (hw->use_inorder_rx) {
+			desc_idx = (uint16_t)uep->id;
+			dxp = &vq->vq_descx[desc_idx];
+			if (dxp->cookie != NULL) {
+				rte_pktmbuf_free(dxp->cookie);
+				dxp->cookie = NULL;
+			}
+			vq_ring_free_inorder(vq, desc_idx, 1);
 		} else {
 			desc_idx = (uint16_t)uep->id;
 			dxp = &vq->vq_descx[desc_idx];
-- 
2.17.0