From: Jens Freimann
To: dev@dpdk.org
Cc: tiwei.bie@intel.com, yliu@fridaylinux.org, maxime.coquelin@redhat.com, mst@redhat.com
Date: Mon, 29 Jan 2018 15:11:36 +0100
Message-Id: <20180129141143.13437-8-jfreimann@redhat.com>
In-Reply-To: <20180129141143.13437-1-jfreimann@redhat.com>
References: <20180129141143.13437-1-jfreimann@redhat.com>
Subject: [dpdk-dev] [PATCH 07/14] net/virtio: implement transmit path for packed queues

This implements the transmit path for devices with support for
Virtio 1.1. Add the feature bit for Virtio 1.1 and enable code to
add buffers to the vring and mark descriptors as available.

This is based on a patch by Yuanhan Liu.

Signed-off-by: Jens Freimann
---
 drivers/net/virtio/Makefile          |   1 +
 drivers/net/virtio/virtio_ethdev.c   |  11 ++-
 drivers/net/virtio/virtio_ethdev.h   |   3 +
 drivers/net/virtio/virtio_rxtx.c     |   7 +-
 drivers/net/virtio/virtio_rxtx_1.1.c | 161 +++++++++++++++++++++++++++++++++++
 5 files changed, 180 insertions(+), 3 deletions(-)
 create mode 100644 drivers/net/virtio/virtio_rxtx_1.1.c

diff --git a/drivers/net/virtio/Makefile b/drivers/net/virtio/Makefile
index b215ada97..482bb2c52 100644
--- a/drivers/net/virtio/Makefile
+++ b/drivers/net/virtio/Makefile
@@ -27,6 +27,7 @@ LIBABIVER := 1
 SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtqueue.c
 SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_pci.c
 SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_rxtx_1.1.c
 SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_ethdev.c
 SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_rxtx_simple.c
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 76879d87f..b30e0d4a9 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -394,6 +394,8 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 	vq->hw = hw;
 	vq->vq_queue_index = vtpci_queue_idx;
 	vq->vq_nentries = vq_size;
+	if (vtpci_packed_queue(hw))
+		vq->vq_ring.avail_wrap_counter = 1;

 	/*
 	 * Reserve a memzone for vring elements
@@ -614,7 +616,8 @@ virtio_dev_close(struct rte_eth_dev *dev)
 	}

 	vtpci_reset(hw);
-	virtio_dev_free_mbufs(dev);
+	if (!vtpci_packed_queue(hw))
+		virtio_dev_free_mbufs(dev);
 	virtio_free_queues(hw);
 }

@@ -1371,7 +1374,11 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
 		eth_dev->rx_pkt_burst = &virtio_recv_pkts;
 	}

-	if (hw->use_simple_tx) {
+	if (vtpci_packed_queue(hw)) {
+		PMD_INIT_LOG(INFO, "virtio: using virtio 1.1 Tx path on port %u",
+			eth_dev->data->port_id);
+		eth_dev->tx_pkt_burst = virtio_xmit_pkts_1_1;
+	} else if (hw->use_simple_tx) {
on port %u", + eth_dev->data->port_id); + eth_dev->tx_pkt_burst = virtio_xmit_pkts_1_1; + } else if (hw->use_simple_tx) { PMD_INIT_LOG(INFO, "virtio: using simple Tx path on port %u", eth_dev->data->port_id); eth_dev->tx_pkt_burst = virtio_xmit_pkts_simple; diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h index 4539d2e44..0fee979cb 100644 --- a/drivers/net/virtio/virtio_ethdev.h +++ b/drivers/net/virtio/virtio_ethdev.h @@ -36,6 +36,7 @@ 1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE | \ 1u << VIRTIO_RING_F_INDIRECT_DESC | \ 1ULL << VIRTIO_F_VERSION_1 | \ + 1ULL << VIRTIO_F_PACKED | \ 1ULL << VIRTIO_F_IOMMU_PLATFORM) #define VIRTIO_PMD_SUPPORTED_GUEST_FEATURES \ @@ -77,6 +78,8 @@ uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); +uint16_t virtio_xmit_pkts_1_1(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); uint16_t virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts); diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c index 854af399e..39489e3f6 100644 --- a/drivers/net/virtio/virtio_rxtx.c +++ b/drivers/net/virtio/virtio_rxtx.c @@ -541,6 +541,10 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev, PMD_INIT_FUNC_TRACE(); + if (vtpci_packed_queue(hw)) { + vq->vq_ring.avail_wrap_counter = 1; + } + if (hw->use_simple_tx) { for (desc_idx = 0; desc_idx < mid_idx; desc_idx++) { vq->vq_ring.avail->ring[desc_idx] = @@ -561,7 +565,8 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev, vq->vq_ring.avail->ring[desc_idx] = desc_idx; } - VIRTQUEUE_DUMP(vq); + if (!vtpci_packed_queue(hw)) + VIRTQUEUE_DUMP(vq); return 0; } diff --git a/drivers/net/virtio/virtio_rxtx_1.1.c b/drivers/net/virtio/virtio_rxtx_1.1.c new file mode 100644 index 000000000..97a502212 --- /dev/null +++ b/drivers/net/virtio/virtio_rxtx_1.1.c @@ -0,0 +1,161 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_branch_prediction.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_prefetch.h>
+#include <rte_string_fns.h>
+#include <rte_errno.h>
+#include <rte_byteorder.h>
+#include <rte_cpuflags.h>
+#include <rte_net.h>
+#include <rte_ip.h>
+#include <rte_udp.h>
+#include <rte_tcp.h>
+
+#include "virtio_logs.h"
+#include "virtio_ethdev.h"
+#include "virtio_pci.h"
+#include "virtqueue.h"
+#include "virtio_rxtx.h"
+#include "virtio_ring.h"
+
+/* Cleanup from completed transmits. */
+static void
+virtio_xmit_cleanup(struct virtqueue *vq)
+{
+	uint16_t idx;
+	uint16_t size = vq->vq_nentries;
+	struct vring_desc_1_1 *desc = vq->vq_ring.desc_1_1;
+
+	idx = vq->vq_used_cons_idx & (size - 1);
+	while (desc_is_used(&desc[idx]) &&
+	       vq->vq_free_cnt < size) {
+		while (desc[idx].flags & VRING_DESC_F_NEXT) {
+			vq->vq_free_cnt++;
+			idx = ++vq->vq_used_cons_idx & (size - 1);
+		}
+		vq->vq_free_cnt++;
+		idx = ++vq->vq_used_cons_idx & (size - 1);
+	}
+}
+
+uint16_t
+virtio_xmit_pkts_1_1(void *tx_queue, struct rte_mbuf **tx_pkts,
+		     uint16_t nb_pkts)
+{
+	struct virtnet_tx *txvq = tx_queue;
+	struct virtqueue *vq = txvq->vq;
+	uint16_t i;
+	struct vring_desc_1_1 *desc = vq->vq_ring.desc_1_1;
+	uint16_t idx;
+	struct vq_desc_extra *dxp;
+
+	if (unlikely(nb_pkts < 1))
+		return nb_pkts;
+
+	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
+
+	if (likely(vq->vq_free_cnt < vq->vq_free_thresh))
+		virtio_xmit_cleanup(vq);
+
+	for (i = 0; i < nb_pkts; i++) {
+		struct rte_mbuf *txm = tx_pkts[i];
+		struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
+		uint16_t head_idx;
+		int wrap_counter;
+
+		if (unlikely(txm->nb_segs + 1 > vq->vq_free_cnt)) {
+			virtio_xmit_cleanup(vq);
+
+			if (unlikely(txm->nb_segs + 1 > vq->vq_free_cnt)) {
+				PMD_TX_LOG(ERR,
+					   "No free tx descriptors to transmit");
+				break;
+			}
+		}
+
+		txvq->stats.bytes += txm->pkt_len;
+
+		vq->vq_free_cnt -= txm->nb_segs + 1;
+
+		idx = (vq->vq_avail_idx++) & (vq->vq_nentries - 1);
+		head_idx = idx;
+		wrap_counter = vq->vq_ring.avail_wrap_counter;
+
+		if ((vq->vq_avail_idx & (vq->vq_nentries - 1)) == 0)
+			toggle_wrap_counter(&vq->vq_ring);
+
+		dxp = &vq->vq_descx[idx];
+		if (dxp->cookie != NULL)
+			rte_pktmbuf_free(dxp->cookie);
+		dxp->cookie = txm;
+
+		desc[idx].addr = txvq->virtio_net_hdr_mem +
+				 RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
+		desc[idx].len = vq->hw->vtnet_hdr_size;
+		desc[idx].flags |= VRING_DESC_F_NEXT;
+
+		do {
+			idx = (vq->vq_avail_idx++) & (vq->vq_nentries - 1);
+			desc[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(txm, vq);
+			desc[idx].len = txm->data_len;
+			desc[idx].flags |= VRING_DESC_F_NEXT;
+			if (idx == (vq->vq_nentries - 1))
+				toggle_wrap_counter(&vq->vq_ring);
+		} while ((txm = txm->next) != NULL);
+
+		desc[idx].flags &= ~VRING_DESC_F_NEXT;
+
+		rte_smp_wmb();
+		_set_desc_avail(&desc[head_idx], wrap_counter);
+	}
+
+	txvq->stats.packets += i;
+	txvq->stats.errors += nb_pkts - i;
+
+	return i;
+}
-- 
2.14.3
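
For reviewers new to the packed-ring layout: the transmit path above leans on small helpers added by the virtio_ring.h changes earlier in this series, namely desc_is_used(), toggle_wrap_counter() and _set_desc_avail(). Below is a rough, self-contained sketch of what they do. The structure layouts, type names (vring_1_1 here) and flag bit positions follow the Virtio 1.1 packed-ring draft and are assumptions for illustration, not the series' exact definitions, which operate on the extended struct vring used in the patch.

#include <stdint.h>

/* Flag bits per the Virtio 1.1 packed-ring draft. */
#define VRING_DESC_F_AVAIL_1_1	(1 << 7)
#define VRING_DESC_F_USED_1_1	(1 << 15)

/* Minimal stand-ins for the structures defined earlier in the series. */
struct vring_desc_1_1 {
	uint64_t addr;
	uint32_t len;
	uint16_t index;
	uint16_t flags;
};

struct vring_1_1 {
	unsigned int num;
	struct vring_desc_1_1 *desc_1_1;
	int avail_wrap_counter;
};

/* Flip the driver's wrap counter each time the avail index wraps. */
static inline void
toggle_wrap_counter(struct vring_1_1 *vr)
{
	vr->avail_wrap_counter ^= 1;
}

/*
 * Make a descriptor available to the device: AVAIL mirrors the driver's
 * current wrap counter and USED is set to the opposite value, so the two
 * bits disagree until the device consumes the descriptor.
 */
static inline void
_set_desc_avail(struct vring_desc_1_1 *desc, int wrap_counter)
{
	uint16_t flags = desc->flags;

	if (wrap_counter) {
		flags |= VRING_DESC_F_AVAIL_1_1;
		flags &= ~VRING_DESC_F_USED_1_1;
	} else {
		flags &= ~VRING_DESC_F_AVAIL_1_1;
		flags |= VRING_DESC_F_USED_1_1;
	}
	desc->flags = flags;
}

/*
 * The device marks a descriptor used by making USED match AVAIL again,
 * so "used" simply means the two bits agree.
 */
static inline int
desc_is_used(struct vring_desc_1_1 *desc)
{
	int avail = !!(desc->flags & VRING_DESC_F_AVAIL_1_1);
	int used  = !!(desc->flags & VRING_DESC_F_USED_1_1);

	return avail == used;
}

This is why virtio_xmit_pkts_1_1() records the wrap counter for the head descriptor before filling the chain, writes the head's availability last after rte_smp_wmb(): the device must not see the head marked available until the rest of the chain is in place, and virtio_xmit_cleanup() can later reclaim descriptors once desc_is_used() reports that the device has flipped them back.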