Subject: Re: [dpdk-dev] [PATCH v5 07/11] net/virtio: implement transmit path for packed queues
From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: Jens Freimann, dev@dpdk.org
Cc: tiwei.bie@intel.com
Date: Wed, 12 Sep 2018 16:58:24 +0200
In-Reply-To: <20180906181947.20646-8-jfreimann@redhat.com>

On 09/06/2018 08:19 PM, Jens Freimann wrote:
> This implements the transmit path for devices with
> support for packed virtqueues.
> 
> Add the feature bit and enable code to
> add buffers to vring and mark descriptors as available.
> 
> Signed-off-by: Jens Freimann
> ---
>  drivers/net/virtio/virtio_ethdev.c |   8 +-
>  drivers/net/virtio/virtio_ethdev.h |   2 +
>  drivers/net/virtio/virtio_rxtx.c   | 113 ++++++++++++++++++++++++++++-
>  3 files changed, 121 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
> index ad91f7f82..d2c5755bb 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -384,6 +384,8 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
>  	vq->hw = hw;
>  	vq->vq_queue_index = vtpci_queue_idx;
>  	vq->vq_nentries = vq_size;
> +	if (vtpci_packed_queue(hw))
> +		vq->vq_ring.avail_wrap_counter = 1;
>  
>  	/*
>  	 * Reserve a memzone for vring elements
> @@ -1338,7 +1340,11 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
>  		eth_dev->rx_pkt_burst = &virtio_recv_pkts;
>  	}
>  
> -	if (hw->use_inorder_tx) {
> +	if (vtpci_packed_queue(hw)) {
> +		PMD_INIT_LOG(INFO, "virtio: using virtio 1.1 Tx path on port %u",
> +			eth_dev->data->port_id);
> +		eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
> +	} else if (hw->use_inorder_tx) {
>  		PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
>  			eth_dev->data->port_id);
>  		eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
> diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
> index b726ad108..04161b461 100644
> --- a/drivers/net/virtio/virtio_ethdev.h
> +++ b/drivers/net/virtio/virtio_ethdev.h
> @@ -79,6 +79,8 @@ uint16_t virtio_recv_mergeable_pkts_inorder(void *rx_queue,
>  
>  uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
>  		uint16_t nb_pkts);
> +uint16_t virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
> +		uint16_t nb_pkts);
>  
>  uint16_t virtio_xmit_pkts_inorder(void *tx_queue, struct rte_mbuf **tx_pkts,
>  		uint16_t nb_pkts);
> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
> index eb891433e..12787070e 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -38,6 +38,112 @@
>  #define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
>  #endif
>  
> +
> +/* Cleanup from completed transmits. */
> +static void
> +virtio_xmit_cleanup_packed(struct virtqueue *vq)
> +{
> +	uint16_t idx;
> +	uint16_t size = vq->vq_nentries;
> +	struct vring_desc_packed *desc = vq->vq_ring.desc_packed;
> +	struct vq_desc_extra *dxp;
> +
> +	idx = vq->vq_used_cons_idx;
> +	while (desc_is_used(&desc[idx], &vq->vq_ring) &&
> +	       vq->vq_free_cnt < size) {
> +		dxp = &vq->vq_descx[idx];
> +		vq->vq_free_cnt += dxp->ndescs;
> +		idx += dxp->ndescs;
> +		idx = idx >= size ? idx - size : idx;
> +	}
> +}
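
One remark on the cleanup loop above: idx is only advanced locally and
vq->vq_used_cons_idx is never written back, so the next call re-scans the
same slots and vq_free_cnt can be credited twice for the same chains. A
minimal sketch of what I would expect instead (it assumes the ring also
tracks a used_wrap_counter that desc_is_used() consults, mirroring
avail_wrap_counter; that field is an assumption on my side, not something
this patch defines):

	idx = vq->vq_used_cons_idx;
	while (desc_is_used(&desc[idx], &vq->vq_ring) &&
	       vq->vq_free_cnt < size) {
		dxp = &vq->vq_descx[idx];
		vq->vq_free_cnt += dxp->ndescs;
		idx += dxp->ndescs;
		if (idx >= size) {
			/* Wrapped: toggle the used wrap counter so that
			 * desc_is_used() stays correct on the next lap. */
			idx -= size;
			vq->vq_ring.used_wrap_counter ^= 1;
		}
	}
	/* Remember where to resume on the next cleanup call. */
	vq->vq_used_cons_idx = idx;
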
> +
> +uint16_t
> +virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
> +		uint16_t nb_pkts)
> +{
> +	struct virtnet_tx *txvq = tx_queue;
> +	struct virtqueue *vq = txvq->vq;
> +	uint16_t i;
> +	struct vring_desc_packed *desc = vq->vq_ring.desc_packed;
> +	uint16_t idx, prev;
> +	struct vq_desc_extra *dxp;
> +
> +	if (unlikely(nb_pkts < 1))
> +		return nb_pkts;
> +
> +	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
> +
> +	if (likely(vq->vq_free_cnt < vq->vq_free_thresh))
> +		virtio_xmit_cleanup_packed(vq);
> +
> +	for (i = 0; i < nb_pkts; i++) {
> +		struct rte_mbuf *txm = tx_pkts[i];
> +		struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
> +		uint16_t head_idx;
> +		int wrap_counter;
> +		int descs_used;
> +
> +		if (unlikely(txm->nb_segs + 1 > vq->vq_free_cnt)) {
> +			virtio_xmit_cleanup_packed(vq);
> +
> +			if (unlikely(txm->nb_segs + 1 > vq->vq_free_cnt)) {
> +				PMD_TX_LOG(ERR,
> +					"No free tx descriptors to transmit");
> +				break;
> +			}
> +		}
> +
> +		txvq->stats.bytes += txm->pkt_len;
> +
> +		vq->vq_free_cnt -= txm->nb_segs + 1;
> +
> +		wrap_counter = vq->vq_ring.avail_wrap_counter;
> +		idx = vq->vq_avail_idx;
> +		head_idx = idx;
> +
> +		dxp = &vq->vq_descx[idx];
> +		if (dxp->cookie != NULL)
> +			rte_pktmbuf_free(dxp->cookie);
> +		dxp->cookie = txm;
> +
> +		desc[idx].addr = txvq->virtio_net_hdr_mem +
> +			RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
> +		desc[idx].len = vq->hw->vtnet_hdr_size;
> +		desc[idx].flags = VRING_DESC_F_NEXT |
> +			VRING_DESC_F_AVAIL(vq->vq_ring.avail_wrap_counter) |
> +			VRING_DESC_F_USED(!vq->vq_ring.avail_wrap_counter);
> +		descs_used = 1;
> +
> +		do {
> +			idx = update_pq_avail_index(vq);
> +			desc[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(txm, vq);
> +			desc[idx].len = txm->data_len;
> +			desc[idx].flags = VRING_DESC_F_NEXT |
> +				VRING_DESC_F_AVAIL(vq->vq_ring.avail_wrap_counter) |
> +				VRING_DESC_F_USED(!vq->vq_ring.avail_wrap_counter);
> +			descs_used++;
> +		} while ((txm = txm->next) != NULL);
> +
> +		desc[idx].flags &= ~VRING_DESC_F_NEXT;
> +
> +		rte_smp_wmb();
> +		prev = (idx > 0 ? idx : vq->vq_nentries) - 1;
> +		desc[prev].index = head_idx; //FIXME //FIXIT! :)
> +		desc[head_idx].flags =
> +			(VRING_DESC_F_AVAIL(wrap_counter) |
> +			 VRING_DESC_F_USED(!wrap_counter));
> +
> +		vq->vq_descx[head_idx].ndescs = descs_used;
> +		idx = update_pq_avail_index(vq);
> +	}
> +
> +	txvq->stats.packets += i;
> +	txvq->stats.errors += nb_pkts - i;
> +
> +	return i;
> +}
> +
>  int
>  virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
>  {
> @@ -736,7 +842,12 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
>  	if (hw->use_inorder_tx)
>  		vq->vq_ring.desc[vq->vq_nentries - 1].next = 0;
>  
> -	VIRTQUEUE_DUMP(vq);
> +	if (vtpci_packed_queue(hw)) {
> +		vq->vq_ring.avail_wrap_counter = 1;
> +	}
> +
> +	if (!vtpci_packed_queue(hw))
> +		VIRTQUEUE_DUMP(vq);

I guess the check isn't necessary anymore since support is added in patch 5.

> 
>  	return 0;
>  }
> 
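
Also, in virtio_xmit_pkts_packed() the final store to desc[head_idx].flags
rebuilds the flags from wrap_counter alone, which drops the
VRING_DESC_F_NEXT that was set when the header descriptor was filled.
Every packet here is at least a two-descriptor chain (virtio-net header
plus data), so I think the head needs to keep F_NEXT when it is exposed
to the device. A sketch of what I have in mind (compute head_flags up
front, store them only after the barrier, once the rest of the chain is
in place):

	uint16_t head_flags = VRING_DESC_F_NEXT |
		VRING_DESC_F_AVAIL(wrap_counter) |
		VRING_DESC_F_USED(!wrap_counter);

	/* ... fill the header and all data descriptors ... */

	rte_smp_wmb();	/* chain must be visible before the head flags */
	desc[head_idx].flags = head_flags;

This would also mean not setting the AVAIL/USED bits on the descriptor at
head_idx at fill time, otherwise the device can see the head before the
rest of the chain is written.
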