Date: Thu, 25 Oct 2018 17:39:09 +0800
From: Tiwei Bie
To: Jens Freimann
Cc: dev@dpdk.org, maxime.coquelin@redhat.com, zhihong.wang@intel.com
Subject: Re: [dpdk-dev] [PATCH v9 6/8] net/virtio: implement receive path for packed queues
Message-ID: <20181025093909.GF22179@debian>
References: <20181024143236.21271-1-jfreimann@redhat.com>
 <20181024143236.21271-7-jfreimann@redhat.com>
In-Reply-To: <20181024143236.21271-7-jfreimann@redhat.com>

On Wed, Oct 24, 2018 at 04:32:34PM +0200, Jens Freimann wrote:
> Implement the receive part.
> 
> Signed-off-by: Jens Freimann
> ---
>  drivers/net/virtio/virtio_ethdev.c |  18 +-
>  drivers/net/virtio/virtio_ethdev.h |   2 +
>  drivers/net/virtio/virtio_rxtx.c   | 280 ++++++++++++++++++++++++++---
>  drivers/net/virtio/virtqueue.c     |  22 +++
>  drivers/net/virtio/virtqueue.h     |   2 +-
>  5 files changed, 297 insertions(+), 27 deletions(-)
> 
> diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
> index d2118e6a9..7f81d24aa 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -384,8 +384,10 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
>  	vq->hw = hw;
>  	vq->vq_queue_index = vtpci_queue_idx;
>  	vq->vq_nentries = vq_size;
> -	if (vtpci_packed_queue(hw))
> +	if (vtpci_packed_queue(hw)) {
>  		vq->vq_ring.avail_wrap_counter = 1;
> +		vq->vq_ring.used_wrap_counter = 1;

Why is used_wrap_counter only used in the receive path?

> +	}
> 
>  	/*
>  	 * Reserve a memzone for vring elements
> @@ -1326,7 +1328,13 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
>  {
>  	struct virtio_hw *hw = eth_dev->data->dev_private;
> 
> -	if (hw->use_simple_rx) {
> +	if (vtpci_packed_queue(hw)) {
> +		if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
> +			eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
> +		} else {
> +			eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;

The indent is wrong.

> +		}
> +	} else if (hw->use_simple_rx) {

Please make the structure clearer, e.g.

	if (vtpci_packed_queue(hw)) {
		// packed ring
	} else {
		// split ring
	}
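Something along these lines, just to show the shape I mean (an untested
sketch based only on the hunks quoted in this mail; the split ring branch
is the existing selection logic moved into the else, with the parts not
quoted here elided):

	if (vtpci_packed_queue(hw)) {
		/* packed ring */
		if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
			eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
		else
			eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
	} else {
		/* split ring */
		if (hw->use_simple_rx) {
			PMD_INIT_LOG(INFO,
				"virtio: using simple Rx path on port %u",
				eth_dev->data->port_id);
			eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
		} else {
			/* remaining split ring Rx paths, unchanged */
		}
	}

That way the packed/split decision is made exactly once at the top level,
and each branch only deals with its own ring type.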
>  		PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
>  			eth_dev->data->port_id);
>  		eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
> @@ -1490,7 +1498,8 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
> 
>  	/* Setting up rx_header size for the device */
>  	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
> -	    vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
> +	    vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
> +	    vtpci_with_feature(hw, VIRTIO_F_RING_PACKED))
>  		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
>  	else
>  		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
> @@ -1918,7 +1927,8 @@ virtio_dev_configure(struct rte_eth_dev *dev)
> 
>  	rte_spinlock_init(&hw->state_lock);
> 
> -	hw->use_simple_rx = 1;
> +	if (!vtpci_packed_queue(hw))
> +		hw->use_simple_rx = 1;
> 
>  	if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
>  		hw->use_inorder_tx = 1;
> diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
> index 05d355180..6c9247639 100644
> --- a/drivers/net/virtio/virtio_ethdev.h
> +++ b/drivers/net/virtio/virtio_ethdev.h
> @@ -73,6 +73,8 @@ int virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
> 
>  uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>  		uint16_t nb_pkts);
> +uint16_t virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
> +		uint16_t nb_pkts);
> 
>  uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>  		uint16_t nb_pkts);
> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
> index 837457243..42702527c 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -31,6 +31,7 @@
>  #include "virtqueue.h"
>  #include "virtio_rxtx.h"
>  #include "virtio_rxtx_simple.h"
> +#include "virtio_ring.h"
> 
>  #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
>  #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
> @@ -89,7 +90,7 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
>  }
> 
>  void
> -vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t desc_idx, used_idx)
> +vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t desc_idx, uint16_t used_idx)
>  {
>  	struct vring_desc_packed *dp;
>  	struct vq_desc_extra *dxp = NULL, *dxp_tail = NULL;
> @@ -102,7 +103,6 @@ vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t desc_idx, used_idx)
>  	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
>  		while (dp->flags & VRING_DESC_F_NEXT && i < dxp->ndescs) {
>  			desc_idx_last = dxp->next;
> -			dp = &vq->vq_ring.desc_packed[dxp->next];

Why was the above line added by an earlier patch in this series, if it is
removed again here?
>  			dxp = &vq->vq_descx[dxp->next];
>  			i++;
>  		}
> @@ -124,6 +124,47 @@ vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t desc_idx, used_idx)
>  	dxp->next = VQ_RING_DESC_CHAIN_END;
>  }
> 
> +static uint16_t
> +virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
> +				  struct rte_mbuf **rx_pkts,
> +				  uint32_t *len,
> +				  uint16_t num)
> +{
> +	struct rte_mbuf *cookie;
> +	uint16_t used_idx;
> +	uint16_t id;
> +	struct vring_desc_packed *desc;
> +	uint16_t i;
> +
> +	for (i = 0; i < num; i++) {
> +		used_idx = vq->vq_used_cons_idx;
> +		desc = (struct vring_desc_packed *) vq->vq_ring.desc_packed;
> +		if (!desc_is_used(&desc[used_idx], &vq->vq_ring))
> +			return i;
> +		len[i] = desc[used_idx].len;
> +		id = desc[used_idx].index;
> +		cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
> +
> +		if (unlikely(cookie == NULL)) {
> +			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
> +				vq->vq_used_cons_idx);
> +			break;
> +		}
> +		rte_prefetch0(cookie);
> +		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
> +		rx_pkts[i] = cookie;
> +		vq_ring_free_chain_packed(vq, id, used_idx);
> +
> +		vq->vq_used_cons_idx += vq->vq_descx[id].ndescs;
> +		if (vq->vq_used_cons_idx >= vq->vq_nentries) {
> +			vq->vq_used_cons_idx -= vq->vq_nentries;
> +			vq->vq_ring.used_wrap_counter ^= 1;
> +		}
> +	}
> +
> +	return i;
> +}
> +
>  static uint16_t
>  virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
>  			   uint32_t *len, uint16_t num)
> @@ -369,6 +410,55 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
>  	return 0;
>  }
> 
> +static inline int
> +virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq, struct rte_mbuf *cookie)
> +{
> +	struct vq_desc_extra *dxp;
> +	struct virtio_hw *hw = vq->hw;
> +	struct vring_desc_packed *start_dp;
> +	uint16_t needed = 1;
> +	uint16_t head_idx, idx;
> +	uint16_t flags;
> +
> +	if (unlikely(vq->vq_free_cnt == 0))
> +		return -ENOSPC;
> +	if (unlikely(vq->vq_free_cnt < needed))
> +		return -EMSGSIZE;
> +
> +	head_idx = vq->vq_desc_head_idx;
> +	if (unlikely(head_idx >= vq->vq_nentries))
> +		return -EFAULT;
> +
> +	idx = head_idx;
> +	dxp = &vq->vq_descx[idx];
> +	dxp->cookie = (void *)cookie;
> +	dxp->ndescs = needed;
> +
> +	start_dp = vq->vq_ring.desc_packed;
> +	start_dp[idx].addr =
> +		VIRTIO_MBUF_ADDR(cookie, vq) +
> +		RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
> +	start_dp[idx].len =
> +		cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
> +	flags = VRING_DESC_F_WRITE;
> +	flags |= VRING_DESC_F_AVAIL(vq->vq_ring.avail_wrap_counter) |
> +		 VRING_DESC_F_USED(!vq->vq_ring.avail_wrap_counter);
> +	rte_smp_wmb();
> +	start_dp[idx].flags = flags;
> +	idx = dxp->next;

We should always use the descriptors of the packed ring in ring order
(it is the elements of vq_descx[] that we have to use out of order, when
the device processes descriptors out of order).
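Just to illustrate the point about ring order (an untested sketch of the
refill body, using only fields this patch already has; it assumes one
descriptor per buffer, so the slot index can double as the buffer id):

	/* always take the next slot in ring order instead of
	 * following the vq_descx free chain */
	uint16_t idx = vq->vq_avail_idx;
	struct vring_desc_packed *dp = &vq->vq_ring.desc_packed[idx];
	struct vq_desc_extra *dxp = &vq->vq_descx[idx];

	dxp->cookie = (void *)cookie;
	dxp->ndescs = 1;

	dp->addr = VIRTIO_MBUF_ADDR(cookie, vq) +
		   RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
	dp->len = cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
	/* the buffer id comes back in the used descriptor and is used to
	 * look up vq_descx[], possibly out of order */
	dp->index = idx;

	/* make addr/len/index visible before flags */
	rte_smp_wmb();
	dp->flags = VRING_DESC_F_WRITE |
		    VRING_DESC_F_AVAIL(vq->vq_ring.avail_wrap_counter) |
		    VRING_DESC_F_USED(!vq->vq_ring.avail_wrap_counter);

	vq->vq_free_cnt--;
	if (++vq->vq_avail_idx >= vq->vq_nentries) {
		vq->vq_avail_idx = 0;
		vq->vq_ring.avail_wrap_counter ^= 1;
	}

The descriptor ring itself is then always filled in ring order, and only
the vq_descx[] bookkeeping needs to tolerate out-of-order completion.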
> +	vq->vq_desc_head_idx = idx;
> +	if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
> +		vq->vq_desc_tail_idx = idx;
> +	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
> +
> +	vq->vq_avail_idx += needed;
> +	if (vq->vq_avail_idx >= vq->vq_nentries) {
> +		vq->vq_avail_idx = 0;
> +		vq->vq_ring.avail_wrap_counter ^= 1;
> +	}
> +
> +	return 0;
> +}

[...]

>  uint16_t
>  virtio_recv_mergeable_pkts_inorder(void *rx_queue,
>  			struct rte_mbuf **rx_pkts,
> @@ -1385,12 +1582,20 @@ virtio_recv_mergeable_pkts(void *rx_queue,
>  	uint16_t extra_idx;
>  	uint32_t seg_res;
>  	uint32_t hdr_size;
> +	uint32_t rx_num = 0;
> 
>  	nb_rx = 0;
>  	if (unlikely(hw->started == 0))
>  		return nb_rx;
> 
> -	nb_used = VIRTQUEUE_NUSED(vq);
> +	if (vtpci_packed_queue(vq->hw)) {
> +		if (!desc_is_used(&vq->vq_ring.desc_packed[vq->vq_used_cons_idx],
> +				  &vq->vq_ring))
> +			return 0;
> +		nb_used = VIRTIO_MBUF_BURST_SZ;
> +	} else {
> +		nb_used = VIRTQUEUE_NUSED(vq);
> +	}
> 
>  	virtio_rmb();
> 
> @@ -1403,13 +1608,20 @@ virtio_recv_mergeable_pkts(void *rx_queue,
>  	seg_res = 0;
>  	hdr_size = hw->vtnet_hdr_size;
> 
> +

No need to add the above empty line.

>  	while (i < nb_used) {
>  		struct virtio_net_hdr_mrg_rxbuf *header;
> 
>  		if (nb_rx == nb_pkts)
>  			break;
> 
> -		num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1);
> +		if (vtpci_packed_queue(vq->hw))
> +			num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
> +					len, 1);
> +		else
> +			num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1);
> +		if (num == 0)
> +			return nb_rx;
>  		if (num != 1)
>  			continue;
> 

[...]