From mboxrd@z Thu Jan  1 00:00:00 1970
From: Tiwei Bie
To: dev@dpdk.org
Cc: Yuanhan Liu
Date: Wed, 21 Jun 2017 10:57:49 +0800
Message-Id: <1498013885-102779-14-git-send-email-tiwei.bie@intel.com>
X-Mailer: git-send-email 2.7.4
In-Reply-To: <1498013885-102779-1-git-send-email-tiwei.bie@intel.com>
References: <1498013885-102779-1-git-send-email-tiwei.bie@intel.com>
Subject: [dpdk-dev] [RFC 13/29] net/virtio: implement the Rx code path

From: Yuanhan Liu

Just make it stick to the non-mergeable code path for now; it should
be easy to add the mergeable support later.

Signed-off-by: Yuanhan Liu
---
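A minimal sketch of the descriptor ownership handshake the new Rx path
relies on (struct vring_desc_1_1 and DESC_HW come from an earlier patch
in this series; the flag value and field widths below are assumptions
restated for illustration, not the authoritative definitions):

  #include <stdint.h>

  #define VRING_DESC_F_WRITE 2       /* device write-only buffer (virtio spec) */
  #define DESC_HW            0x0080  /* assumed value; set => device owns slot */

  struct vring_desc_1_1 {            /* one ring slot, shared with the device */
          uint64_t addr;             /* guest-physical buffer address */
          uint32_t len;              /* buffer length; on Rx the device
                                      * writes the packet length back here */
          uint16_t index;            /* descriptor id, set at init time */
          uint16_t flags;            /* VRING_DESC_F_* plus DESC_HW */
  };

  /*
   * Hand one Rx buffer to the device, mirroring
   * virtqueue_enqueue_recv_refill_1_1() below: fill the slot first,
   * publish ownership last, with a write barrier in between so the
   * device can never observe DESC_HW before addr/len are valid.
   */
  static void
  give_to_device(struct vring_desc_1_1 *slot, uint64_t addr, uint32_t len)
  {
          slot->addr  = addr;
          slot->len   = len;
          slot->flags = VRING_DESC_F_WRITE;
          __atomic_thread_fence(__ATOMIC_RELEASE); /* rte_smp_wmb() stand-in */
          slot->flags |= DESC_HW;
  }

The device hands a slot back by clearing DESC_HW once it has written
the packet, which is why both the refill and the dequeue side can poll
a single flag instead of consulting a separate used ring.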
 drivers/net/virtio/virtio_ethdev.c |   5 +-
 drivers/net/virtio/virtio_rxtx.c   | 121 ++++++++++++++++++++++++++++++++++---
 drivers/net/virtio/virtqueue.h     |   1 +
 3 files changed, 116 insertions(+), 11 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 35ce07d..8b754ac 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1241,7 +1241,7 @@ static void
 rx_func_get(struct rte_eth_dev *eth_dev)
 {
 	struct virtio_hw *hw = eth_dev->data->dev_private;
-	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
+	if (0 && vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
 		eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
 	else
 		eth_dev->rx_pkt_burst = &virtio_recv_pkts;
@@ -1373,7 +1373,8 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
 
 	/* Setting up rx_header size for the device */
 	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
-	    vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
+	    vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
+	    vtpci_with_feature(hw, VIRTIO_F_VERSION_1_1))
 		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 	else
 		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index c49ac0d..3be64da 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -115,8 +115,8 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
 	dp->next = VQ_RING_DESC_CHAIN_END;
 }
 
-static uint16_t
-virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
+static inline uint16_t
+virtqueue_dequeue_burst_rx_1_0(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
 			   uint32_t *len, uint16_t num)
 {
 	struct vring_used_elem *uep;
@@ -149,6 +149,51 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
 	return i;
 }
 
+static inline uint16_t
+virtqueue_dequeue_burst_rx_1_1(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
+			       uint32_t *len, uint16_t num)
+{
+	struct vring_desc_1_1 *desc = vq->vq_ring.desc_1_1;
+	struct rte_mbuf *cookie;
+	uint16_t used_idx;
+	uint16_t i;
+
+	for (i = 0; i < num; i++) {
+		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
+		if (desc[used_idx].flags & DESC_HW)
+			break;
+
+		len[i] = desc[used_idx].len;
+		cookie = vq->vq_descx[used_idx].cookie;
+
+		if (unlikely(cookie == NULL)) {
+			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
+				    vq->vq_used_cons_idx);
+			break;
+		}
+		vq->vq_descx[used_idx].cookie = NULL;
+
+		rte_prefetch0(cookie);
+		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
+		rx_pkts[i] = cookie;
+
+		vq->vq_used_cons_idx++;
+		vq->vq_free_cnt++;
+	}
+
+	return i;
+}
+
+static inline uint16_t
+virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
+			   uint32_t *len, uint16_t num)
+{
+	if (vtpci_version_1_1(vq->hw))
+		return virtqueue_dequeue_burst_rx_1_1(vq, rx_pkts, len, num);
+	else
+		return virtqueue_dequeue_burst_rx_1_0(vq, rx_pkts, len, num);
+}
+
 #ifndef DEFAULT_TX_FREE_THRESH
 #define DEFAULT_TX_FREE_THRESH 32
 #endif
@@ -179,7 +224,7 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
 }
 
 static inline int
-virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
+virtqueue_enqueue_recv_refill_1_0(struct virtqueue *vq, struct rte_mbuf *cookie)
 {
 	struct vq_desc_extra *dxp;
 	struct virtio_hw *hw = vq->hw;
@@ -218,6 +263,53 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
 	return 0;
 }
 
+static inline int
+virtqueue_enqueue_recv_refill_1_1(struct virtqueue *vq, struct rte_mbuf *cookie)
+{
+	struct vq_desc_extra *dxp;
+	struct virtio_hw *hw = vq->hw;
+	uint16_t needed = 1;
+	uint16_t idx;
+	struct vring_desc_1_1 *desc = vq->vq_ring.desc_1_1;
+
+	if (unlikely(vq->vq_free_cnt == 0))
+		return -ENOSPC;
+	if (unlikely(vq->vq_free_cnt < needed))
+		return -EMSGSIZE;
+
+	idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
+	if (unlikely(desc[idx].flags & DESC_HW))
+		return -EFAULT;
+
+	dxp = &vq->vq_descx[idx];
+	dxp->cookie = cookie;
+	dxp->ndescs = needed;
+
+	desc[idx].addr =
+		VIRTIO_MBUF_ADDR(cookie, vq) +
+		RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+	desc[idx].len =
+		cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
+	desc[idx].flags = VRING_DESC_F_WRITE;
+	vq->vq_desc_head_idx++;
+
+	vq->vq_free_cnt -= needed;
+
+	rte_smp_wmb();
+	desc[idx].flags |= DESC_HW;
+
+	return 0;
+}
+
+static inline int
+virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
+{
+	if (vtpci_version_1_1(vq->hw))
+		return virtqueue_enqueue_recv_refill_1_1(vq, cookie);
+	else
+		return virtqueue_enqueue_recv_refill_1_0(vq, cookie);
+}
+
 static inline void
 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 		       uint16_t needed)
@@ -288,9 +380,6 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (vtpci_version_1_1(hw))
-		return 0;
-
 	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
 		nb_desc = vq->vq_nentries;
 	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
@@ -343,7 +432,8 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			nbufs++;
 		}
 
-		vq_update_avail_idx(vq);
+		if (!vtpci_version_1_1(hw))
+			vq_update_avail_idx(vq);
 
 		PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
 
@@ -368,6 +458,10 @@ virtio_update_rxtx_handler(struct rte_eth_dev *dev,
 	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
 		use_simple_rxtx = 1;
 #endif
+
+	if (vtpci_version_1_1(hw))
+		use_simple_rxtx = 0;
+
 	/* Use simple rx/tx func if single segment and no offloads */
 	if (use_simple_rxtx &&
 	    (tx_conf->txq_flags & VIRTIO_SIMPLE_FLAGS) == VIRTIO_SIMPLE_FLAGS &&
@@ -604,7 +698,16 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	if (unlikely(hw->started == 0))
 		return nb_rx;
 
-	nb_used = VIRTQUEUE_NUSED(vq);
+	/*
+	 * There is no way to know how many used entries there are
+	 * without scanning the descriptors for virtio 1.1. Thus,
+	 * simply set nb_used to nb_pkts and let
+	 * virtqueue_dequeue_burst_rx() figure out the real number.
+	 */
+	if (vtpci_version_1_1(hw))
+		nb_used = nb_pkts;
+	else
+		nb_used = VIRTQUEUE_NUSED(vq);
 
 	virtio_rmb();
 
@@ -681,7 +784,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		nb_enqueued++;
 	}
 
-	if (likely(nb_enqueued)) {
+	if (likely(nb_enqueued) && !vtpci_version_1_1(hw)) {
 		vq_update_avail_idx(vq);
 
 		if (unlikely(virtqueue_kick_prepare(vq))) {
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 91d2db7..45f49d7 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -272,6 +272,7 @@ vring_desc_init_1_1(struct vring *vr, int n)
 	int i;
 	for (i = 0; i < n; i++) {
 		struct vring_desc_1_1 *desc = &vr->desc_1_1[i];
+		desc->flags = 0;
 		desc->index = i;
 	}
 }
-- 
2.7.4
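A footnote on the nb_used change in virtio_recv_pkts() above: a 1.1
ring has no used index, so nb_pkts is only an upper bound and the
dequeue loop itself discovers how many descriptors are actually ready.
A self-contained model of that consumer scan (names mirror the patch;
the DESC_HW value is assumed as in the earlier sketch; nentries must be
a power of two for the mask to be correct):

  #include <stdint.h>

  #define DESC_HW 0x0080  /* assumed value; set => device owns the slot */

  struct vring_desc_1_1 {
          uint64_t addr;
          uint32_t len;
          uint16_t index;
          uint16_t flags;
  };

  /*
   * Scan forward from the consumer index until hitting a descriptor
   * the device still owns (DESC_HW set). "num" is only an upper
   * bound -- exactly why virtio_recv_pkts() can pass nb_pkts as
   * nb_used for virtio 1.1.
   */
  static uint16_t
  poll_used_1_1(struct vring_desc_1_1 *desc, uint16_t nentries,
                uint16_t *cons_idx, uint32_t *len, uint16_t num)
  {
          uint16_t i;

          for (i = 0; i < num; i++) {
                  uint16_t idx = *cons_idx & (nentries - 1);

                  if (desc[idx].flags & DESC_HW)  /* not written back yet */
                          break;
                  len[i] = desc[idx].len;         /* packet length from device */
                  (*cons_idx)++;
          }
          return i;  /* the real number of used descriptors */
  }

Keeping the consumer state in a free-running index that is masked on
each use, as the patch does, avoids an explicit wrap-around check.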