From: Jens Freimann <jfreimann@redhat.com>
To: dev@dpdk.org
Cc: tiwei.bie@intel.com, maxime.coquelin@redhat.com, Gavin.Hu@arm.com
Date: Fri, 21 Sep 2018 12:33:04 +0200
Message-Id: <20180921103308.16357-8-jfreimann@redhat.com>
In-Reply-To: <20180921103308.16357-1-jfreimann@redhat.com>
References: <20180921103308.16357-1-jfreimann@redhat.com>
Subject: [dpdk-dev] [PATCH v6 07/11] net/virtio: implement receive path for packed queues

Implement the receive part for packed virtqueues: initialize the used
wrap counter next to the avail one, fill all Rx descriptors at queue
setup time, and add virtio_recv_pkts_packed(), which dequeues used
descriptors from the packed ring and refills them in place. The new
function is selected in set_rxtx_funcs(), and VIRTIO_F_RING_PACKED is
taken into account when choosing vtnet_hdr_size.

Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
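A note for reviewers, since the wrap-counter handling below is easy to
misread: a packed ring has no separate avail/used rings, only one
descriptor ring plus a single-bit wrap counter on each side. The
standalone sketch below models just that protocol. It is illustrative
only: the struct, the drv_*/dev_* helpers and RING_SIZE are made up
for the example and merely mirror what set_desc_avail() and
desc_is_used() do; the AVAIL/USED bit positions (7 and 15) are the
ones from the VIRTIO 1.1 spec.

/* Standalone model of the packed-ring avail/used wrap protocol. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 4
#define F_AVAIL   (1u << 7)   /* VIRTQ_DESC_F_AVAIL */
#define F_USED    (1u << 15)  /* VIRTQ_DESC_F_USED */

struct pdesc {
	uint16_t id;
	uint16_t flags;
};

/* Driver makes a descriptor available: AVAIL is set to the driver's
 * avail wrap counter, USED to its inverse. */
static void
drv_set_avail(struct pdesc *d, int wrap)
{
	d->flags = wrap ? F_AVAIL : F_USED;
}

/* Device marks a descriptor used: AVAIL and USED are both set to the
 * device's used wrap counter, i.e. the two bits become equal. */
static void
dev_set_used(struct pdesc *d, int wrap)
{
	d->flags = wrap ? (F_AVAIL | F_USED) : 0;
}

/* Driver-side check, equivalent to desc_is_used(): the two bits are
 * equal and match the driver's used wrap counter. */
static int
drv_is_used(const struct pdesc *d, int wrap)
{
	int avail = !!(d->flags & F_AVAIL);
	int used = !!(d->flags & F_USED);

	return avail == used && used == wrap;
}

int
main(void)
{
	struct pdesc ring[RING_SIZE] = {0};
	int avail_wrap = 1, used_wrap = 1;
	int pass, i;

	/* Two full passes over the ring: each side flips its wrap
	 * counter when it wraps, so descriptors left over from the
	 * previous lap are never mistaken for fresh ones. */
	for (pass = 0; pass < 2; pass++) {
		for (i = 0; i < RING_SIZE; i++) {
			drv_set_avail(&ring[i], avail_wrap);
			assert(!drv_is_used(&ring[i], used_wrap));
		}
		avail_wrap ^= 1;

		for (i = 0; i < RING_SIZE; i++) {
			dev_set_used(&ring[i], used_wrap);
			assert(drv_is_used(&ring[i], used_wrap));
		}
		used_wrap ^= 1;
	}

	printf("wrap protocol ok\n");
	return 0;
}

This is also why the barriers in the diff sit where they do: the flags
word is the publication point, so addr/len must be written before the
rte_smp_wmb() that precedes the flags update, and must only be read
after desc_is_used() has seen the flags (hence the rte_smp_rmb() right
after the check).
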
 drivers/net/virtio/virtio_ethdev.c |  15 +++-
 drivers/net/virtio/virtio_ethdev.h |   2 +
 drivers/net/virtio/virtio_rxtx.c   | 130 +++++++++++++++++++++++++++++
 3 files changed, 144 insertions(+), 3 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 5c28af282..f03cf04a9 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -384,8 +384,10 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 	vq->hw = hw;
 	vq->vq_queue_index = vtpci_queue_idx;
 	vq->vq_nentries = vq_size;
-	if (vtpci_packed_queue(hw))
+	if (vtpci_packed_queue(hw)) {
 		vq->vq_ring.avail_wrap_counter = 1;
+		vq->vq_ring.used_wrap_counter = 1;
+	}
 
 	/*
 	 * Reserve a memzone for vring elements
@@ -1320,7 +1322,13 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
 {
 	struct virtio_hw *hw = eth_dev->data->dev_private;
 
-	if (hw->use_simple_rx) {
+	/*
+	 * workaround for packed vqs, which don't support
+	 * mrg_rxbuf at this point
+	 */
+	if (vtpci_packed_queue(hw)) {
+		eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
+	} else if (hw->use_simple_rx) {
 		PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
 			eth_dev->data->port_id);
 		eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
@@ -1484,7 +1492,8 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
 
 	/* Setting up rx_header size for the device */
 	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
-	    vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
+	    vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
+	    vtpci_with_feature(hw, VIRTIO_F_RING_PACKED))
 		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 	else
 		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index 04161b461..25eaff224 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -70,6 +70,8 @@ int virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
 
 uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts);
+uint16_t virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts);
 
 uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts);
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index ea6300563..90a0e306f 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -31,6 +31,7 @@
 #include "virtqueue.h"
 #include "virtio_rxtx.h"
 #include "virtio_rxtx_simple.h"
+#include "virtio_ring.h"
 
 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
@@ -753,6 +754,34 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (vtpci_packed_queue(hw)) {
+		struct vring_desc_packed *desc;
+		struct vq_desc_extra *dxp;
+
+		for (desc_idx = 0; desc_idx < vq->vq_nentries;
+				desc_idx++) {
+			m = rte_mbuf_raw_alloc(rxvq->mpool);
+			if (unlikely(m == NULL))
+				return -ENOMEM;
+
+			dxp = &vq->vq_descx[desc_idx];
+			dxp->cookie = m;
+			dxp->ndescs = 1;
+
+			desc = &vq->vq_ring.desc_packed[desc_idx];
+			desc->addr = VIRTIO_MBUF_ADDR(m, vq) +
+				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+			desc->len = m->buf_len - RTE_PKTMBUF_HEADROOM +
+				hw->vtnet_hdr_size;
+			desc->flags |= VRING_DESC_F_WRITE;
+			rte_smp_wmb();
+			set_desc_avail(&vq->vq_ring, desc);
+		}
+		vq->vq_ring.avail_wrap_counter ^= 1;
+		nbufs = desc_idx;
+		goto out;
+	}
+
 	/* Allocate blank mbufs for the each rx descriptor */
 	nbufs = 0;
 
@@ -816,6 +845,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
 		vq_update_avail_idx(vq);
 	}
 
+out:
 	PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
 
 	VIRTQUEUE_DUMP(vq);
@@ -1034,6 +1064,106 @@ virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
 	return 0;
 }
 
+uint16_t
+virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	struct virtnet_rx *rxvq = rx_queue;
+	struct virtqueue *vq = rxvq->vq;
+	struct virtio_hw *hw = vq->hw;
+	struct rte_mbuf *rxm, *nmb;
+	uint16_t nb_rx;
+	uint32_t len;
+	uint32_t i;
+	uint32_t hdr_size;
+	struct virtio_net_hdr *hdr;
+	struct vring_desc_packed *descs = vq->vq_ring.desc_packed;
+	struct vring_desc_packed *desc;
+	uint16_t used_idx, id;
+	struct vq_desc_extra *dxp;
+
+	nb_rx = 0;
+	if (unlikely(hw->started == 0))
+		return nb_rx;
+
+	hdr_size = hw->vtnet_hdr_size;
+
+	for (i = 0; i < nb_pkts; i++) {
+		used_idx = vq->vq_used_cons_idx;
+		desc = &descs[used_idx];
+		if (!desc_is_used(desc, &vq->vq_ring))
+			break;
+		rte_smp_rmb();
+		id = desc->index;
+
+		nmb = rte_mbuf_raw_alloc(rxvq->mpool);
+		if (unlikely(nmb == NULL)) {
+			struct rte_eth_dev *dev
+				= &rte_eth_devices[rxvq->port_id];
+			dev->data->rx_mbuf_alloc_failed++;
+			break;
+		}
+
+		dxp = &vq->vq_descx[id];
+		len = desc->len;
+		rxm = dxp->cookie;
+		dxp->cookie = nmb;
+		dxp->ndescs = 1;
+
+		desc->addr = VIRTIO_MBUF_ADDR(nmb, vq) +
+			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+		desc->len = nmb->buf_len - RTE_PKTMBUF_HEADROOM +
+			hw->vtnet_hdr_size;
+		desc->flags = VRING_DESC_F_WRITE;
+		rte_smp_wmb();
+		vq->vq_used_cons_idx += dxp->ndescs;
+		if (vq->vq_used_cons_idx >= vq->vq_nentries) {
+			vq->vq_used_cons_idx -= vq->vq_nentries;
+			vq->vq_ring.used_wrap_counter ^= 1;
+		}
+
+		PMD_RX_LOG(DEBUG, "packet len:%d", len);
+
+		if (unlikely(len < hdr_size + ETHER_HDR_LEN)) {
+			PMD_RX_LOG(ERR, "Packet drop");
+			rte_pktmbuf_free(rxm);
+			rxvq->stats.errors++;
+			continue;
+		}
+
+		rxm->port = rxvq->port_id;
+		rxm->data_off = RTE_PKTMBUF_HEADROOM;
+		rxm->ol_flags = 0;
+		rxm->vlan_tci = 0;
+
+		rxm->pkt_len = (uint32_t)(len - hdr_size);
+		rxm->data_len = (uint16_t)(len - hdr_size);
+
+		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
+			RTE_PKTMBUF_HEADROOM - hdr_size);
+
+		if (hw->vlan_strip)
+			rte_vlan_strip(rxm);
+
+		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
+			rte_pktmbuf_free(rxm);
+			rxvq->stats.errors++;
+			continue;
+		}
+
+		VIRTIO_DUMP_PACKET(rxm, rxm->data_len);
+
+		rxvq->stats.bytes += rxm->pkt_len;
+		virtio_update_packet_stats(&rxvq->stats, rxm);
+
+		rx_pkts[nb_rx++] = rxm;
+	}
+
+	rxvq->stats.packets += nb_rx;
+
+	return nb_rx;
+}
+
 #define VIRTIO_MBUF_BURST_SZ 64
 #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
 uint16_t
-- 
2.17.1