From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mx1.redhat.com (mx3-rdu2.redhat.com [66.187.233.73]) by dpdk.org (Postfix) with ESMTP id B9A6B56A1 for ; Mon, 26 Mar 2018 04:15:44 +0200 (CEST) Received: from smtp.corp.redhat.com (int-mx03.intmail.prod.int.rdu2.redhat.com [10.11.54.3]) (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits)) (No client certificate requested) by mx1.redhat.com (Postfix) with ESMTPS id 2ED6A406F8BE; Mon, 26 Mar 2018 02:15:44 +0000 (UTC) Received: from [10.72.12.135] (ovpn-12-135.pek2.redhat.com [10.72.12.135]) by smtp.corp.redhat.com (Postfix) with ESMTPS id 016E3111DCFA; Mon, 26 Mar 2018 02:15:33 +0000 (UTC) To: Jens Freimann , dev@dpdk.org Cc: tiwei.bie@intel.com, yliu@fridaylinux.org, maxime.coquelin@redhat.com, mst@redhat.com References: <20180316152120.13199-1-jfreimann@redhat.com> <20180316152120.13199-9-jfreimann@redhat.com> From: Jason Wang Message-ID: <1a3552e7-f409-9143-cdd4-f7a9d741a692@redhat.com> Date: Mon, 26 Mar 2018 10:15:29 +0800 User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Thunderbird/52.6.0 MIME-Version: 1.0 In-Reply-To: <20180316152120.13199-9-jfreimann@redhat.com> Content-Type: text/plain; charset=utf-8; format=flowed Content-Transfer-Encoding: 8bit Content-Language: en-US X-Scanned-By: MIMEDefang 2.78 on 10.11.54.3 X-Greylist: Sender IP whitelisted, not delayed by milter-greylist-4.5.16 (mx1.redhat.com [10.11.55.7]); Mon, 26 Mar 2018 02:15:44 +0000 (UTC) X-Greylist: inspected by milter-greylist-4.5.16 (mx1.redhat.com [10.11.55.7]); Mon, 26 Mar 2018 02:15:44 +0000 (UTC) for IP:'10.11.54.3' DOMAIN:'int-mx03.intmail.prod.int.rdu2.redhat.com' HELO:'smtp.corp.redhat.com' FROM:'jasowang@redhat.com' RCPT:'' Subject: Re: [dpdk-dev] [PATCH 08/17] net/virtio: implement receive path for packed queues X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , 
X-List-Received-Date: Mon, 26 Mar 2018 02:15:45 -0000 On 2018年03月16日 23:21, Jens Freimann wrote: > From: Yuanhan Liu > > Implement the receive part here. No support for mergeable buffers yet. > > Signed-off-by: Jens Freimann > Signed-off-by: Yuanhan Liu > --- > drivers/net/virtio/virtio_ethdev.c | 5 +- > drivers/net/virtio/virtio_ethdev.h | 2 + > drivers/net/virtio/virtio_rxtx.c | 134 +++++++++++++++++++++++++++++++++++++ > 3 files changed, 140 insertions(+), 1 deletion(-) > > diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c > index 722a2cd..888cc49 100644 > --- a/drivers/net/virtio/virtio_ethdev.c > +++ b/drivers/net/virtio/virtio_ethdev.c > @@ -1352,6 +1352,8 @@ static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev, > PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u", > eth_dev->data->port_id); > eth_dev->rx_pkt_burst = virtio_recv_pkts_vec; > + } else if (vtpci_packed_queue(hw)) { > + eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed; > } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) { > PMD_INIT_LOG(INFO, > "virtio: using mergeable buffer Rx path on port %u", > @@ -1507,7 +1509,8 @@ static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev, > > /* Setting up rx_header size for the device */ > if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) || > - vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) > + vtpci_with_feature(hw, VIRTIO_F_VERSION_1) || > + vtpci_with_feature(hw, VIRTIO_F_RING_PACKED)) > hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf); > else > hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr); > diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h > index cfefe4d..92c1c4f 100644 > --- a/drivers/net/virtio/virtio_ethdev.h > +++ b/drivers/net/virtio/virtio_ethdev.h > @@ -72,6 +72,8 @@ int virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev, > > uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, > uint16_t nb_pkts); > 
+uint16_t virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts, > + uint16_t nb_pkts); > > uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, > uint16_t nb_pkts); > diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c > index f1df004..7834747 100644 > --- a/drivers/net/virtio/virtio_rxtx.c > +++ b/drivers/net/virtio/virtio_rxtx.c > @@ -31,6 +31,7 @@ > #include "virtqueue.h" > #include "virtio_rxtx.h" > #include "virtio_rxtx_simple.h" > +#include "virtio_ring.h" > > #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP > #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len) > @@ -428,6 +429,34 @@ > > PMD_INIT_FUNC_TRACE(); > > + if (vtpci_packed_queue(hw)) { > + struct vring_desc_packed *desc; > + struct vq_desc_extra *dxp; > + > + for (desc_idx = 0; desc_idx < vq->vq_nentries; > + desc_idx++) { > + m = rte_mbuf_raw_alloc(rxvq->mpool); > + if (unlikely(m == NULL)) > + return -ENOMEM; > + > + dxp = &vq->vq_descx[desc_idx]; > + dxp->cookie = m; > + dxp->ndescs = 1; > + > + desc = &vq->vq_ring.desc_packed[desc_idx]; > + desc->addr = VIRTIO_MBUF_ADDR(m, vq) + > + RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size; > + desc->len = m->buf_len - RTE_PKTMBUF_HEADROOM + > + hw->vtnet_hdr_size; > + desc->flags |= VRING_DESC_F_WRITE; > + rte_smp_wmb(); > + set_desc_avail(&vq->vq_ring, desc); > + } > + toggle_wrap_counter(&vq->vq_ring); > + > + return 0; > + } > + > /* Allocate blank mbufs for the each rx descriptor */ > nbufs = 0; > > @@ -702,6 +731,111 @@ > vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6); > } > > +uint16_t > +virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts, > + uint16_t nb_pkts) > +{ > + struct virtnet_rx *rxvq = rx_queue; > + struct virtqueue *vq = rxvq->vq; > + struct virtio_hw *hw = vq->hw; > + struct rte_mbuf *rxm, *nmb; > + uint16_t nb_rx; > + uint32_t len; > + uint32_t i; > + uint32_t hdr_size; > + int offload; > + struct virtio_net_hdr *hdr; > + struct vring_desc_packed *descs = 
vq->vq_ring.desc_packed; > + struct vring_desc_packed *desc; > + uint16_t used_idx = vq->vq_used_cons_idx; > + struct vq_desc_extra *dxp; > + > + nb_rx = 0; > + if (unlikely(hw->started == 0)) > + return nb_rx; > + > + hdr_size = hw->vtnet_hdr_size; > + offload = rx_offload_enabled(hw); > + > + for (i = 0; i < nb_pkts; i++) { > + desc = &descs[used_idx & (vq->vq_nentries - 1)]; > + if (!desc_is_used(desc)) > + break; > + > + rte_smp_rmb(); > + > + nmb = rte_mbuf_raw_alloc(rxvq->mpool); > + if (unlikely(nmb == NULL)) { > + struct rte_eth_dev *dev > + = &rte_eth_devices[rxvq->port_id]; > + dev->data->rx_mbuf_alloc_failed++; > + break; > + } > + > + dxp = &vq->vq_descx[used_idx & (vq->vq_nentries - 1)]; > + > + len = desc->len; > + rxm = dxp->cookie; > + dxp->cookie = nmb; > + dxp->ndescs = 1; > + > + desc->addr = VIRTIO_MBUF_ADDR(nmb, vq) + > + RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size; > + desc->len = nmb->buf_len - RTE_PKTMBUF_HEADROOM + > + hw->vtnet_hdr_size; > + desc->flags |= VRING_DESC_F_WRITE; > + > + PMD_RX_LOG(DEBUG, "packet len:%d", len); > + > + if (unlikely(len < hdr_size + ETHER_HDR_LEN)) { > + PMD_RX_LOG(ERR, "Packet drop"); > + rte_pktmbuf_free(rxm); > + rxvq->stats.errors++; > + continue; > + } > + > + rxm->port = rxvq->port_id; > + rxm->data_off = RTE_PKTMBUF_HEADROOM; > + rxm->ol_flags = 0; > + rxm->vlan_tci = 0; > + > + rxm->pkt_len = (uint32_t)(len - hdr_size); > + rxm->data_len = (uint16_t)(len - hdr_size); > + > + hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr + > + RTE_PKTMBUF_HEADROOM - hdr_size); > + > + if (hw->vlan_strip) > + rte_vlan_strip(rxm); > + > + if (offload && virtio_rx_offload(rxm, hdr) < 0) { > + rte_pktmbuf_free(rxm); > + rxvq->stats.errors++; > + continue; > + } > + > + VIRTIO_DUMP_PACKET(rxm, rxm->data_len); > + > + rxvq->stats.bytes += rxm->pkt_len; > + virtio_update_packet_stats(&rxvq->stats, rxm); > + > + rte_smp_wmb(); > + set_desc_avail(&vq->vq_ring, desc); > + > + rx_pkts[nb_rx++] = rxm; > + > + used_idx++; > + 
if ((used_idx & (vq->vq_nentries - 1)) == 0) > + toggle_wrap_counter(&vq->vq_ring); > + } > + > + rxvq->stats.packets += nb_rx; I believe we need to kick the virtqueue here, since we cannot assume the backend (vhost-net) is always polling the virtqueue. Thanks > + > + vq->vq_used_cons_idx = used_idx; > + > + return nb_rx; > +} > + > #define VIRTIO_MBUF_BURST_SZ 64 > #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc)) > uint16_t