DPDK patches and discussions
 help / color / mirror / Atom feed
From: Jason Wang <jasowang@redhat.com>
To: Jens Freimann <jfreimann@redhat.com>, dev@dpdk.org
Cc: tiwei.bie@intel.com, yliu@fridaylinux.org,
	maxime.coquelin@redhat.com, mst@redhat.com
Subject: Re: [dpdk-dev] [PATCH 08/17] net/virtio: implement receive path for packed queues
Date: Mon, 26 Mar 2018 10:15:29 +0800	[thread overview]
Message-ID: <1a3552e7-f409-9143-cdd4-f7a9d741a692@redhat.com> (raw)
In-Reply-To: <20180316152120.13199-9-jfreimann@redhat.com>



On 2018年03月16日 23:21, Jens Freimann wrote:
> From: Yuanhan Liu <yuanhan.liu@linux.intel.com>
>
> Implement the receive part here. No support for mergeable buffers yet.
>
> Signed-off-by: Jens Freimann <jfreimann@redhat.com>
> Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
> ---
>   drivers/net/virtio/virtio_ethdev.c |   5 +-
>   drivers/net/virtio/virtio_ethdev.h |   2 +
>   drivers/net/virtio/virtio_rxtx.c   | 134 +++++++++++++++++++++++++++++++++++++
>   3 files changed, 140 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
> index 722a2cd..888cc49 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -1352,6 +1352,8 @@ static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
>   		PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
>   			eth_dev->data->port_id);
>   		eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
> +	} else if (vtpci_packed_queue(hw)) {
> +		eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
>   	} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
>   		PMD_INIT_LOG(INFO,
>   			"virtio: using mergeable buffer Rx path on port %u",
> @@ -1507,7 +1509,8 @@ static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
>   
>   	/* Setting up rx_header size for the device */
>   	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
> -	    vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
> +	    vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
> +	    vtpci_with_feature(hw, VIRTIO_F_RING_PACKED))
>   		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
>   	else
>   		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
> diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
> index cfefe4d..92c1c4f 100644
> --- a/drivers/net/virtio/virtio_ethdev.h
> +++ b/drivers/net/virtio/virtio_ethdev.h
> @@ -72,6 +72,8 @@ int virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
>   
>   uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>   		uint16_t nb_pkts);
> +uint16_t virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
> +		uint16_t nb_pkts);
>   
>   uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>   		uint16_t nb_pkts);
> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
> index f1df004..7834747 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -31,6 +31,7 @@
>   #include "virtqueue.h"
>   #include "virtio_rxtx.h"
>   #include "virtio_rxtx_simple.h"
> +#include "virtio_ring.h"
>   
>   #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
>   #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
> @@ -428,6 +429,34 @@
>   
>   	PMD_INIT_FUNC_TRACE();
>   
> +	if (vtpci_packed_queue(hw)) {
> +		struct vring_desc_packed *desc;
> +		struct vq_desc_extra *dxp;
> +
> +		for (desc_idx = 0; desc_idx < vq->vq_nentries;
> +				desc_idx++) {
> +			m = rte_mbuf_raw_alloc(rxvq->mpool);
> +			if (unlikely(m == NULL))
> +				return -ENOMEM;
> +
> +			dxp = &vq->vq_descx[desc_idx];
> +			dxp->cookie = m;
> +			dxp->ndescs = 1;
> +
> +			desc = &vq->vq_ring.desc_packed[desc_idx];
> +			desc->addr = VIRTIO_MBUF_ADDR(m, vq) +
> +				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
> +			desc->len = m->buf_len - RTE_PKTMBUF_HEADROOM +
> +				hw->vtnet_hdr_size;
> +			desc->flags |= VRING_DESC_F_WRITE;
> +			rte_smp_wmb();
> +			set_desc_avail(&vq->vq_ring, desc);
> +		}
> +		toggle_wrap_counter(&vq->vq_ring);
> +
> +		return 0;
> +	}
> +
>   	/* Allocate blank mbufs for the each rx descriptor */
>   	nbufs = 0;
>   
> @@ -702,6 +731,111 @@
>   		vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
>   }
>   
> +uint16_t
> +virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
> +		     uint16_t nb_pkts)
> +{
> +	struct virtnet_rx *rxvq = rx_queue;
> +	struct virtqueue *vq = rxvq->vq;
> +	struct virtio_hw *hw = vq->hw;
> +	struct rte_mbuf *rxm, *nmb;
> +	uint16_t nb_rx;
> +	uint32_t len;
> +	uint32_t i;
> +	uint32_t hdr_size;
> +	int offload;
> +	struct virtio_net_hdr *hdr;
> +	struct vring_desc_packed *descs = vq->vq_ring.desc_packed;
> +	struct vring_desc_packed *desc;
> +	uint16_t used_idx = vq->vq_used_cons_idx;
> +	struct vq_desc_extra *dxp;
> +
> +	nb_rx = 0;
> +	if (unlikely(hw->started == 0))
> +		return nb_rx;
> +
> +	hdr_size = hw->vtnet_hdr_size;
> +	offload = rx_offload_enabled(hw);
> +
> +	for (i = 0; i < nb_pkts; i++) {
> +		desc = &descs[used_idx & (vq->vq_nentries - 1)];
> +		if (!desc_is_used(desc))
> +			break;
> +
> +		rte_smp_rmb();
> +
> +		nmb = rte_mbuf_raw_alloc(rxvq->mpool);
> +		if (unlikely(nmb == NULL)) {
> +			struct rte_eth_dev *dev
> +				= &rte_eth_devices[rxvq->port_id];
> +			dev->data->rx_mbuf_alloc_failed++;
> +			break;
> +		}
> +
> +		dxp = &vq->vq_descx[used_idx & (vq->vq_nentries - 1)];
> +
> +		len = desc->len;
> +		rxm = dxp->cookie;
> +		dxp->cookie = nmb;
> +		dxp->ndescs = 1;
> +
> +		desc->addr = VIRTIO_MBUF_ADDR(nmb, vq) +
> +			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
> +		desc->len = nmb->buf_len - RTE_PKTMBUF_HEADROOM +
> +			hw->vtnet_hdr_size;
> +		desc->flags |= VRING_DESC_F_WRITE;
> +
> +		PMD_RX_LOG(DEBUG, "packet len:%d", len);
> +
> +		if (unlikely(len < hdr_size + ETHER_HDR_LEN)) {
> +			PMD_RX_LOG(ERR, "Packet drop");
> +			rte_pktmbuf_free(rxm);
> +			rxvq->stats.errors++;
> +			continue;
> +		}
> +
> +		rxm->port = rxvq->port_id;
> +		rxm->data_off = RTE_PKTMBUF_HEADROOM;
> +		rxm->ol_flags = 0;
> +		rxm->vlan_tci = 0;
> +
> +		rxm->pkt_len = (uint32_t)(len - hdr_size);
> +		rxm->data_len = (uint16_t)(len - hdr_size);
> +
> +		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
> +			RTE_PKTMBUF_HEADROOM - hdr_size);
> +
> +		if (hw->vlan_strip)
> +			rte_vlan_strip(rxm);
> +
> +		if (offload && virtio_rx_offload(rxm, hdr) < 0) {
> +			rte_pktmbuf_free(rxm);
> +			rxvq->stats.errors++;
> +			continue;
> +		}
> +
> +		VIRTIO_DUMP_PACKET(rxm, rxm->data_len);
> +
> +		rxvq->stats.bytes += rxm->pkt_len;
> +		virtio_update_packet_stats(&rxvq->stats, rxm);
> +
> +		rte_smp_wmb();
> +		set_desc_avail(&vq->vq_ring, desc);
> +
> +		rx_pkts[nb_rx++] = rxm;
> +
> +		used_idx++;
> +		if ((used_idx & (vq->vq_nentries - 1)) == 0)
> +			toggle_wrap_counter(&vq->vq_ring);
> +	}
> +
> +	rxvq->stats.packets += nb_rx;

I believe we need to kick the virtqueue here, since we cannot assume the
backend (vhost-net) is always polling the virtqueue.

Thanks

> +
> +	vq->vq_used_cons_idx = used_idx;
> +
> +	return nb_rx;
> +}
> +
>   #define VIRTIO_MBUF_BURST_SZ 64
>   #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
>   uint16_t

  parent reply	other threads:[~2018-03-26  2:15 UTC|newest]

Thread overview: 39+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-03-16 15:21 [dpdk-dev] [PATCH 00/17] implement packed virtqueues Jens Freimann
2018-03-16 15:21 ` [dpdk-dev] [PATCH 01/17] net/virtio: vring init for packed queues Jens Freimann
2018-03-19  8:03   ` Tiwei Bie
2018-04-04  7:33   ` Maxime Coquelin
2018-04-04  7:48     ` Jens Freimann
2018-03-16 15:21 ` [dpdk-dev] [PATCH 02/17] net/virtio: don't call virtio_disable_intr() " Jens Freimann
2018-03-19  8:06   ` Tiwei Bie
2018-03-16 15:21 ` [dpdk-dev] [PATCH 03/17] net/virtio: add virtio 1.1 defines Jens Freimann
2018-03-19  8:16   ` Tiwei Bie
2018-03-16 15:21 ` [dpdk-dev] [PATCH 04/17] net/virtio: add packed virtqueue helpers Jens Freimann
2018-03-19  8:23   ` Tiwei Bie
2018-03-16 15:21 ` [dpdk-dev] [PATCH 05/17] net/virtio: don't dump split virtqueue data Jens Freimann
2018-03-19  8:25   ` Tiwei Bie
2018-03-16 15:21 ` [dpdk-dev] [PATCH 06/17] net/virtio-user: add option to use packed queues Jens Freimann
2018-03-19  8:33   ` Tiwei Bie
2018-03-26 10:12     ` Jens Freimann
2018-03-16 15:21 ` [dpdk-dev] [PATCH 07/17] net/virtio: implement transmit path for " Jens Freimann
2018-03-19  9:04   ` Tiwei Bie
2018-03-19  9:23     ` Jens Freimann
2018-03-26  2:18   ` Jason Wang
2018-03-16 15:21 ` [dpdk-dev] [PATCH 08/17] net/virtio: implement receive " Jens Freimann
2018-03-19 10:15   ` Tiwei Bie
2018-03-26  2:15   ` Jason Wang [this message]
2018-03-16 15:21 ` [dpdk-dev] [PATCH 09/17] vhost: add virtio 1.1 defines Jens Freimann
2018-03-16 15:21 ` [dpdk-dev] [PATCH 10/17] vhost: vring address setup for packed queues Jens Freimann
2018-03-19 10:25   ` Tiwei Bie
2018-03-16 15:21 ` [dpdk-dev] [PATCH 11/17] vhost: add helpers for packed virtqueues Jens Freimann
2018-03-19 10:39   ` Tiwei Bie
2018-03-21  9:17     ` Jens Freimann
2018-03-16 15:21 ` [dpdk-dev] [PATCH 12/17] vhost: dequeue for packed queues Jens Freimann
2018-03-19 10:55   ` Tiwei Bie
2018-03-16 15:21 ` [dpdk-dev] [PATCH 13/17] vhost: packed queue enqueue path Jens Freimann
2018-03-19 11:02   ` Tiwei Bie
2018-03-21  8:45     ` Jens Freimann
2018-03-21  8:58       ` Tiwei Bie
2018-03-16 15:21 ` [dpdk-dev] [PATCH 14/17] vhost: enable packed virtqueues Jens Freimann
2018-03-16 15:21 ` [dpdk-dev] [PATCH 15/17] net/virtio: disable ctrl virtqueue for packed rings Jens Freimann
2018-03-16 15:21 ` [dpdk-dev] [PATCH 16/17] net/virtio: add support for mergeable buffers with packed virtqueues Jens Freimann
2018-03-16 15:21 ` [dpdk-dev] [PATCH 17/17] vhost: support mergeable rx buffers with packed queues Jens Freimann

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1a3552e7-f409-9143-cdd4-f7a9d741a692@redhat.com \
    --to=jasowang@redhat.com \
    --cc=dev@dpdk.org \
    --cc=jfreimann@redhat.com \
    --cc=maxime.coquelin@redhat.com \
    --cc=mst@redhat.com \
    --cc=tiwei.bie@intel.com \
    --cc=yliu@fridaylinux.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).