DPDK patches and discussions
From: Jens Freimann <jfreimann@redhat.com>
To: dev@dpdk.org
Cc: tiwei.bie@intel.com, yliu@fridaylinux.org,
	maxime.coquelin@redhat.com, mst@redhat.com
Subject: [dpdk-dev] [PATCH 16/17] net/virtio: add support for mergeable buffers with packed virtqueues
Date: Fri, 16 Mar 2018 16:21:19 +0100	[thread overview]
Message-ID: <20180316152120.13199-17-jfreimann@redhat.com> (raw)
In-Reply-To: <20180316152120.13199-1-jfreimann@redhat.com>

Implement support for receiving mergeable buffers in virtio when packed
virtqueues are enabled.

Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
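Background note (not part of the diff): with VIRTIO_NET_F_MRG_RXBUF a
received packet may span several descriptors; the virtio-net header in
the first buffer carries a num_buffers count and the driver links the
remaining buffers onto the head mbuf. The sketch below only illustrates
that chaining idea; mrg_rx_chain_sketch(), bufs[] and lens[] are assumed
names for illustration, not code taken from this patch.

    #include <stdint.h>
    #include <rte_mbuf.h>

    /* Illustrative only: chain the extra buffers of one mergeable RX
     * packet onto the head mbuf. bufs[] holds the already-dequeued
     * mbufs, lens[] the used lengths reported by the device, and
     * num_buffers the count from the mergeable virtio-net header. */
    static void
    mrg_rx_chain_sketch(struct rte_mbuf **bufs, uint32_t *lens,
                        uint16_t num_buffers, uint16_t hdr_size)
    {
        struct rte_mbuf *head = bufs[0];
        struct rte_mbuf *prev = head;
        uint16_t i;

        head->data_off += hdr_size;              /* skip the virtio-net header */
        head->data_len  = (uint16_t)(lens[0] - hdr_size);
        head->pkt_len   = lens[0] - hdr_size;
        head->nb_segs   = num_buffers;

        for (i = 1; i < num_buffers; i++) {
            bufs[i]->data_len = (uint16_t)lens[i]; /* extra buffers carry no header */
            head->pkt_len    += lens[i];
            prev->next = bufs[i];
            prev = bufs[i];
        }
        prev->next = NULL;
    }

In the diff below this is handled by the existing
virtio_recv_mergeable_pkts() routine, which the patch extends to also
dequeue from and refill packed virtqueues.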
 drivers/net/virtio/virtio_ethdev.c |   5 +-
 drivers/net/virtio/virtio_rxtx.c   | 107 +++++++++++++++++++++++++++++++++----
 drivers/net/virtio/virtqueue.h     |   1 +
 3 files changed, 103 insertions(+), 10 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 9f372a4..d9e7399 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1360,7 +1360,10 @@ static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
 			eth_dev->data->port_id);
 		eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
 	} else if (vtpci_packed_queue(hw)) {
-		eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
+		if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
+			eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
+		else
+			eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
 	} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
 		PMD_INIT_LOG(INFO,
 			"virtio: using mergeable buffer Rx path on port %u",
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 7834747..5545aa4 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -64,8 +64,8 @@
 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
 	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
 		while (dp->flags & VRING_DESC_F_NEXT) {
-			desc_idx_last = dp->next;
-			dp = &vq->vq_ring.desc[dp->next];
+			desc_idx_last = desc_idx++;
+			dp = &vq->vq_ring.desc[desc_idx];
 		}
 	}
 	dxp->ndescs = 0;
@@ -86,6 +86,76 @@
 	dp->next = VQ_RING_DESC_CHAIN_END;
 }
 
+static void
+virtio_refill_packed(struct virtqueue *vq, uint16_t used_idx, struct virtnet_rx *rxvq)
+{
+	struct vq_desc_extra *dxp;
+	struct vring_desc_packed *descs = vq->vq_ring.desc_packed;
+	struct vring_desc_packed *desc;
+	struct rte_mbuf *nmb;
+
+	nmb = rte_mbuf_raw_alloc(rxvq->mpool);
+	if (unlikely(nmb == NULL)) {
+		struct rte_eth_dev *dev
+			= &rte_eth_devices[rxvq->port_id];
+		dev->data->rx_mbuf_alloc_failed++;
+		return;
+	}
+
+	desc = &descs[used_idx & (vq->vq_nentries - 1)];
+
+	dxp = &vq->vq_descx[used_idx & (vq->vq_nentries - 1)];
+
+	dxp->cookie = nmb;
+	dxp->ndescs = 1;
+
+	desc->addr = VIRTIO_MBUF_ADDR(nmb, vq) +
+		RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
+	desc->len = nmb->buf_len - RTE_PKTMBUF_HEADROOM +
+		vq->hw->vtnet_hdr_size;
+	desc->flags |= VRING_DESC_F_WRITE;
+
+
+}
+
+static uint16_t
+virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
+			   uint32_t *len, uint16_t num, struct virtnet_rx *rx_queue)
+{
+	struct rte_mbuf *cookie;
+	uint16_t used_idx;
+	struct vring_desc_packed *desc;
+	uint16_t i;
+
+	for (i = 0; i < num; i++) {
+		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
+		desc = &vq->vq_ring.desc_packed[used_idx];
+		if (!desc_is_used(desc))
+			return i;
+		len[i] = desc->len;
+		cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;
+
+		if (unlikely(cookie == NULL)) {
+			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
+				vq->vq_used_cons_idx);
+			break;
+		}
+		rte_prefetch0(cookie);
+		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
+		rx_pkts[i] = cookie;
+
+		virtio_refill_packed(vq, used_idx, rx_queue);
+
+		rte_smp_wmb();
+		if ((vq->vq_used_cons_idx & (vq->vq_nentries - 1)) == 0)
+			toggle_wrap_counter(&vq->vq_ring);
+		set_desc_avail(&vq->vq_ring, desc);
+		vq->vq_used_cons_idx++;
+	}
+
+	return i;
+}
+
 static uint16_t
 virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
 			   uint32_t *len, uint16_t num)
@@ -968,12 +1038,16 @@
 	uint32_t seg_res;
 	uint32_t hdr_size;
 	int offload;
+	uint32_t rx_num = 0;
 
 	nb_rx = 0;
 	if (unlikely(hw->started == 0))
 		return nb_rx;
 
-	nb_used = VIRTQUEUE_NUSED(vq);
+	if (vtpci_packed_queue(vq->hw))
+		nb_used = VIRTIO_MBUF_BURST_SZ; /* FIXME */
+	else
+		nb_used = VIRTQUEUE_NUSED(vq);
 
 	virtio_rmb();
 
@@ -987,14 +1061,23 @@
 	hdr_size = hw->vtnet_hdr_size;
 	offload = rx_offload_enabled(hw);
 
+	vq->vq_used_idx = vq->vq_used_cons_idx;
+
 	while (i < nb_used) {
 		struct virtio_net_hdr_mrg_rxbuf *header;
 
 		if (nb_rx == nb_pkts)
 			break;
 
-		num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1);
-		if (num != 1)
+		if (vtpci_packed_queue(vq->hw))
+			num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, 1, 
+				(struct virtnet_rx *) rx_queue);
+		else
+			num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1);
+		if (num == 0) {
+			return nb_rx;
+		}
+		if (num != 1)
 			continue;
 
 		i++;
@@ -1045,9 +1128,13 @@
 			uint16_t  rcv_cnt =
 				RTE_MIN(seg_res, RTE_DIM(rcv_pkts));
 			if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
-				uint32_t rx_num =
-					virtqueue_dequeue_burst_rx(vq,
-					rcv_pkts, len, rcv_cnt);
+				if (vtpci_packed_queue(vq->hw))
+					rx_num = virtqueue_dequeue_burst_rx_packed(vq,
+							     rcv_pkts, len, rcv_cnt,
+							     (struct virtnet_rx *) rx_queue);
+				else
+					rx_num = virtqueue_dequeue_burst_rx(vq,
+							      rcv_pkts, len, rcv_cnt);
 				i += rx_num;
 				rcv_cnt = rx_num;
 			} else {
@@ -1091,6 +1178,9 @@
 
 	rxvq->stats.packets += nb_rx;
 
+	if (vtpci_packed_queue(vq->hw))
+		return nb_rx;
+
 	/* Allocate new mbuf for the used descriptor */
 	error = ENOSPC;
 	while (likely(!virtqueue_full(vq))) {
@@ -1111,7 +1201,6 @@
 
 	if (likely(nb_enqueued)) {
 		vq_update_avail_idx(vq);
-
 		if (unlikely(virtqueue_kick_prepare(vq))) {
 			virtqueue_notify(vq);
 			PMD_RX_LOG(DEBUG, "Notified");
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 82160ca..13cadf8 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -171,6 +171,7 @@ struct virtqueue {
 	 * trails vq_ring.used->idx.
 	 */
 	uint16_t vq_used_cons_idx;
+	uint16_t vq_used_idx;
 	uint16_t vq_nentries;  /**< vring desc numbers */
 	uint16_t vq_free_cnt;  /**< num of desc available */
 	uint16_t vq_avail_idx; /**< sync until needed */
-- 
1.8.3.1


Thread overview: 39+ messages
2018-03-16 15:21 [dpdk-dev] [PATCH 00/17] implement " Jens Freimann
2018-03-16 15:21 ` [dpdk-dev] [PATCH 01/17] net/virtio: vring init for packed queues Jens Freimann
2018-03-19  8:03   ` Tiwei Bie
2018-04-04  7:33   ` Maxime Coquelin
2018-04-04  7:48     ` Jens Freimann
2018-03-16 15:21 ` [dpdk-dev] [PATCH 02/17] net/virtio: don't call virtio_disable_intr() " Jens Freimann
2018-03-19  8:06   ` Tiwei Bie
2018-03-16 15:21 ` [dpdk-dev] [PATCH 03/17] net/virtio: add virtio 1.1 defines Jens Freimann
2018-03-19  8:16   ` Tiwei Bie
2018-03-16 15:21 ` [dpdk-dev] [PATCH 04/17] net/virtio: add packed virtqueue helpers Jens Freimann
2018-03-19  8:23   ` Tiwei Bie
2018-03-16 15:21 ` [dpdk-dev] [PATCH 05/17] net/virtio: don't dump split virtqueue data Jens Freimann
2018-03-19  8:25   ` Tiwei Bie
2018-03-16 15:21 ` [dpdk-dev] [PATCH 06/17] net/virtio-user: add option to use packed queues Jens Freimann
2018-03-19  8:33   ` Tiwei Bie
2018-03-26 10:12     ` Jens Freimann
2018-03-16 15:21 ` [dpdk-dev] [PATCH 07/17] net/virtio: implement transmit path for " Jens Freimann
2018-03-19  9:04   ` Tiwei Bie
2018-03-19  9:23     ` Jens Freimann
2018-03-26  2:18   ` Jason Wang
2018-03-16 15:21 ` [dpdk-dev] [PATCH 08/17] net/virtio: implement receive " Jens Freimann
2018-03-19 10:15   ` Tiwei Bie
2018-03-26  2:15   ` Jason Wang
2018-03-16 15:21 ` [dpdk-dev] [PATCH 09/17] vhost: add virtio 1.1 defines Jens Freimann
2018-03-16 15:21 ` [dpdk-dev] [PATCH 10/17] vhost: vring address setup for packed queues Jens Freimann
2018-03-19 10:25   ` Tiwei Bie
2018-03-16 15:21 ` [dpdk-dev] [PATCH 11/17] vhost: add helpers for packed virtqueues Jens Freimann
2018-03-19 10:39   ` Tiwei Bie
2018-03-21  9:17     ` Jens Freimann
2018-03-16 15:21 ` [dpdk-dev] [PATCH 12/17] vhost: dequeue for packed queues Jens Freimann
2018-03-19 10:55   ` Tiwei Bie
2018-03-16 15:21 ` [dpdk-dev] [PATCH 13/17] vhost: packed queue enqueue path Jens Freimann
2018-03-19 11:02   ` Tiwei Bie
2018-03-21  8:45     ` Jens Freimann
2018-03-21  8:58       ` Tiwei Bie
2018-03-16 15:21 ` [dpdk-dev] [PATCH 14/17] vhost: enable packed virtqueues Jens Freimann
2018-03-16 15:21 ` [dpdk-dev] [PATCH 15/17] net/virtio: disable ctrl virtqueue for packed rings Jens Freimann
2018-03-16 15:21 ` Jens Freimann [this message]
2018-03-16 15:21 ` [dpdk-dev] [PATCH 17/17] vhost: support mergeable rx buffers with packed queues Jens Freimann

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20180316152120.13199-17-jfreimann@redhat.com \
    --to=jfreimann@redhat.com \
    --cc=dev@dpdk.org \
    --cc=maxime.coquelin@redhat.com \
    --cc=mst@redhat.com \
    --cc=tiwei.bie@intel.com \
    --cc=yliu@fridaylinux.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.