From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: tiwei.bie@intel.com, zhihong.wang@intel.com,
	jfreimann@redhat.com, dev@dpdk.org
Cc: mst@redhat.com, jasowang@redhat.com, wexu@redhat.com,
	Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [dpdk-dev] [PATCH v5 13/15] vhost: add Tx support for packed ring
Date: Fri, 22 Jun 2018 15:43:25 +0200
Message-ID: <20180622134327.18973-14-maxime.coquelin@redhat.com>
In-Reply-To: <20180622134327.18973-1-maxime.coquelin@redhat.com>

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
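(Note, not part of the patch itself.) A minimal caller sketch showing
that the new packed-ring Tx path is reached transparently through the
existing rte_vhost_dequeue_burst() API; the device setup, the vid value
and the VIRTIO_TXQ queue index are assumptions borrowed from the vhost
example application, not from this patch:

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_vhost.h>

#define MAX_PKT_BURST 32
#define VIRTIO_TXQ 1	/* guest Tx ring of the first queue pair */

/* Drain one vhost Tx (dequeue) queue. The split/packed dispatch
 * happens inside the library, so the caller is layout-agnostic. */
static uint16_t
drain_vhost_txq(int vid, struct rte_mempool *mbuf_pool)
{
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint16_t i, nb_rx;

	nb_rx = rte_vhost_dequeue_burst(vid, VIRTIO_TXQ, mbuf_pool,
			pkts, MAX_PKT_BURST);

	for (i = 0; i < nb_rx; i++)
		rte_pktmbuf_free(pkts[i]);	/* or forward to a NIC */

	return nb_rx;
}
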
 lib/librte_vhost/vhost.h      |   1 +
 lib/librte_vhost/virtio_net.c | 121 +++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 121 insertions(+), 1 deletion(-)

diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 62d49f238..5dcb61c71 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -56,6 +56,7 @@ struct buf_vector {
 struct zcopy_mbuf {
 	struct rte_mbuf *mbuf;
 	uint32_t desc_idx;
+	uint16_t desc_count;
 	uint16_t in_use;
 
 	TAILQ_ENTRY(zcopy_mbuf) next;
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index e094357be..b9f49175c 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1443,6 +1443,122 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	return i;
 }
 
+static __rte_always_inline uint16_t
+virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
+{
+	uint16_t i;
+
+	rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
+
+	if (unlikely(dev->dequeue_zero_copy)) {
+		struct zcopy_mbuf *zmbuf, *next;
+		int nr_updated = 0;
+
+		for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
+		     zmbuf != NULL; zmbuf = next) {
+			next = TAILQ_NEXT(zmbuf, next);
+
+			if (mbuf_is_consumed(zmbuf->mbuf)) {
+				update_shadow_used_ring_packed(vq,
+						zmbuf->desc_idx,
+						0,
+						zmbuf->desc_count);
+				nr_updated += 1;
+
+				TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
+				restore_mbuf(zmbuf->mbuf);
+				rte_pktmbuf_free(zmbuf->mbuf);
+				put_zmbuf(zmbuf);
+				vq->nr_zmbuf -= 1;
+			}
+		}
+
+		flush_shadow_used_ring_packed(dev, vq);
+		vhost_vring_call(dev, vq);
+	}
+
+	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
+
+	count = RTE_MIN(count, MAX_PKT_BURST);
+	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
+			dev->vid, count);
+
+	for (i = 0; i < count; i++) {
+		struct buf_vector buf_vec[BUF_VECTOR_MAX];
+		uint16_t buf_id, dummy_len;
+		uint16_t desc_count, nr_vec = 0;
+		int err;
+
+		if (unlikely(fill_vec_buf_packed(dev, vq,
+						vq->last_avail_idx, &desc_count,
+						buf_vec, &nr_vec,
+						&buf_id, &dummy_len,
+						VHOST_ACCESS_RW) < 0))
+			break;
+
+		if (likely(dev->dequeue_zero_copy == 0))
+			update_shadow_used_ring_packed(vq, buf_id, 0,
+					desc_count);
+
+		rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
+
+		pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
+		if (unlikely(pkts[i] == NULL)) {
+			RTE_LOG(ERR, VHOST_DATA,
+				"Failed to allocate memory for mbuf.\n");
+			break;
+		}
+
+		err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
+				mbuf_pool);
+		if (unlikely(err)) {
+			rte_pktmbuf_free(pkts[i]);
+			break;
+		}
+
+		if (unlikely(dev->dequeue_zero_copy)) {
+			struct zcopy_mbuf *zmbuf;
+
+			zmbuf = get_zmbuf(vq);
+			if (!zmbuf) {
+				rte_pktmbuf_free(pkts[i]);
+				break;
+			}
+			zmbuf->mbuf = pkts[i];
+			zmbuf->desc_idx = buf_id;
+			zmbuf->desc_count = desc_count;
+
+			/*
+			 * Pin the mbuf with an extra reference; we
+			 * check later whether it has been consumed
+			 * (i.e. we are the last user), in which case
+			 * the used ring can be updated safely.
+			 */
+			rte_mbuf_refcnt_update(pkts[i], 1);
+
+			vq->nr_zmbuf += 1;
+			TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
+		}
+
+		vq->last_avail_idx += desc_count;
+		if (vq->last_avail_idx >= vq->size) {
+			vq->last_avail_idx -= vq->size;
+			vq->avail_wrap_counter ^= 1;
+		}
+	}
+
+	if (likely(dev->dequeue_zero_copy == 0)) {
+		do_data_copy_dequeue(vq);
+		if (unlikely(i < count))
+			vq->shadow_used_idx = i;
+		flush_shadow_used_ring_packed(dev, vq);
+		vhost_vring_call(dev, vq);
+	}
+
+	return i;
+}
+
 uint16_t
 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
@@ -1513,7 +1629,10 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 		count -= 1;
 	}
 
-	count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count);
+	if (vq_is_packed(dev))
+		count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count);
+	else
+		count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count);
 
 out:
 	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
-- 
2.14.4
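
As background for the wrap-counter arithmetic in virtio_dev_tx_packed()
above, here is a self-contained sketch of the packed-ring availability
rule the series relies on. The flag values follow the virtio 1.1 packed
ring layout; the struct and helper names are illustrative and not
quoted from this series:

#include <stdbool.h>
#include <stdint.h>

/* Flag bit positions from the virtio 1.1 packed ring layout. */
#define VRING_DESC_F_AVAIL	(1 << 7)
#define VRING_DESC_F_USED	(1 << 15)

struct vring_packed_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

/* A descriptor is available to the device when its AVAIL bit matches
 * the device's wrap counter and its USED bit does not. */
static inline bool
desc_is_avail(const struct vring_packed_desc *desc, bool wrap_counter)
{
	bool avail = !!(desc->flags & VRING_DESC_F_AVAIL);
	bool used = !!(desc->flags & VRING_DESC_F_USED);

	return avail == wrap_counter && used != wrap_counter;
}

/* Ring advance as done at the end of the dequeue loop above:
 * last_avail_idx wraps at vq->size and the wrap counter flips. */
static inline void
vq_inc_last_avail(uint16_t *last_avail_idx, bool *avail_wrap_counter,
		uint16_t vq_size, uint16_t desc_count)
{
	*last_avail_idx += desc_count;
	if (*last_avail_idx >= vq_size) {
		*last_avail_idx -= vq_size;
		*avail_wrap_counter = !*avail_wrap_counter;
	}
}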

Thread overview: 26+ messages
2018-06-22 13:43 [dpdk-dev] [PATCH v5 00/15] Vhost: add support to packed ring layout Maxime Coquelin
2018-06-22 13:43 ` [dpdk-dev] [PATCH v5 01/15] vhost: add virtio packed virtqueue defines Maxime Coquelin
2018-06-29 15:47   ` Tiwei Bie
2018-06-29 16:20     ` Maxime Coquelin
2018-06-22 13:43 ` [dpdk-dev] [PATCH v5 02/15] vhost: add helpers for packed virtqueues Maxime Coquelin
2018-06-29 15:51   ` Tiwei Bie
2018-06-29 16:21     ` Maxime Coquelin
2018-06-22 13:43 ` [dpdk-dev] [PATCH v5 03/15] vhost: vring address setup for packed queues Maxime Coquelin
2018-06-29 15:59   ` Tiwei Bie
2018-06-29 16:34     ` Maxime Coquelin
2018-06-30  2:18       ` Tiwei Bie
2018-07-01  9:58     ` Maxime Coquelin
2018-06-22 13:43 ` [dpdk-dev] [PATCH v5 04/15] vhost: clear shadow used table index at flush time Maxime Coquelin
2018-06-22 13:43 ` [dpdk-dev] [PATCH v5 05/15] vhost: make indirect desc table copy desc type agnostic Maxime Coquelin
2018-06-22 13:43 ` [dpdk-dev] [PATCH v5 06/15] vhost: clear batch copy index at copy time Maxime Coquelin
2018-06-22 13:43 ` [dpdk-dev] [PATCH v5 07/15] vhost: extract split ring handling from Rx and Tx functions Maxime Coquelin
2018-06-22 13:43 ` [dpdk-dev] [PATCH v5 08/15] vhost: append shadow used ring function names with split Maxime Coquelin
2018-06-22 13:43 ` [dpdk-dev] [PATCH v5 09/15] vhost: add shadow used ring support for packed rings Maxime Coquelin
2018-06-29 16:08   ` Tiwei Bie
2018-06-22 13:43 ` [dpdk-dev] [PATCH v5 10/15] vhost: create descriptor mapping function Maxime Coquelin
2018-06-22 13:43 ` [dpdk-dev] [PATCH v5 11/15] vhost: add vector filling support for packed ring Maxime Coquelin
2018-06-22 13:43 ` [dpdk-dev] [PATCH v5 12/15] vhost: add Rx support for packed ring Maxime Coquelin
2018-06-22 13:43 ` [dpdk-dev] [PATCH v5 13/15] vhost: add Tx support for packed ring Maxime Coquelin [this message]
2018-06-22 13:43 ` [dpdk-dev] [PATCH v5 14/15] vhost: add notification support for packed ring Maxime Coquelin
2018-06-29 16:22   ` Tiwei Bie
2018-06-22 13:43 ` [dpdk-dev] [PATCH v5 15/15] vhost: advertize packed ring layout support Maxime Coquelin
