DPDK patches and discussions
From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: tiwei.bie@intel.com, zhihong.wang@intel.com,
	jfreimann@redhat.com, dev@dpdk.org
Cc: mst@redhat.com, jasowang@redhat.com, wexu@redhat.com,
	Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [dpdk-dev] [PATCH v6 07/15] vhost: extract split ring handling from Rx and Tx functions
Date: Mon,  2 Jul 2018 10:16:21 +0200
Message-ID: <20180702081629.29258-8-maxime.coquelin@redhat.com>
In-Reply-To: <20180702081629.29258-1-maxime.coquelin@redhat.com>

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/librte_vhost/virtio_net.c | 240 +++++++++++++++++++++++-------------------
 1 file changed, 131 insertions(+), 109 deletions(-)

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 816d5fc1d..385876527 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -226,13 +226,13 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
 }
 
 static __rte_always_inline int
-fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
-			 uint32_t avail_idx, uint32_t *vec_idx,
+fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
+			 uint32_t avail_idx, uint16_t *vec_idx,
 			 struct buf_vector *buf_vec, uint16_t *desc_chain_head,
 			 uint16_t *desc_chain_len, uint8_t perm)
 {
 	uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
-	uint32_t vec_id = *vec_idx;
+	uint16_t vec_id = *vec_idx;
 	uint32_t len    = 0;
 	uint64_t dlen, desc_avail, desc_iova;
 	struct vring_desc *descs = vq->desc;
@@ -317,13 +317,13 @@ fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
  * Returns -1 on fail, 0 on success
  */
 static inline int
-reserve_avail_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
+reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 				uint32_t size, struct buf_vector *buf_vec,
 				uint16_t *num_buffers, uint16_t avail_head,
 				uint16_t *nr_vec)
 {
 	uint16_t cur_idx;
-	uint32_t vec_idx = 0;
+	uint16_t vec_idx = 0;
 	uint16_t max_tries, tries = 0;
 
 	uint16_t head_idx = 0;
@@ -341,7 +341,8 @@ reserve_avail_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		if (unlikely(cur_idx == avail_head))
 			return -1;
 
-		if (unlikely(fill_vec_buf(dev, vq, cur_idx, &vec_idx, buf_vec,
+		if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
+						&vec_idx, buf_vec,
 						&head_idx, &len,
 						VHOST_ACCESS_RO) < 0))
 			return -1;
@@ -528,48 +529,22 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 }
 
 static __rte_always_inline uint32_t
-virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
+virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mbuf **pkts, uint32_t count)
 {
-	struct vhost_virtqueue *vq;
 	uint32_t pkt_idx = 0;
 	uint16_t num_buffers;
 	struct buf_vector buf_vec[BUF_VECTOR_MAX];
 	uint16_t avail_head;
 
-	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
-	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
-		RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
-			dev->vid, __func__, queue_id);
-		return 0;
-	}
-
-	vq = dev->virtqueue[queue_id];
-
-	rte_spinlock_lock(&vq->access_lock);
-
-	if (unlikely(vq->enabled == 0))
-		goto out_access_unlock;
-
-	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
-		vhost_user_iotlb_rd_lock(vq);
-
-	if (unlikely(vq->access_ok == 0))
-		if (unlikely(vring_translate(dev, vq) < 0))
-			goto out;
-
-	count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
-	if (count == 0)
-		goto out;
-
 	rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
-
 	avail_head = *((volatile uint16_t *)&vq->avail->idx);
+
 	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
 		uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
 		uint16_t nr_vec = 0;
 
-		if (unlikely(reserve_avail_buf(dev, vq,
+		if (unlikely(reserve_avail_buf_split(dev, vq,
 						pkt_len, buf_vec, &num_buffers,
 						avail_head, &nr_vec) < 0)) {
 			VHOST_LOG_DEBUG(VHOST_DATA,
@@ -602,6 +577,42 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 		vhost_vring_call(dev, vq);
 	}
 
+	return pkt_idx;
+}
+
+static __rte_always_inline uint32_t
+virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
+	struct rte_mbuf **pkts, uint32_t count)
+{
+	struct vhost_virtqueue *vq;
+
+	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
+	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+		RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
+			dev->vid, __func__, queue_id);
+		return 0;
+	}
+
+	vq = dev->virtqueue[queue_id];
+
+	rte_spinlock_lock(&vq->access_lock);
+
+	if (unlikely(vq->enabled == 0))
+		goto out_access_unlock;
+
+	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+		vhost_user_iotlb_rd_lock(vq);
+
+	if (unlikely(vq->access_ok == 0))
+		if (unlikely(vring_translate(dev, vq) < 0))
+			goto out;
+
+	count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
+	if (count == 0)
+		goto out;
+
+	count = virtio_dev_rx_split(dev, vq, pkts, count);
+
 out:
 	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
 		vhost_user_iotlb_rd_unlock(vq);
@@ -609,7 +620,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 out_access_unlock:
 	rte_spinlock_unlock(&vq->access_lock);
 
-	return pkt_idx;
+	return count;
 }
 
 uint16_t
@@ -1014,48 +1025,13 @@ restore_mbuf(struct rte_mbuf *m)
 	}
 }
 
-uint16_t
-rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
+static __rte_always_inline uint16_t
+virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
 {
-	struct virtio_net *dev;
-	struct rte_mbuf *rarp_mbuf = NULL;
-	struct vhost_virtqueue *vq;
-	uint32_t i = 0;
+	uint16_t i;
 	uint16_t free_entries;
 
-	dev = get_device(vid);
-	if (!dev)
-		return 0;
-
-	if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
-		RTE_LOG(ERR, VHOST_DATA,
-			"(%d) %s: built-in vhost net backend is disabled.\n",
-			dev->vid, __func__);
-		return 0;
-	}
-
-	if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
-		RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
-			dev->vid, __func__, queue_id);
-		return 0;
-	}
-
-	vq = dev->virtqueue[queue_id];
-
-	if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
-		return 0;
-
-	if (unlikely(vq->enabled == 0))
-		goto out_access_unlock;
-
-	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
-		vhost_user_iotlb_rd_lock(vq);
-
-	if (unlikely(vq->access_ok == 0))
-		if (unlikely(vring_translate(dev, vq) < 0))
-			goto out;
-
 	if (unlikely(dev->dequeue_zero_copy)) {
 		struct zcopy_mbuf *zmbuf, *next;
 		int nr_updated = 0;
@@ -1082,39 +1058,10 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 
 	rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
 
-	/*
-	 * Construct a RARP broadcast packet, and inject it to the "pkts"
-	 * array, to looks like that guest actually send such packet.
-	 *
-	 * Check user_send_rarp() for more information.
-	 *
-	 * broadcast_rarp shares a cacheline in the virtio_net structure
-	 * with some fields that are accessed during enqueue and
-	 * rte_atomic16_cmpset() causes a write if using cmpxchg. This could
-	 * result in false sharing between enqueue and dequeue.
-	 *
-	 * Prevent unnecessary false sharing by reading broadcast_rarp first
-	 * and only performing cmpset if the read indicates it is likely to
-	 * be set.
-	 */
-
-	if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) &&
-			rte_atomic16_cmpset((volatile uint16_t *)
-				&dev->broadcast_rarp.cnt, 1, 0))) {
-
-		rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
-		if (rarp_mbuf == NULL) {
-			RTE_LOG(ERR, VHOST_DATA,
-				"Failed to make RARP packet.\n");
-			return 0;
-		}
-		count -= 1;
-	}
-
 	free_entries = *((volatile uint16_t *)&vq->avail->idx) -
 		vq->last_avail_idx;
 	if (free_entries == 0)
-		goto out;
+		return 0;
 
 	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
 
@@ -1126,10 +1073,10 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 	for (i = 0; i < count; i++) {
 		struct buf_vector buf_vec[BUF_VECTOR_MAX];
 		uint16_t head_idx, dummy_len;
-		uint32_t nr_vec = 0;
+		uint16_t nr_vec = 0;
 		int err;
 
-		if (unlikely(fill_vec_buf(dev, vq,
+		if (unlikely(fill_vec_buf_split(dev, vq,
 						vq->last_avail_idx + i,
 						&nr_vec, buf_vec,
 						&head_idx, &dummy_len,
@@ -1188,6 +1135,81 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 		vhost_vring_call(dev, vq);
 	}
 
+	return i;
+}
+
+uint16_t
+rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
+	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
+{
+	struct virtio_net *dev;
+	struct rte_mbuf *rarp_mbuf = NULL;
+	struct vhost_virtqueue *vq;
+
+	dev = get_device(vid);
+	if (!dev)
+		return 0;
+
+	if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
+		RTE_LOG(ERR, VHOST_DATA,
+			"(%d) %s: built-in vhost net backend is disabled.\n",
+			dev->vid, __func__);
+		return 0;
+	}
+
+	if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
+		RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
+			dev->vid, __func__, queue_id);
+		return 0;
+	}
+
+	vq = dev->virtqueue[queue_id];
+
+	if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
+		return 0;
+
+	if (unlikely(vq->enabled == 0))
+		goto out_access_unlock;
+
+	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+		vhost_user_iotlb_rd_lock(vq);
+
+	if (unlikely(vq->access_ok == 0))
+		if (unlikely(vring_translate(dev, vq) < 0))
+			goto out;
+
+
+	/*
+	 * Construct a RARP broadcast packet, and inject it into the "pkts"
+	 * array, to make it look like the guest actually sent such a packet.
+	 *
+	 * Check user_send_rarp() for more information.
+	 *
+	 * broadcast_rarp shares a cacheline in the virtio_net structure
+	 * with some fields that are accessed during enqueue and
+	 * rte_atomic16_cmpset() causes a write if using cmpxchg. This could
+	 * result in false sharing between enqueue and dequeue.
+	 *
+	 * Prevent unnecessary false sharing by reading broadcast_rarp first
+	 * and only performing cmpset if the read indicates it is likely to
+	 * be set.
+	 */
+
+	if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) &&
+			rte_atomic16_cmpset((volatile uint16_t *)
+				&dev->broadcast_rarp.cnt, 1, 0))) {
+
+		rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
+		if (rarp_mbuf == NULL) {
+			RTE_LOG(ERR, VHOST_DATA,
+				"Failed to make RARP packet.\n");
+			return 0;
+		}
+		count -= 1;
+	}
+
+	count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count);
+
 out:
 	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
 		vhost_user_iotlb_rd_unlock(vq);
@@ -1200,10 +1222,10 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 		 * Inject it to the head of "pkts" array, so that switch's mac
 		 * learning table will get updated first.
 		 */
-		memmove(&pkts[1], pkts, i * sizeof(struct rte_mbuf *));
+		memmove(&pkts[1], pkts, count * sizeof(struct rte_mbuf *));
 		pkts[0] = rarp_mbuf;
-		i += 1;
+		count += 1;
 	}
 
-	return i;
+	return count;
 }
-- 
2.14.4


Thread overview: 34+ messages
2018-07-02  8:16 [dpdk-dev] [PATCH v6 00/15] Vhost: add support to packed ring layout Maxime Coquelin
2018-07-02  8:16 ` [dpdk-dev] [PATCH v6 01/15] vhost: add virtio packed virtqueue defines Maxime Coquelin
2018-07-04  5:37   ` Tiwei Bie
2018-07-04 16:03     ` Maxime Coquelin
2018-07-02  8:16 ` [dpdk-dev] [PATCH v6 02/15] vhost: add helpers for packed virtqueues Maxime Coquelin
2018-07-04  5:39   ` Tiwei Bie
2018-07-04 16:03     ` Maxime Coquelin
2018-07-02  8:16 ` [dpdk-dev] [PATCH v6 03/15] vhost: vring address setup for packed queues Maxime Coquelin
2018-07-02  8:16 ` [dpdk-dev] [PATCH v6 04/15] vhost: clear shadow used table index at flush time Maxime Coquelin
2018-07-02  8:16 ` [dpdk-dev] [PATCH v6 05/15] vhost: make indirect desc table copy desc type agnostic Maxime Coquelin
2018-07-02  8:16 ` [dpdk-dev] [PATCH v6 06/15] vhost: clear batch copy index at copy time Maxime Coquelin
2018-07-02  8:16 ` Maxime Coquelin [this message]
2018-07-04  6:51   ` [dpdk-dev] [PATCH v6 07/15] vhost: extract split ring handling from Rx and Tx functions Tiwei Bie
2018-07-04 21:04     ` Maxime Coquelin
2018-07-02  8:16 ` [dpdk-dev] [PATCH v6 08/15] vhost: append shadow used ring function names with split Maxime Coquelin
2018-07-02  8:16 ` [dpdk-dev] [PATCH v6 09/15] vhost: add shadow used ring support for packed rings Maxime Coquelin
2018-07-02  8:16 ` [dpdk-dev] [PATCH v6 10/15] vhost: create descriptor mapping function Maxime Coquelin
2018-07-04  5:56   ` Tiwei Bie
2018-07-04 16:18     ` Maxime Coquelin
2018-07-02  8:16 ` [dpdk-dev] [PATCH v6 11/15] vhost: add vector filling support for packed ring Maxime Coquelin
2018-07-04  5:53   ` Tiwei Bie
2018-07-04 16:18     ` Maxime Coquelin
2018-07-02  8:16 ` [dpdk-dev] [PATCH v6 12/15] vhost: add Rx " Maxime Coquelin
2018-07-02  8:16 ` [dpdk-dev] [PATCH v6 13/15] vhost: add Tx " Maxime Coquelin
2018-07-04  5:45   ` Tiwei Bie
2018-07-04 16:09     ` Maxime Coquelin
2018-07-02  8:16 ` [dpdk-dev] [PATCH v6 14/15] vhost: add notification " Maxime Coquelin
2018-07-03  5:57   ` Jason Wang
2018-07-03  6:43     ` Maxime Coquelin
2018-07-03  6:06   ` Jason Wang
2018-07-04 16:02     ` Maxime Coquelin
2018-07-04  6:25   ` Tiwei Bie
2018-07-04 20:20     ` Maxime Coquelin
2018-07-02  8:16 ` [dpdk-dev] [PATCH v6 15/15] vhost: advertize packed ring layout support Maxime Coquelin
