DPDK patches and discussions
* [dpdk-dev] [PATCH] vhost: add support for packed ring in async vhost
@ 2021-03-17  8:54 Cheng Jiang
  2021-03-22  6:15 ` [dpdk-dev] [PATCH v2] " Cheng Jiang
                   ` (2 more replies)
  0 siblings, 3 replies; 13+ messages in thread
From: Cheng Jiang @ 2021-03-17  8:54 UTC (permalink / raw)
  To: maxime.coquelin, chenbo.xia
  Cc: dev, jiayu.hu, yvonnex.yang, yinan.wang, Cheng Jiang

For now, the async vhost data path only supports the split ring structure.
In order to make async vhost compatible with the virtio 1.1 spec, this
patch enables packed ring support in the async vhost data path.

Signed-off-by: Cheng Jiang <Cheng1.jiang@intel.com>
---
 lib/librte_vhost/rte_vhost_async.h |   1 +
 lib/librte_vhost/vhost.c           |  15 +-
 lib/librte_vhost/vhost.h           |   7 +-
 lib/librte_vhost/virtio_net.c      | 449 +++++++++++++++++++++++++++--
 4 files changed, 436 insertions(+), 36 deletions(-)
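
A minimal usage sketch of the application-side call flow that this patch
extends to packed rings (not part of the diff; the
rte_vhost_submit_enqueue_burst() prototype here is assumed from the current
async API, rte_vhost_poll_enqueue_completed() is the function touched below,
and APP_BURST is an illustrative constant):

#include <rte_mbuf.h>
#include <rte_vhost_async.h>

#define APP_BURST 32	/* illustrative burst size, not from this patch */

static void
app_enqueue_async(int vid, uint16_t queue_id,
		struct rte_mbuf **pkts, uint16_t count)
{
	struct rte_mbuf *comp_pkts[APP_BURST];	/* assumes count <= APP_BURST */
	uint32_t comp_count = 0;
	uint16_t n_enq, n_done;

	/* packets copied synchronously by the CPU are returned right away */
	n_enq = rte_vhost_submit_enqueue_burst(vid, queue_id, pkts, count,
			comp_pkts, &comp_count);
	rte_pktmbuf_free_bulk(comp_pkts, comp_count);

	/* pkts[n_enq..count) were not accepted; drop them here */
	if (n_enq < count)
		rte_pktmbuf_free_bulk(&pkts[n_enq], count - n_enq);

	/* packets offloaded to the async copy engine complete later */
	n_done = rte_vhost_poll_enqueue_completed(vid, queue_id, pkts, count);
	rte_pktmbuf_free_bulk(pkts, n_done);
}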

diff --git a/lib/librte_vhost/rte_vhost_async.h b/lib/librte_vhost/rte_vhost_async.h
index c855ff875..29de5df8c 100644
--- a/lib/librte_vhost/rte_vhost_async.h
+++ b/lib/librte_vhost/rte_vhost_async.h
@@ -89,6 +89,7 @@ struct rte_vhost_async_channel_ops {
 struct async_inflight_info {
 	struct rte_mbuf *mbuf;
 	uint16_t descs; /* num of descs inflight */
+	uint16_t nr_buffers; /* num of buffers inflight for packed ring*/
 };
 
 /**
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 52ab93d1e..445a9f327 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -1603,9 +1603,9 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
 		return -1;
 
 	/* packed queue is not supported */
-	if (unlikely(vq_is_packed(dev) || !f.async_inorder)) {
+	if (unlikely(!f.async_inorder)) {
 		VHOST_LOG_CONFIG(ERR,
-			"async copy is not supported on packed queue or non-inorder mode "
+			"async copy is not supported on non-inorder mode "
 			"(vid %d, qid: %d)\n", vid, queue_id);
 		return -1;
 	}
@@ -1643,10 +1643,17 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
 	vq->vec_pool = rte_malloc_socket(NULL,
 			VHOST_MAX_ASYNC_VEC * sizeof(struct iovec),
 			RTE_CACHE_LINE_SIZE, node);
-	vq->async_descs_split = rte_malloc_socket(NULL,
+	if (vq_is_packed(dev)) {
+		vq->async_buffers_packed = rte_malloc_socket(NULL,
+			vq->size * sizeof(struct vring_used_elem_packed),
+			RTE_CACHE_LINE_SIZE, node);
+	} else {
+		vq->async_descs_split = rte_malloc_socket(NULL,
 			vq->size * sizeof(struct vring_used_elem),
 			RTE_CACHE_LINE_SIZE, node);
-	if (!vq->async_descs_split || !vq->async_pkts_info ||
+	}
+
+	if (!vq->async_pkts_info ||
 		!vq->it_pool || !vq->vec_pool) {
 		vhost_free_async_mem(vq);
 		VHOST_LOG_CONFIG(ERR,
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 658f6fc28..d6324fbf8 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -206,9 +206,14 @@ struct vhost_virtqueue {
 	uint16_t	async_pkts_idx;
 	uint16_t	async_pkts_inflight_n;
 	uint16_t	async_last_pkts_n;
-	struct vring_used_elem  *async_descs_split;
+	union {
+		struct vring_used_elem  *async_descs_split;
+		struct vring_used_elem_packed *async_buffers_packed;
+	};
 	uint16_t async_desc_idx;
+	uint16_t async_packed_buffer_idx;
 	uint16_t last_async_desc_idx;
+	uint16_t last_async_buffer_idx;
 
 	/* vq async features */
 	bool		async_inorder;
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 583bf379c..9e798226b 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -363,8 +363,7 @@ vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
 }
 
 static __rte_always_inline void
-vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
-				   struct vhost_virtqueue *vq,
+vhost_shadow_enqueue_packed(struct vhost_virtqueue *vq,
 				   uint32_t len[],
 				   uint16_t id[],
 				   uint16_t count[],
@@ -382,6 +381,17 @@ vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
 		vq->shadow_aligned_idx += count[i];
 		vq->shadow_used_idx++;
 	}
+}
+
+static __rte_always_inline void
+vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
+				   struct vhost_virtqueue *vq,
+				   uint32_t len[],
+				   uint16_t id[],
+				   uint16_t count[],
+				   uint16_t num_buffers)
+{
+	vhost_shadow_enqueue_packed(vq, len, id, count, num_buffers);
 
 	if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
 		do_data_copy_enqueue(dev, vq);
@@ -1633,12 +1643,343 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	return pkt_idx;
 }
 
+static __rte_always_inline int
+vhost_enqueue_async_single_packed(struct virtio_net *dev,
+			    struct vhost_virtqueue *vq,
+			    struct rte_mbuf *pkt,
+			    struct buf_vector *buf_vec,
+			    uint16_t *nr_descs,
+			    uint16_t *nr_buffers,
+			    struct iovec *src_iovec, struct iovec *dst_iovec,
+			    struct rte_vhost_iov_iter *src_it,
+			    struct rte_vhost_iov_iter *dst_it)
+{
+	uint16_t nr_vec = 0;
+	uint16_t avail_idx = vq->last_avail_idx;
+	uint16_t max_tries, tries = 0;
+	uint16_t buf_id = 0;
+	uint32_t len = 0;
+	uint16_t desc_count;
+	uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
+	uint32_t buffer_len[vq->size];
+	uint16_t buffer_buf_id[vq->size];
+	uint16_t buffer_desc_count[vq->size];
+	*nr_buffers = 0;
+
+	if (rxvq_is_mergeable(dev))
+		max_tries = vq->size - 1;
+	else
+		max_tries = 1;
+
+	while (size > 0) {
+		/*
+		 * if we tried all available ring items, and still
+		 * can't get enough buf, it means something abnormal
+		 * happened.
+		 */
+		if (unlikely(++tries > max_tries))
+			return -1;
+
+		if (unlikely(fill_vec_buf_packed(dev, vq,
+						avail_idx, &desc_count,
+						buf_vec, &nr_vec,
+						&buf_id, &len,
+						VHOST_ACCESS_RW) < 0))
+			return -1;
+
+		len = RTE_MIN(len, size);
+		size -= len;
+
+		buffer_len[*nr_buffers] = len;
+		buffer_buf_id[*nr_buffers] = buf_id;
+		buffer_desc_count[*nr_buffers] = desc_count;
+		*nr_buffers += 1;
+
+		*nr_descs += desc_count;
+		avail_idx += desc_count;
+		if (avail_idx >= vq->size)
+			avail_idx -= vq->size;
+	}
+
+	if (async_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, *nr_buffers,
+		src_iovec, dst_iovec, src_it, dst_it) < 0)
+		return -1;
+
+	vhost_shadow_enqueue_packed(vq, buffer_len, buffer_buf_id,
+					   buffer_desc_count, *nr_buffers);
+
+	return 0;
+}
+
+static __rte_always_inline int16_t
+virtio_dev_rx_async_single_packed(struct virtio_net *dev,
+			    struct vhost_virtqueue *vq,
+			    struct rte_mbuf *pkt,
+			    uint16_t *nr_descs, uint16_t *nr_buffers,
+			    struct iovec *src_iovec, struct iovec *dst_iovec,
+			    struct rte_vhost_iov_iter *src_it,
+			    struct rte_vhost_iov_iter *dst_it)
+{
+	struct buf_vector buf_vec[BUF_VECTOR_MAX];
+	*nr_descs = 0;
+	*nr_buffers = 0;
+
+	if (unlikely(vhost_enqueue_async_single_packed(dev, vq, pkt, buf_vec,
+						 nr_descs,
+						 nr_buffers,
+						 src_iovec, dst_iovec,
+						 src_it, dst_it) < 0)) {
+		VHOST_LOG_DATA(DEBUG,
+				"(%d) failed to get enough desc from vring\n",
+				dev->vid);
+		return -1;
+	}
+
+	VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
+			dev->vid, vq->last_avail_idx,
+			vq->last_avail_idx + *nr_descs);
+
+	return 0;
+}
+
+static __rte_noinline uint32_t
+virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
+	struct vhost_virtqueue *vq, uint16_t queue_id,
+	struct rte_mbuf **pkts, uint32_t count,
+	struct rte_mbuf **comp_pkts, uint32_t *comp_count)
+{
+	uint32_t pkt_idx = 0, pkt_burst_idx = 0;
+	uint16_t num_buffers;
+	uint16_t num_desc;
+
+	struct rte_vhost_iov_iter *it_pool = vq->it_pool;
+	struct iovec *vec_pool = vq->vec_pool;
+	struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
+	struct iovec *src_iovec = vec_pool;
+	struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
+	struct rte_vhost_iov_iter *src_it = it_pool;
+	struct rte_vhost_iov_iter *dst_it = it_pool + 1;
+	uint16_t slot_idx = 0;
+	uint16_t segs_await = 0;
+	struct async_inflight_info *pkts_info = vq->async_pkts_info;
+	uint32_t n_pkts = 0, pkt_err = 0;
+	uint32_t num_async_pkts = 0, num_done_pkts = 0;
+	struct {
+		uint16_t pkt_idx;
+		uint16_t last_avail_idx;
+	} async_pkts_log[MAX_PKT_BURST];
+
+	rte_prefetch0(&vq->desc[vq->last_avail_idx & (vq->size - 1)]);
+
+	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
+		if (unlikely(virtio_dev_rx_async_single_packed(dev, vq,
+						pkts[pkt_idx],
+						&num_desc, &num_buffers,
+						src_iovec, dst_iovec,
+						src_it, dst_it) < 0)) {
+			break;
+		}
+
+		VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
+			dev->vid, vq->last_avail_idx,
+			vq->last_avail_idx + num_desc);
+
+		slot_idx = (vq->async_pkts_idx + num_async_pkts) &
+			(vq->size - 1);
+		if (src_it->count) {
+			uint16_t from, to;
+
+			async_fill_desc(&tdes[pkt_burst_idx++], src_it, dst_it);
+			pkts_info[slot_idx].descs = num_desc;
+			pkts_info[slot_idx].nr_buffers = num_buffers;
+			pkts_info[slot_idx].mbuf = pkts[pkt_idx];
+			async_pkts_log[num_async_pkts].pkt_idx = pkt_idx;
+			async_pkts_log[num_async_pkts++].last_avail_idx =
+				vq->last_avail_idx;
+			src_iovec += src_it->nr_segs;
+			dst_iovec += dst_it->nr_segs;
+			src_it += 2;
+			dst_it += 2;
+			segs_await += src_it->nr_segs;
+
+			/**
+			 * recover shadow used ring and keep DMA-occupied
+			 * descriptors.
+			 */
+			from = vq->shadow_used_idx - num_buffers;
+			to = vq->async_packed_buffer_idx & (vq->size - 1);
+			if (num_buffers + to <= vq->size) {
+				rte_memcpy(&vq->async_buffers_packed[to],
+					&vq->shadow_used_packed[from],
+					num_buffers *
+					sizeof(struct vring_used_elem_packed));
+			} else {
+				int size = vq->size - to;
+
+				rte_memcpy(&vq->async_buffers_packed[to],
+					&vq->shadow_used_packed[from],
+					size *
+					sizeof(struct vring_used_elem_packed));
+				rte_memcpy(vq->async_buffers_packed,
+					&vq->shadow_used_packed[from +
+					size], (num_buffers - size) *
+					sizeof(struct vring_used_elem_packed));
+			}
+			vq->async_packed_buffer_idx += num_buffers;
+			vq->shadow_used_idx -= num_buffers;
+		} else
+			comp_pkts[num_done_pkts++] = pkts[pkt_idx];
+
+		vq_inc_last_avail_packed(vq, num_desc);
+
+		/*
+		 * conditions to trigger async device transfer:
+		 * - buffered packet number reaches transfer threshold
+		 * - unused async iov number is less than max vhost vector
+		 */
+		if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
+			((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
+			BUF_VECTOR_MAX))) {
+			n_pkts = vq->async_ops.transfer_data(dev->vid,
+					queue_id, tdes, 0, pkt_burst_idx);
+			src_iovec = vec_pool;
+			dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
+			src_it = it_pool;
+			dst_it = it_pool + 1;
+			segs_await = 0;
+			vq->async_pkts_inflight_n += n_pkts;
+
+			if (unlikely(n_pkts < pkt_burst_idx)) {
+				/*
+				 * log error packets number here and do actual
+				 * error processing when applications poll
+				 * completion
+				 */
+				pkt_err = pkt_burst_idx - n_pkts;
+				pkt_burst_idx = 0;
+				break;
+			}
+
+			pkt_burst_idx = 0;
+		}
+	}
+
+	if (pkt_burst_idx) {
+		n_pkts = vq->async_ops.transfer_data(dev->vid,
+				queue_id, tdes, 0, pkt_burst_idx);
+		vq->async_pkts_inflight_n += n_pkts;
+
+		if (unlikely(n_pkts < pkt_burst_idx))
+			pkt_err = pkt_burst_idx - n_pkts;
+	}
+
+	do_data_copy_enqueue(dev, vq);
+
+	if (unlikely(pkt_err)) {
+		uint16_t num_buffers = 0;
+
+		num_async_pkts -= pkt_err;
+		/* calculate the sum of descriptors of DMA-error packets. */
+		while (pkt_err-- > 0) {
+			num_buffers +=
+				pkts_info[slot_idx & (vq->size - 1)].nr_buffers;
+			slot_idx--;
+		}
+		vq->async_packed_buffer_idx -= num_buffers;
+		/* recover shadow used ring and available ring */
+		vq->shadow_used_idx -= (vq->last_avail_idx -
+				async_pkts_log[num_async_pkts].last_avail_idx -
+				num_buffers);
+		vq->last_avail_idx =
+			async_pkts_log[num_async_pkts].last_avail_idx;
+		pkt_idx = async_pkts_log[num_async_pkts].pkt_idx;
+		num_done_pkts = pkt_idx - num_async_pkts;
+	}
+
+	vq->async_pkts_idx += num_async_pkts;
+	*comp_count = num_done_pkts;
+
+	if (likely(vq->shadow_used_idx)) {
+		vhost_flush_enqueue_shadow_packed(dev, vq);
+		vhost_vring_call_packed(dev, vq);
+	}
+
+	return pkt_idx;
+}
+
+static __rte_always_inline void
+vhost_update_used_packed(struct virtio_net *dev,
+				  struct vhost_virtqueue *vq,
+				  struct vring_used_elem_packed *shadow_ring,
+				  uint16_t count)
+{
+	if (count == 0)
+		return;
+	int i;
+	uint16_t used_idx = vq->last_used_idx;
+	uint16_t head_idx = vq->last_used_idx;
+	uint16_t head_flags = 0;
+
+	/* Split loop in two to save memory barriers */
+	for (i = 0; i < count; i++) {
+		vq->desc_packed[used_idx].id = shadow_ring[i].id;
+		vq->desc_packed[used_idx].len = shadow_ring[i].len;
+
+		used_idx += shadow_ring[i].count;
+		if (used_idx >= vq->size)
+			used_idx -= vq->size;
+	}
+
+	/* The ordering for storing desc flags needs to be enforced. */
+	rte_atomic_thread_fence(__ATOMIC_RELEASE);
+
+	for (i = 0; i < count; i++) {
+		uint16_t flags;
+
+		if (vq->shadow_used_packed[i].len)
+			flags = VRING_DESC_F_WRITE;
+		else
+			flags = 0;
+
+		if (vq->used_wrap_counter) {
+			flags |= VRING_DESC_F_USED;
+			flags |= VRING_DESC_F_AVAIL;
+		} else {
+			flags &= ~VRING_DESC_F_USED;
+			flags &= ~VRING_DESC_F_AVAIL;
+		}
+
+		if (i > 0) {
+			vq->desc_packed[vq->last_used_idx].flags = flags;
+
+			vhost_log_cache_used_vring(dev, vq,
+					vq->last_used_idx *
+					sizeof(struct vring_packed_desc),
+					sizeof(struct vring_packed_desc));
+		} else {
+			head_idx = vq->last_used_idx;
+			head_flags = flags;
+		}
+
+		vq_inc_last_used_packed(vq, shadow_ring[i].count);
+	}
+
+	vq->desc_packed[head_idx].flags = head_flags;
+
+	vhost_log_cache_used_vring(dev, vq,
+				head_idx *
+				sizeof(struct vring_packed_desc),
+				sizeof(struct vring_packed_desc));
+
+	vhost_log_cache_sync(dev, vq);
+}
+
 uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 		struct rte_mbuf **pkts, uint16_t count)
 {
 	struct virtio_net *dev = get_device(vid);
 	struct vhost_virtqueue *vq;
-	uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0;
+	uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
 	uint16_t start_idx, pkts_idx, vq_size;
 	struct async_inflight_info *pkts_info;
 	uint16_t from, i;
@@ -1680,53 +2021,98 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 		goto done;
 	}
 
-	for (i = 0; i < n_pkts_put; i++) {
-		from = (start_idx + i) & (vq_size - 1);
-		n_descs += pkts_info[from].descs;
-		pkts[i] = pkts_info[from].mbuf;
+	if (vq_is_packed(dev)) {
+		for (i = 0; i < n_pkts_put; i++) {
+			from = (start_idx + i) & (vq_size - 1);
+			n_buffers += pkts_info[from].nr_buffers;
+			pkts[i] = pkts_info[from].mbuf;
+		}
+	} else {
+		for (i = 0; i < n_pkts_put; i++) {
+			from = (start_idx + i) & (vq_size - 1);
+			n_descs += pkts_info[from].descs;
+			pkts[i] = pkts_info[from].mbuf;
+		}
 	}
+
 	vq->async_last_pkts_n = n_pkts_cpl - n_pkts_put;
 	vq->async_pkts_inflight_n -= n_pkts_put;
 
 	if (likely(vq->enabled && vq->access_ok)) {
-		uint16_t nr_left = n_descs;
 		uint16_t nr_copy;
 		uint16_t to;
 
 		/* write back completed descriptors to used ring */
-		do {
-			from = vq->last_async_desc_idx & (vq->size - 1);
-			nr_copy = nr_left + from <= vq->size ? nr_left :
-				vq->size - from;
-			to = vq->last_used_idx & (vq->size - 1);
-
-			if (to + nr_copy <= vq->size) {
-				rte_memcpy(&vq->used->ring[to],
+		if (vq_is_packed(dev)) {
+			uint16_t nr_left = n_buffers;
+			uint16_t to;
+			do {
+				to = vq->async_packed_buffer_idx &
+								(vq->size - 1);
+				from = vq->last_async_buffer_idx &
+								(vq->size - 1);
+				if (to == from)
+					break;
+				if (to > from) {
+					vhost_update_used_packed(dev, vq,
+						vq->async_buffers_packed + from,
+						to - from);
+					vq->last_async_buffer_idx += to - from;
+					nr_left -= to - from;
+				} else {
+					vhost_update_used_packed(dev, vq,
+						vq->async_buffers_packed + from,
+						vq->size - from);
+					vq->last_async_buffer_idx +=
+								vq->size - from;
+					nr_left -= vq->size - from;
+				}
+			} while (nr_left > 0);
+			vhost_vring_call_packed(dev, vq);
+		} else {
+			uint16_t nr_left = n_descs;
+			do {
+				from = vq->last_async_desc_idx & (vq->size - 1);
+				nr_copy = nr_left + from <= vq->size ? nr_left :
+					vq->size - from;
+				to = vq->last_used_idx & (vq->size - 1);
+
+				if (to + nr_copy <= vq->size) {
+					rte_memcpy(&vq->used->ring[to],
 						&vq->async_descs_split[from],
 						nr_copy *
 						sizeof(struct vring_used_elem));
-			} else {
-				uint16_t size = vq->size - to;
+				} else {
+					uint16_t size = vq->size - to;
 
-				rte_memcpy(&vq->used->ring[to],
+					rte_memcpy(&vq->used->ring[to],
 						&vq->async_descs_split[from],
 						size *
 						sizeof(struct vring_used_elem));
-				rte_memcpy(vq->used->ring,
+					rte_memcpy(vq->used->ring,
 						&vq->async_descs_split[from +
 						size], (nr_copy - size) *
 						sizeof(struct vring_used_elem));
-			}
+				}
+
+				vq->last_async_desc_idx += nr_copy;
+				vq->last_used_idx += nr_copy;
+				nr_left -= nr_copy;
+			} while (nr_left > 0);
+
+			__atomic_add_fetch(&vq->used->idx, n_descs,
+					__ATOMIC_RELEASE);
+			vhost_vring_call_split(dev, vq);
+		}
 
-			vq->last_async_desc_idx += nr_copy;
-			vq->last_used_idx += nr_copy;
-			nr_left -= nr_copy;
-		} while (nr_left > 0);
 
-		__atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
-		vhost_vring_call_split(dev, vq);
-	} else
-		vq->last_async_desc_idx += n_descs;
+
+	} else {
+		if (vq_is_packed(dev))
+			vq->last_async_buffer_idx += n_buffers;
+		else
+			vq->last_async_desc_idx += n_descs;
+	}
 
 done:
 	rte_spinlock_unlock(&vq->access_lock);
@@ -1767,9 +2153,10 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
 	if (count == 0)
 		goto out;
 
-	/* TODO: packed queue not implemented */
 	if (vq_is_packed(dev))
-		nb_tx = 0;
+		nb_tx = virtio_dev_rx_async_submit_packed(dev,
+				vq, queue_id, pkts, count, comp_pkts,
+				comp_count);
 	else
 		nb_tx = virtio_dev_rx_async_submit_split(dev,
 				vq, queue_id, pkts, count, comp_pkts,
-- 
2.29.2



* [dpdk-dev] [PATCH v2] vhost: add support for packed ring in async vhost
  2021-03-17  8:54 [dpdk-dev] [PATCH] vhost: add support for packed ring in async vhost Cheng Jiang
@ 2021-03-22  6:15 ` Cheng Jiang
  2021-03-24  9:19   ` Liu, Yong
  2021-03-31 14:06 ` [dpdk-dev] [PATCH v3] " Cheng Jiang
  2021-04-10 10:25 ` [dpdk-dev] [PATCH v4 0/4] " Cheng Jiang
  2 siblings, 1 reply; 13+ messages in thread
From: Cheng Jiang @ 2021-03-22  6:15 UTC (permalink / raw)
  To: maxime.coquelin, chenbo.xia
  Cc: dev, jiayu.hu, yvonnex.yang, yinan.wang, Cheng Jiang

For now, the async vhost data path only supports the split ring structure.
In order to make async vhost compatible with the virtio 1.1 spec, this
patch enables packed ring support in the async vhost data path.

Signed-off-by: Cheng Jiang <Cheng1.jiang@intel.com>
---
v2:
  * fix wrong buffer index in rte_vhost_poll_enqueue_completed()
  * add async_buffers_packed memory free in vhost_free_async_mem()

 lib/librte_vhost/rte_vhost_async.h |   1 +
 lib/librte_vhost/vhost.c           |  24 +-
 lib/librte_vhost/vhost.h           |   7 +-
 lib/librte_vhost/virtio_net.c      | 447 +++++++++++++++++++++++++++--
 4 files changed, 441 insertions(+), 38 deletions(-)

diff --git a/lib/librte_vhost/rte_vhost_async.h b/lib/librte_vhost/rte_vhost_async.h
index c855ff875..6faa31f5a 100644
--- a/lib/librte_vhost/rte_vhost_async.h
+++ b/lib/librte_vhost/rte_vhost_async.h
@@ -89,6 +89,7 @@ struct rte_vhost_async_channel_ops {
 struct async_inflight_info {
 	struct rte_mbuf *mbuf;
 	uint16_t descs; /* num of descs inflight */
+	uint16_t nr_buffers; /* num of buffers inflight for packed ring */
 };

 /**
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 52ab93d1e..51b44d6f2 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -330,15 +330,20 @@ vhost_free_async_mem(struct vhost_virtqueue *vq)
 {
 	if (vq->async_pkts_info)
 		rte_free(vq->async_pkts_info);
-	if (vq->async_descs_split)
+	if (vq->async_buffers_packed) {
+		rte_free(vq->async_buffers_packed);
+		vq->async_buffers_packed = NULL;
+	} else {
 		rte_free(vq->async_descs_split);
+		vq->async_descs_split = NULL;
+	}
+
 	if (vq->it_pool)
 		rte_free(vq->it_pool);
 	if (vq->vec_pool)
 		rte_free(vq->vec_pool);

 	vq->async_pkts_info = NULL;
-	vq->async_descs_split = NULL;
 	vq->it_pool = NULL;
 	vq->vec_pool = NULL;
 }
@@ -1603,9 +1608,9 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
 		return -1;

 	/* packed queue is not supported */
-	if (unlikely(vq_is_packed(dev) || !f.async_inorder)) {
+	if (unlikely(!f.async_inorder)) {
 		VHOST_LOG_CONFIG(ERR,
-			"async copy is not supported on packed queue or non-inorder mode "
+			"async copy is not supported on non-inorder mode "
 			"(vid %d, qid: %d)\n", vid, queue_id);
 		return -1;
 	}
@@ -1643,10 +1648,17 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
 	vq->vec_pool = rte_malloc_socket(NULL,
 			VHOST_MAX_ASYNC_VEC * sizeof(struct iovec),
 			RTE_CACHE_LINE_SIZE, node);
-	vq->async_descs_split = rte_malloc_socket(NULL,
+	if (vq_is_packed(dev)) {
+		vq->async_buffers_packed = rte_malloc_socket(NULL,
+			vq->size * sizeof(struct vring_used_elem_packed),
+			RTE_CACHE_LINE_SIZE, node);
+	} else {
+		vq->async_descs_split = rte_malloc_socket(NULL,
 			vq->size * sizeof(struct vring_used_elem),
 			RTE_CACHE_LINE_SIZE, node);
-	if (!vq->async_descs_split || !vq->async_pkts_info ||
+	}
+
+	if (!vq->async_pkts_info ||
 		!vq->it_pool || !vq->vec_pool) {
 		vhost_free_async_mem(vq);
 		VHOST_LOG_CONFIG(ERR,
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 658f6fc28..d6324fbf8 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -206,9 +206,14 @@ struct vhost_virtqueue {
 	uint16_t	async_pkts_idx;
 	uint16_t	async_pkts_inflight_n;
 	uint16_t	async_last_pkts_n;
-	struct vring_used_elem  *async_descs_split;
+	union {
+		struct vring_used_elem  *async_descs_split;
+		struct vring_used_elem_packed *async_buffers_packed;
+	};
 	uint16_t async_desc_idx;
+	uint16_t async_packed_buffer_idx;
 	uint16_t last_async_desc_idx;
+	uint16_t last_async_buffer_idx;

 	/* vq async features */
 	bool		async_inorder;
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 583bf379c..fa8c4f4fe 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -363,8 +363,7 @@ vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
 }

 static __rte_always_inline void
-vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
-				   struct vhost_virtqueue *vq,
+vhost_shadow_enqueue_packed(struct vhost_virtqueue *vq,
 				   uint32_t len[],
 				   uint16_t id[],
 				   uint16_t count[],
@@ -382,6 +381,17 @@ vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
 		vq->shadow_aligned_idx += count[i];
 		vq->shadow_used_idx++;
 	}
+}
+
+static __rte_always_inline void
+vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
+				   struct vhost_virtqueue *vq,
+				   uint32_t len[],
+				   uint16_t id[],
+				   uint16_t count[],
+				   uint16_t num_buffers)
+{
+	vhost_shadow_enqueue_packed(vq, len, id, count, num_buffers);

 	if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
 		do_data_copy_enqueue(dev, vq);
@@ -1633,12 +1643,343 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	return pkt_idx;
 }

+static __rte_always_inline int
+vhost_enqueue_async_single_packed(struct virtio_net *dev,
+			    struct vhost_virtqueue *vq,
+			    struct rte_mbuf *pkt,
+			    struct buf_vector *buf_vec,
+			    uint16_t *nr_descs,
+			    uint16_t *nr_buffers,
+			    struct iovec *src_iovec, struct iovec *dst_iovec,
+			    struct rte_vhost_iov_iter *src_it,
+			    struct rte_vhost_iov_iter *dst_it)
+{
+	uint16_t nr_vec = 0;
+	uint16_t avail_idx = vq->last_avail_idx;
+	uint16_t max_tries, tries = 0;
+	uint16_t buf_id = 0;
+	uint32_t len = 0;
+	uint16_t desc_count;
+	uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
+	uint32_t buffer_len[vq->size];
+	uint16_t buffer_buf_id[vq->size];
+	uint16_t buffer_desc_count[vq->size];
+	*nr_buffers = 0;
+
+	if (rxvq_is_mergeable(dev))
+		max_tries = vq->size - 1;
+	else
+		max_tries = 1;
+
+	while (size > 0) {
+		/*
+		 * if we tried all available ring items, and still
+		 * can't get enough buf, it means something abnormal
+		 * happened.
+		 */
+		if (unlikely(++tries > max_tries))
+			return -1;
+
+		if (unlikely(fill_vec_buf_packed(dev, vq,
+						avail_idx, &desc_count,
+						buf_vec, &nr_vec,
+						&buf_id, &len,
+						VHOST_ACCESS_RW) < 0))
+			return -1;
+
+		len = RTE_MIN(len, size);
+		size -= len;
+
+		buffer_len[*nr_buffers] = len;
+		buffer_buf_id[*nr_buffers] = buf_id;
+		buffer_desc_count[*nr_buffers] = desc_count;
+		*nr_buffers += 1;
+
+		*nr_descs += desc_count;
+		avail_idx += desc_count;
+		if (avail_idx >= vq->size)
+			avail_idx -= vq->size;
+	}
+
+	if (async_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, *nr_buffers,
+		src_iovec, dst_iovec, src_it, dst_it) < 0)
+		return -1;
+
+	vhost_shadow_enqueue_packed(vq, buffer_len, buffer_buf_id,
+					   buffer_desc_count, *nr_buffers);
+
+	return 0;
+}
+
+static __rte_always_inline int16_t
+virtio_dev_rx_async_single_packed(struct virtio_net *dev,
+			    struct vhost_virtqueue *vq,
+			    struct rte_mbuf *pkt,
+			    uint16_t *nr_descs, uint16_t *nr_buffers,
+			    struct iovec *src_iovec, struct iovec *dst_iovec,
+			    struct rte_vhost_iov_iter *src_it,
+			    struct rte_vhost_iov_iter *dst_it)
+{
+	struct buf_vector buf_vec[BUF_VECTOR_MAX];
+	*nr_descs = 0;
+	*nr_buffers = 0;
+
+	if (unlikely(vhost_enqueue_async_single_packed(dev, vq, pkt, buf_vec,
+						 nr_descs,
+						 nr_buffers,
+						 src_iovec, dst_iovec,
+						 src_it, dst_it) < 0)) {
+		VHOST_LOG_DATA(DEBUG,
+				"(%d) failed to get enough desc from vring\n",
+				dev->vid);
+		return -1;
+	}
+
+	VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
+			dev->vid, vq->last_avail_idx,
+			vq->last_avail_idx + *nr_descs);
+
+	return 0;
+}
+
+static __rte_noinline uint32_t
+virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
+	struct vhost_virtqueue *vq, uint16_t queue_id,
+	struct rte_mbuf **pkts, uint32_t count,
+	struct rte_mbuf **comp_pkts, uint32_t *comp_count)
+{
+	uint32_t pkt_idx = 0, pkt_burst_idx = 0;
+	uint16_t num_buffers;
+	uint16_t num_desc;
+
+	struct rte_vhost_iov_iter *it_pool = vq->it_pool;
+	struct iovec *vec_pool = vq->vec_pool;
+	struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
+	struct iovec *src_iovec = vec_pool;
+	struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
+	struct rte_vhost_iov_iter *src_it = it_pool;
+	struct rte_vhost_iov_iter *dst_it = it_pool + 1;
+	uint16_t slot_idx = 0;
+	uint16_t segs_await = 0;
+	struct async_inflight_info *pkts_info = vq->async_pkts_info;
+	uint32_t n_pkts = 0, pkt_err = 0;
+	uint32_t num_async_pkts = 0, num_done_pkts = 0;
+	struct {
+		uint16_t pkt_idx;
+		uint16_t last_avail_idx;
+	} async_pkts_log[MAX_PKT_BURST];
+
+	rte_prefetch0(&vq->desc[vq->last_avail_idx & (vq->size - 1)]);
+
+	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
+		if (unlikely(virtio_dev_rx_async_single_packed(dev, vq,
+						pkts[pkt_idx],
+						&num_desc, &num_buffers,
+						src_iovec, dst_iovec,
+						src_it, dst_it) < 0)) {
+			break;
+		}
+
+		VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
+			dev->vid, vq->last_avail_idx,
+			vq->last_avail_idx + num_desc);
+
+		slot_idx = (vq->async_pkts_idx + num_async_pkts) &
+			(vq->size - 1);
+		if (src_it->count) {
+			uint16_t from, to;
+
+			async_fill_desc(&tdes[pkt_burst_idx++], src_it, dst_it);
+			pkts_info[slot_idx].descs = num_desc;
+			pkts_info[slot_idx].nr_buffers = num_buffers;
+			pkts_info[slot_idx].mbuf = pkts[pkt_idx];
+			async_pkts_log[num_async_pkts].pkt_idx = pkt_idx;
+			async_pkts_log[num_async_pkts++].last_avail_idx =
+				vq->last_avail_idx;
+			src_iovec += src_it->nr_segs;
+			dst_iovec += dst_it->nr_segs;
+			src_it += 2;
+			dst_it += 2;
+			segs_await += src_it->nr_segs;
+
+			/**
+			 * recover shadow used ring and keep DMA-occupied
+			 * descriptors.
+			 */
+			from = vq->shadow_used_idx - num_buffers;
+			to = vq->async_packed_buffer_idx & (vq->size - 1);
+			if (num_buffers + to <= vq->size) {
+				rte_memcpy(&vq->async_buffers_packed[to],
+					&vq->shadow_used_packed[from],
+					num_buffers *
+					sizeof(struct vring_used_elem_packed));
+			} else {
+				int size = vq->size - to;
+
+				rte_memcpy(&vq->async_buffers_packed[to],
+					&vq->shadow_used_packed[from],
+					size *
+					sizeof(struct vring_used_elem_packed));
+				rte_memcpy(vq->async_buffers_packed,
+					&vq->shadow_used_packed[from +
+					size], (num_buffers - size) *
+					sizeof(struct vring_used_elem_packed));
+			}
+			vq->async_packed_buffer_idx += num_buffers;
+			vq->shadow_used_idx -= num_buffers;
+		} else
+			comp_pkts[num_done_pkts++] = pkts[pkt_idx];
+
+		vq_inc_last_avail_packed(vq, num_desc);
+
+		/*
+		 * conditions to trigger async device transfer:
+		 * - buffered packet number reaches transfer threshold
+		 * - unused async iov number is less than max vhost vector
+		 */
+		if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
+			((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
+			BUF_VECTOR_MAX))) {
+			n_pkts = vq->async_ops.transfer_data(dev->vid,
+					queue_id, tdes, 0, pkt_burst_idx);
+			src_iovec = vec_pool;
+			dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
+			src_it = it_pool;
+			dst_it = it_pool + 1;
+			segs_await = 0;
+			vq->async_pkts_inflight_n += n_pkts;
+
+			if (unlikely(n_pkts < pkt_burst_idx)) {
+				/*
+				 * log error packets number here and do actual
+				 * error processing when applications poll
+				 * completion
+				 */
+				pkt_err = pkt_burst_idx - n_pkts;
+				pkt_burst_idx = 0;
+				break;
+			}
+
+			pkt_burst_idx = 0;
+		}
+	}
+
+	if (pkt_burst_idx) {
+		n_pkts = vq->async_ops.transfer_data(dev->vid,
+				queue_id, tdes, 0, pkt_burst_idx);
+		vq->async_pkts_inflight_n += n_pkts;
+
+		if (unlikely(n_pkts < pkt_burst_idx))
+			pkt_err = pkt_burst_idx - n_pkts;
+	}
+
+	do_data_copy_enqueue(dev, vq);
+
+	if (unlikely(pkt_err)) {
+		uint16_t num_buffers = 0;
+
+		num_async_pkts -= pkt_err;
+		/* calculate the sum of descriptors of DMA-error packets. */
+		while (pkt_err-- > 0) {
+			num_buffers +=
+				pkts_info[slot_idx & (vq->size - 1)].nr_buffers;
+			slot_idx--;
+		}
+		vq->async_packed_buffer_idx -= num_buffers;
+		/* recover shadow used ring and available ring */
+		vq->shadow_used_idx -= (vq->last_avail_idx -
+				async_pkts_log[num_async_pkts].last_avail_idx -
+				num_buffers);
+		vq->last_avail_idx =
+			async_pkts_log[num_async_pkts].last_avail_idx;
+		pkt_idx = async_pkts_log[num_async_pkts].pkt_idx;
+		num_done_pkts = pkt_idx - num_async_pkts;
+	}
+
+	vq->async_pkts_idx += num_async_pkts;
+	*comp_count = num_done_pkts;
+
+	if (likely(vq->shadow_used_idx)) {
+		vhost_flush_enqueue_shadow_packed(dev, vq);
+		vhost_vring_call_packed(dev, vq);
+	}
+
+	return pkt_idx;
+}
+
+static __rte_always_inline void
+vhost_update_used_packed(struct virtio_net *dev,
+				  struct vhost_virtqueue *vq,
+				  struct vring_used_elem_packed *shadow_ring,
+				  uint16_t count)
+{
+	if (count == 0)
+		return;
+	int i;
+	uint16_t used_idx = vq->last_used_idx;
+	uint16_t head_idx = vq->last_used_idx;
+	uint16_t head_flags = 0;
+
+	/* Split loop in two to save memory barriers */
+	for (i = 0; i < count; i++) {
+		vq->desc_packed[used_idx].id = shadow_ring[i].id;
+		vq->desc_packed[used_idx].len = shadow_ring[i].len;
+
+		used_idx += shadow_ring[i].count;
+		if (used_idx >= vq->size)
+			used_idx -= vq->size;
+	}
+
+	/* The ordering for storing desc flags needs to be enforced. */
+	rte_atomic_thread_fence(__ATOMIC_RELEASE);
+
+	for (i = 0; i < count; i++) {
+		uint16_t flags;
+
+		if (vq->shadow_used_packed[i].len)
+			flags = VRING_DESC_F_WRITE;
+		else
+			flags = 0;
+
+		if (vq->used_wrap_counter) {
+			flags |= VRING_DESC_F_USED;
+			flags |= VRING_DESC_F_AVAIL;
+		} else {
+			flags &= ~VRING_DESC_F_USED;
+			flags &= ~VRING_DESC_F_AVAIL;
+		}
+
+		if (i > 0) {
+			vq->desc_packed[vq->last_used_idx].flags = flags;
+
+			vhost_log_cache_used_vring(dev, vq,
+					vq->last_used_idx *
+					sizeof(struct vring_packed_desc),
+					sizeof(struct vring_packed_desc));
+		} else {
+			head_idx = vq->last_used_idx;
+			head_flags = flags;
+		}
+
+		vq_inc_last_used_packed(vq, shadow_ring[i].count);
+	}
+
+	vq->desc_packed[head_idx].flags = head_flags;
+
+	vhost_log_cache_used_vring(dev, vq,
+				head_idx *
+				sizeof(struct vring_packed_desc),
+				sizeof(struct vring_packed_desc));
+
+	vhost_log_cache_sync(dev, vq);
+}
+
 uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 		struct rte_mbuf **pkts, uint16_t count)
 {
 	struct virtio_net *dev = get_device(vid);
 	struct vhost_virtqueue *vq;
-	uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0;
+	uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
 	uint16_t start_idx, pkts_idx, vq_size;
 	struct async_inflight_info *pkts_info;
 	uint16_t from, i;
@@ -1680,53 +2021,96 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 		goto done;
 	}

-	for (i = 0; i < n_pkts_put; i++) {
-		from = (start_idx + i) & (vq_size - 1);
-		n_descs += pkts_info[from].descs;
-		pkts[i] = pkts_info[from].mbuf;
+	if (vq_is_packed(dev)) {
+		for (i = 0; i < n_pkts_put; i++) {
+			from = (start_idx + i) & (vq_size - 1);
+			n_buffers += pkts_info[from].nr_buffers;
+			pkts[i] = pkts_info[from].mbuf;
+		}
+	} else {
+		for (i = 0; i < n_pkts_put; i++) {
+			from = (start_idx + i) & (vq_size - 1);
+			n_descs += pkts_info[from].descs;
+			pkts[i] = pkts_info[from].mbuf;
+		}
 	}
+
 	vq->async_last_pkts_n = n_pkts_cpl - n_pkts_put;
 	vq->async_pkts_inflight_n -= n_pkts_put;

 	if (likely(vq->enabled && vq->access_ok)) {
-		uint16_t nr_left = n_descs;
 		uint16_t nr_copy;
 		uint16_t to;

 		/* write back completed descriptors to used ring */
-		do {
-			from = vq->last_async_desc_idx & (vq->size - 1);
-			nr_copy = nr_left + from <= vq->size ? nr_left :
-				vq->size - from;
-			to = vq->last_used_idx & (vq->size - 1);
-
-			if (to + nr_copy <= vq->size) {
-				rte_memcpy(&vq->used->ring[to],
+		if (vq_is_packed(dev)) {
+			uint16_t nr_left = n_buffers;
+			uint16_t to;
+			do {
+				from = vq->last_async_buffer_idx &
+								(vq->size - 1);
+				to = (from + nr_left) & (vq->size - 1);
+
+				if (to > from) {
+					vhost_update_used_packed(dev, vq,
+						vq->async_buffers_packed + from,
+						to - from);
+					vq->last_async_buffer_idx += nr_left;
+					nr_left = 0;
+				} else {
+					vhost_update_used_packed(dev, vq,
+						vq->async_buffers_packed + from,
+						vq->size - from);
+					vq->last_async_buffer_idx +=
+								vq->size - from;
+					nr_left -= vq->size - from;
+				}
+			} while (nr_left > 0);
+			vhost_vring_call_packed(dev, vq);
+		} else {
+			uint16_t nr_left = n_descs;
+			do {
+				from = vq->last_async_desc_idx & (vq->size - 1);
+				nr_copy = nr_left + from <= vq->size ? nr_left :
+					vq->size - from;
+				to = vq->last_used_idx & (vq->size - 1);
+
+				if (to + nr_copy <= vq->size) {
+					rte_memcpy(&vq->used->ring[to],
 						&vq->async_descs_split[from],
 						nr_copy *
 						sizeof(struct vring_used_elem));
-			} else {
-				uint16_t size = vq->size - to;
+				} else {
+					uint16_t size = vq->size - to;

-				rte_memcpy(&vq->used->ring[to],
+					rte_memcpy(&vq->used->ring[to],
 						&vq->async_descs_split[from],
 						size *
 						sizeof(struct vring_used_elem));
-				rte_memcpy(vq->used->ring,
+					rte_memcpy(vq->used->ring,
 						&vq->async_descs_split[from +
 						size], (nr_copy - size) *
 						sizeof(struct vring_used_elem));
-			}
+				}
+
+				vq->last_async_desc_idx += nr_copy;
+				vq->last_used_idx += nr_copy;
+				nr_left -= nr_copy;
+			} while (nr_left > 0);
+
+			__atomic_add_fetch(&vq->used->idx, n_descs,
+					__ATOMIC_RELEASE);
+			vhost_vring_call_split(dev, vq);
+		}

-			vq->last_async_desc_idx += nr_copy;
-			vq->last_used_idx += nr_copy;
-			nr_left -= nr_copy;
-		} while (nr_left > 0);

-		__atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
-		vhost_vring_call_split(dev, vq);
-	} else
-		vq->last_async_desc_idx += n_descs;
+
+	} else {
+		if (vq_is_packed(dev))
+			vq->last_async_buffer_idx += n_buffers;
+		else
+			vq->last_async_desc_idx += n_descs;
+	}

 done:
 	rte_spinlock_unlock(&vq->access_lock);
@@ -1767,9 +2151,10 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
 	if (count == 0)
 		goto out;

-	/* TODO: packed queue not implemented */
 	if (vq_is_packed(dev))
-		nb_tx = 0;
+		nb_tx = virtio_dev_rx_async_submit_packed(dev,
+				vq, queue_id, pkts, count, comp_pkts,
+				comp_count);
 	else
 		nb_tx = virtio_dev_rx_async_submit_split(dev,
 				vq, queue_id, pkts, count, comp_pkts,
--
2.29.2



* Re: [dpdk-dev] [PATCH v2] vhost: add support for packed ring in async vhost
  2021-03-22  6:15 ` [dpdk-dev] [PATCH v2] " Cheng Jiang
@ 2021-03-24  9:19   ` Liu, Yong
  2021-03-29 12:29     ` Jiang, Cheng1
  0 siblings, 1 reply; 13+ messages in thread
From: Liu, Yong @ 2021-03-24  9:19 UTC (permalink / raw)
  To: Jiang, Cheng1, maxime.coquelin, Xia, Chenbo
  Cc: dev, Hu, Jiayu, Yang, YvonneX, Wang, Yinan, Jiang, Cheng1



> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Cheng Jiang
> Sent: Monday, March 22, 2021 2:15 PM
> To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Yang, YvonneX
> <yvonnex.yang@intel.com>; Wang, Yinan <yinan.wang@intel.com>; Jiang,
> Cheng1 <cheng1.jiang@intel.com>
> Subject: [dpdk-dev] [PATCH v2] vhost: add support for packed ring in async
> vhost
> 
> For now async vhost data path only supports split ring structure. In
> order to make async vhost compatible with virtio 1.1 spec this patch
> enables packed ring in async vhost data path.
> 
> Signed-off-by: Cheng Jiang <Cheng1.jiang@intel.com>
> ---
> v2:
>   * fix wrong buffer index in rte_vhost_poll_enqueue_completed()
>   * add async_buffers_packed memory free in vhost_free_async_mem()
> 
>  lib/librte_vhost/rte_vhost_async.h |   1 +
>  lib/librte_vhost/vhost.c           |  24 +-
>  lib/librte_vhost/vhost.h           |   7 +-
>  lib/librte_vhost/virtio_net.c      | 447 +++++++++++++++++++++++++++--
>  4 files changed, 441 insertions(+), 38 deletions(-)
> 
> diff --git a/lib/librte_vhost/rte_vhost_async.h
> b/lib/librte_vhost/rte_vhost_async.h
> index c855ff875..6faa31f5a 100644
> --- a/lib/librte_vhost/rte_vhost_async.h
> +++ b/lib/librte_vhost/rte_vhost_async.h
> @@ -89,6 +89,7 @@ struct rte_vhost_async_channel_ops {
>  struct async_inflight_info {
>  	struct rte_mbuf *mbuf;
>  	uint16_t descs; /* num of descs inflight */
> +	uint16_t nr_buffers; /* num of buffers inflight for packed ring */
>  };
> 
>  /**
> diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
> index 52ab93d1e..51b44d6f2 100644
> --- a/lib/librte_vhost/vhost.c
> +++ b/lib/librte_vhost/vhost.c
> @@ -330,15 +330,20 @@ vhost_free_async_mem(struct vhost_virtqueue
> *vq)
>  {
>  	if (vq->async_pkts_info)
>  		rte_free(vq->async_pkts_info);
> -	if (vq->async_descs_split)
> +	if (vq->async_buffers_packed) {
> +		rte_free(vq->async_buffers_packed);
> +		vq->async_buffers_packed = NULL;
> +	} else {
>  		rte_free(vq->async_descs_split);
> +		vq->async_descs_split = NULL;
> +	}
> +
>  	if (vq->it_pool)
>  		rte_free(vq->it_pool);
>  	if (vq->vec_pool)
>  		rte_free(vq->vec_pool);
> 
>  	vq->async_pkts_info = NULL;
> -	vq->async_descs_split = NULL;
>  	vq->it_pool = NULL;
>  	vq->vec_pool = NULL;
>  }
> @@ -1603,9 +1608,9 @@ int rte_vhost_async_channel_register(int vid,
> uint16_t queue_id,
>  		return -1;
> 
>  	/* packed queue is not supported */
> -	if (unlikely(vq_is_packed(dev) || !f.async_inorder)) {
> +	if (unlikely(!f.async_inorder)) {
>  		VHOST_LOG_CONFIG(ERR,
> -			"async copy is not supported on packed queue or
> non-inorder mode "
> +			"async copy is not supported on non-inorder mode "
>  			"(vid %d, qid: %d)\n", vid, queue_id);
>  		return -1;
>  	}
> @@ -1643,10 +1648,17 @@ int rte_vhost_async_channel_register(int vid,
> uint16_t queue_id,
>  	vq->vec_pool = rte_malloc_socket(NULL,
>  			VHOST_MAX_ASYNC_VEC * sizeof(struct iovec),
>  			RTE_CACHE_LINE_SIZE, node);
> -	vq->async_descs_split = rte_malloc_socket(NULL,
> +	if (vq_is_packed(dev)) {
> +		vq->async_buffers_packed = rte_malloc_socket(NULL,
> +			vq->size * sizeof(struct vring_used_elem_packed),
> +			RTE_CACHE_LINE_SIZE, node);
> +	} else {
> +		vq->async_descs_split = rte_malloc_socket(NULL,
>  			vq->size * sizeof(struct vring_used_elem),
>  			RTE_CACHE_LINE_SIZE, node);
> -	if (!vq->async_descs_split || !vq->async_pkts_info ||
> +	}
> +
> +	if (!vq->async_pkts_info ||
>  		!vq->it_pool || !vq->vec_pool) {
>  		vhost_free_async_mem(vq);
>  		VHOST_LOG_CONFIG(ERR,
> diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
> index 658f6fc28..d6324fbf8 100644
> --- a/lib/librte_vhost/vhost.h
> +++ b/lib/librte_vhost/vhost.h
> @@ -206,9 +206,14 @@ struct vhost_virtqueue {
>  	uint16_t	async_pkts_idx;
>  	uint16_t	async_pkts_inflight_n;
>  	uint16_t	async_last_pkts_n;
> -	struct vring_used_elem  *async_descs_split;
> +	union {
> +		struct vring_used_elem  *async_descs_split;
> +		struct vring_used_elem_packed *async_buffers_packed;
> +	};
>  	uint16_t async_desc_idx;
> +	uint16_t async_packed_buffer_idx;
>  	uint16_t last_async_desc_idx;
> +	uint16_t last_async_buffer_idx;
> 
>  	/* vq async features */
>  	bool		async_inorder;
> diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
> index 583bf379c..fa8c4f4fe 100644
> --- a/lib/librte_vhost/virtio_net.c
> +++ b/lib/librte_vhost/virtio_net.c
> @@ -363,8 +363,7 @@
> vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
>  }
> 
>  static __rte_always_inline void
> -vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
> -				   struct vhost_virtqueue *vq,
> +vhost_shadow_enqueue_packed(struct vhost_virtqueue *vq,
>  				   uint32_t len[],
>  				   uint16_t id[],
>  				   uint16_t count[],
> @@ -382,6 +381,17 @@ vhost_shadow_enqueue_single_packed(struct
> virtio_net *dev,
>  		vq->shadow_aligned_idx += count[i];
>  		vq->shadow_used_idx++;
>  	}
> +}
> +
> +static __rte_always_inline void
> +vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
> +				   struct vhost_virtqueue *vq,
> +				   uint32_t len[],
> +				   uint16_t id[],
> +				   uint16_t count[],
> +				   uint16_t num_buffers)
> +{
> +	vhost_shadow_enqueue_packed(vq, len, id, count, num_buffers);
> 
>  	if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
>  		do_data_copy_enqueue(dev, vq);
> @@ -1633,12 +1643,343 @@ virtio_dev_rx_async_submit_split(struct
> virtio_net *dev,
>  	return pkt_idx;
>  }
> 
> +static __rte_always_inline int
> +vhost_enqueue_async_single_packed(struct virtio_net *dev,
> +			    struct vhost_virtqueue *vq,
> +			    struct rte_mbuf *pkt,
> +			    struct buf_vector *buf_vec,
> +			    uint16_t *nr_descs,
> +			    uint16_t *nr_buffers,
> +			    struct iovec *src_iovec, struct iovec *dst_iovec,
> +			    struct rte_vhost_iov_iter *src_it,
> +			    struct rte_vhost_iov_iter *dst_it)
> +{
> +	uint16_t nr_vec = 0;
> +	uint16_t avail_idx = vq->last_avail_idx;
> +	uint16_t max_tries, tries = 0;
> +	uint16_t buf_id = 0;
> +	uint32_t len = 0;
> +	uint16_t desc_count;
> +	uint32_t size = pkt->pkt_len + sizeof(struct
> virtio_net_hdr_mrg_rxbuf);
> +	uint32_t buffer_len[vq->size];
> +	uint16_t buffer_buf_id[vq->size];
> +	uint16_t buffer_desc_count[vq->size];
> +	*nr_buffers = 0;
> +
> +	if (rxvq_is_mergeable(dev))
> +		max_tries = vq->size - 1;
> +	else
> +		max_tries = 1;
> +
> +	while (size > 0) {
> +		/*
> +		 * if we tried all available ring items, and still
> +		 * can't get enough buf, it means something abnormal
> +		 * happened.
> +		 */
> +		if (unlikely(++tries > max_tries))
> +			return -1;
> +
> +		if (unlikely(fill_vec_buf_packed(dev, vq,
> +						avail_idx, &desc_count,
> +						buf_vec, &nr_vec,
> +						&buf_id, &len,
> +						VHOST_ACCESS_RW) < 0))
> +			return -1;
> +
> +		len = RTE_MIN(len, size);
> +		size -= len;
> +
> +		buffer_len[*nr_buffers] = len;
> +		buffer_buf_id[*nr_buffers] = buf_id;
> +		buffer_desc_count[*nr_buffers] = desc_count;
> +		*nr_buffers += 1;
> +
> +		*nr_descs += desc_count;
> +		avail_idx += desc_count;
> +		if (avail_idx >= vq->size)
> +			avail_idx -= vq->size;
> +	}
> +
> +	if (async_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, *nr_buffers,
> +		src_iovec, dst_iovec, src_it, dst_it) < 0)
> +		return -1;
> +
> +	vhost_shadow_enqueue_packed(vq, buffer_len, buffer_buf_id,
> +					   buffer_desc_count, *nr_buffers);
> +
> +	return 0;
> +}
> +
> +static __rte_always_inline int16_t
> +virtio_dev_rx_async_single_packed(struct virtio_net *dev,
> +			    struct vhost_virtqueue *vq,
> +			    struct rte_mbuf *pkt,
> +			    uint16_t *nr_descs, uint16_t *nr_buffers,
> +			    struct iovec *src_iovec, struct iovec *dst_iovec,
> +			    struct rte_vhost_iov_iter *src_it,
> +			    struct rte_vhost_iov_iter *dst_it)
> +{
> +	struct buf_vector buf_vec[BUF_VECTOR_MAX];
> +	*nr_descs = 0;
> +	*nr_buffers = 0;
> +
> +	if (unlikely(vhost_enqueue_async_single_packed(dev, vq, pkt,
> buf_vec,
> +						 nr_descs,
> +						 nr_buffers,
> +						 src_iovec, dst_iovec,
> +						 src_it, dst_it) < 0)) {
> +		VHOST_LOG_DATA(DEBUG,
> +				"(%d) failed to get enough desc from vring\n",
> +				dev->vid);
> +		return -1;
> +	}
> +
> +	VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end
> index %d\n",
> +			dev->vid, vq->last_avail_idx,
> +			vq->last_avail_idx + *nr_descs);
> +
> +	return 0;
> +}
> +
> +static __rte_noinline uint32_t
> +virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
> +	struct vhost_virtqueue *vq, uint16_t queue_id,
> +	struct rte_mbuf **pkts, uint32_t count,
> +	struct rte_mbuf **comp_pkts, uint32_t *comp_count)
> +{

Hi Cheng,
There are some common parts in virtio_dev_rx_async_submit_packed and virtio_dev_rx_async_submit_split.
We could abstract those common parts into shared helper functions, which would bring more clarity; for example, something along the lines of the sketch below.

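A rough sketch of one such helper; the name async_submit_burst() and its
parameter list are mine for illustration only, it simply collects what both
submit paths already do after filling tdes[]:

static __rte_always_inline uint32_t
async_submit_burst(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint16_t queue_id, struct rte_vhost_async_desc *tdes,
		uint16_t nr_xfers)
{
	uint32_t n_xfers;

	/* hand the accumulated descriptors to the async copy engine */
	n_xfers = vq->async_ops.transfer_data(dev->vid, queue_id,
			tdes, 0, nr_xfers);
	vq->async_pkts_inflight_n += n_xfers;

	/* the caller logs (nr_xfers - n_xfers) as pkt_err and recovers
	 * when the application polls for completions */
	return n_xfers;
}

Both virtio_dev_rx_async_submit_split() and virtio_dev_rx_async_submit_packed()
could then call it at the burst threshold and once more for the tail burst.
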
Also, this patch may be too large to review in one piece; please split it into a few smaller patches for easier understanding.

Thanks,
Marvin

> +	uint32_t pkt_idx = 0, pkt_burst_idx = 0;
> +	uint16_t num_buffers;
> +	uint16_t num_desc;
> +
> +	struct rte_vhost_iov_iter *it_pool = vq->it_pool;
> +	struct iovec *vec_pool = vq->vec_pool;
> +	struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
> +	struct iovec *src_iovec = vec_pool;
> +	struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
> +	struct rte_vhost_iov_iter *src_it = it_pool;
> +	struct rte_vhost_iov_iter *dst_it = it_pool + 1;
> +	uint16_t slot_idx = 0;
> +	uint16_t segs_await = 0;
> +	struct async_inflight_info *pkts_info = vq->async_pkts_info;
> +	uint32_t n_pkts = 0, pkt_err = 0;
> +	uint32_t num_async_pkts = 0, num_done_pkts = 0;
> +	struct {
> +		uint16_t pkt_idx;
> +		uint16_t last_avail_idx;
> +	} async_pkts_log[MAX_PKT_BURST];
> +
> +	rte_prefetch0(&vq->desc[vq->last_avail_idx & (vq->size - 1)]);
> +
> +	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
> +		if (unlikely(virtio_dev_rx_async_single_packed(dev, vq,
> +						pkts[pkt_idx],
> +						&num_desc, &num_buffers,
> +						src_iovec, dst_iovec,
> +						src_it, dst_it) < 0)) {
> +			break;
> +		}
> +
> +		VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end
> index %d\n",
> +			dev->vid, vq->last_avail_idx,
> +			vq->last_avail_idx + num_desc);
> +
> +		slot_idx = (vq->async_pkts_idx + num_async_pkts) &
> +			(vq->size - 1);
> +		if (src_it->count) {
> +			uint16_t from, to;
> +
> +			async_fill_desc(&tdes[pkt_burst_idx++], src_it, dst_it);
> +			pkts_info[slot_idx].descs = num_desc;
> +			pkts_info[slot_idx].nr_buffers = num_buffers;
> +			pkts_info[slot_idx].mbuf = pkts[pkt_idx];
> +			async_pkts_log[num_async_pkts].pkt_idx = pkt_idx;
> +			async_pkts_log[num_async_pkts++].last_avail_idx =
> +				vq->last_avail_idx;
> +			src_iovec += src_it->nr_segs;
> +			dst_iovec += dst_it->nr_segs;
> +			src_it += 2;
> +			dst_it += 2;
> +			segs_await += src_it->nr_segs;
> +
> +			/**
> +			 * recover shadow used ring and keep DMA-occupied
> +			 * descriptors.
> +			 */
> +			from = vq->shadow_used_idx - num_buffers;
> +			to = vq->async_packed_buffer_idx & (vq->size - 1);
> +			if (num_buffers + to <= vq->size) {
> +				rte_memcpy(&vq->async_buffers_packed[to],
> +					&vq->shadow_used_packed[from],
> +					num_buffers *
> +					sizeof(struct
> vring_used_elem_packed));
> +			} else {
> +				int size = vq->size - to;
> +
> +				rte_memcpy(&vq->async_buffers_packed[to],
> +					&vq->shadow_used_packed[from],
> +					size *
> +					sizeof(struct
> vring_used_elem_packed));
> +				rte_memcpy(vq->async_buffers_packed,
> +					&vq->shadow_used_packed[from +
> +					size], (num_buffers - size) *
> +					sizeof(struct
> vring_used_elem_packed));
> +			}
> +			vq->async_packed_buffer_idx += num_buffers;
> +			vq->shadow_used_idx -= num_buffers;
> +		} else
> +			comp_pkts[num_done_pkts++] = pkts[pkt_idx];
> +
> +		vq_inc_last_avail_packed(vq, num_desc);
> +
> +		/*
> +		 * conditions to trigger async device transfer:
> +		 * - buffered packet number reaches transfer threshold
> +		 * - unused async iov number is less than max vhost vector
> +		 */
> +		if (unlikely(pkt_burst_idx >=
> VHOST_ASYNC_BATCH_THRESHOLD ||
> +			((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
> +			BUF_VECTOR_MAX))) {
> +			n_pkts = vq->async_ops.transfer_data(dev->vid,
> +					queue_id, tdes, 0, pkt_burst_idx);
> +			src_iovec = vec_pool;
> +			dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >>
> 1);
> +			src_it = it_pool;
> +			dst_it = it_pool + 1;
> +			segs_await = 0;
> +			vq->async_pkts_inflight_n += n_pkts;
> +
> +			if (unlikely(n_pkts < pkt_burst_idx)) {
> +				/*
> +				 * log error packets number here and do
> actual
> +				 * error processing when applications poll
> +				 * completion
> +				 */
> +				pkt_err = pkt_burst_idx - n_pkts;
> +				pkt_burst_idx = 0;
> +				break;
> +			}
> +
> +			pkt_burst_idx = 0;
> +		}
> +	}
> +
> +	if (pkt_burst_idx) {
> +		n_pkts = vq->async_ops.transfer_data(dev->vid,
> +				queue_id, tdes, 0, pkt_burst_idx);
> +		vq->async_pkts_inflight_n += n_pkts;
> +
> +		if (unlikely(n_pkts < pkt_burst_idx))
> +			pkt_err = pkt_burst_idx - n_pkts;
> +	}
> +
> +	do_data_copy_enqueue(dev, vq);
> +
> +	if (unlikely(pkt_err)) {
> +		uint16_t num_buffers = 0;
> +
> +		num_async_pkts -= pkt_err;
> +		/* calculate the sum of descriptors of DMA-error packets. */
> +		while (pkt_err-- > 0) {
> +			num_buffers +=
> +				pkts_info[slot_idx & (vq->size - 1)].nr_buffers;
> +			slot_idx--;
> +		}
> +		vq->async_packed_buffer_idx -= num_buffers;
> +		/* recover shadow used ring and available ring */
> +		vq->shadow_used_idx -= (vq->last_avail_idx -
> +
> 	async_pkts_log[num_async_pkts].last_avail_idx -
> +				num_buffers);

Could it be possible that vq->last_avail_idx is smaller than async_pkts_log[num_async_pkts].last_avail_idx when operating near the ring's boundary? See the sketch below with illustrative numbers.

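A tiny illustration (the numbers and the helper are mine, assuming
vq->size = 256) of why the plain subtraction worries me:

/* packed-ring last_avail_idx wraps at vq->size, not at UINT16_MAX */
static inline uint16_t
ring_distance(uint16_t cur, uint16_t logged, uint16_t ring_size)
{
	return (uint16_t)((cur + ring_size - logged) % ring_size);
}

/*
 * Example: async_pkts_log[n].last_avail_idx == 250, and after the burst
 * vq->last_avail_idx has wrapped around to 10.
 *
 *   10 - 250                    -> 65296 as uint16_t (what the recovery
 *                                   code would feed into shadow_used_idx)
 *   ring_distance(10, 250, 256) -> 16   (descriptors actually consumed)
 */
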
> +		vq->last_avail_idx =
> +			async_pkts_log[num_async_pkts].last_avail_idx;
> +		pkt_idx = async_pkts_log[num_async_pkts].pkt_idx;
> +		num_done_pkts = pkt_idx - num_async_pkts;
> +	}
> +
> +	vq->async_pkts_idx += num_async_pkts;
> +	*comp_count = num_done_pkts;
> +
> +	if (likely(vq->shadow_used_idx)) {
> +		vhost_flush_enqueue_shadow_packed(dev, vq);
> +		vhost_vring_call_packed(dev, vq);
> +	}
> +
> +	return pkt_idx;
> +}
> +
> +static __rte_always_inline void
> +vhost_update_used_packed(struct virtio_net *dev,
> +				  struct vhost_virtqueue *vq,
> +				  struct vring_used_elem_packed
> *shadow_ring,
> +				  uint16_t count)
> +{
> +	if (count == 0)
> +		return;
> +	int i;
> +	uint16_t used_idx = vq->last_used_idx;
> +	uint16_t head_idx = vq->last_used_idx;
> +	uint16_t head_flags = 0;
> +
> +	/* Split loop in two to save memory barriers */
> +	for (i = 0; i < count; i++) {
> +		vq->desc_packed[used_idx].id = shadow_ring[i].id;
> +		vq->desc_packed[used_idx].len = shadow_ring[i].len;
> +
> +		used_idx += shadow_ring[i].count;
> +		if (used_idx >= vq->size)
> +			used_idx -= vq->size;
> +	}
> +
> +	/* The ordering for storing desc flags needs to be enforced. */
> +	rte_atomic_thread_fence(__ATOMIC_RELEASE);
> +
> +	for (i = 0; i < count; i++) {
> +		uint16_t flags;
> +
> +		if (vq->shadow_used_packed[i].len)
> +			flags = VRING_DESC_F_WRITE;
> +		else
> +			flags = 0;
> +
> +		if (vq->used_wrap_counter) {
> +			flags |= VRING_DESC_F_USED;
> +			flags |= VRING_DESC_F_AVAIL;
> +		} else {
> +			flags &= ~VRING_DESC_F_USED;
> +			flags &= ~VRING_DESC_F_AVAIL;
> +		}
> +
> +		if (i > 0) {
> +			vq->desc_packed[vq->last_used_idx].flags = flags;
> +
> +			vhost_log_cache_used_vring(dev, vq,
> +					vq->last_used_idx *
> +					sizeof(struct vring_packed_desc),
> +					sizeof(struct vring_packed_desc));
> +		} else {
> +			head_idx = vq->last_used_idx;
> +			head_flags = flags;
> +		}
> +
> +		vq_inc_last_used_packed(vq, shadow_ring[i].count);
> +	}
> +
> +	vq->desc_packed[head_idx].flags = head_flags;
> +
> +	vhost_log_cache_used_vring(dev, vq,
> +				head_idx *
> +				sizeof(struct vring_packed_desc),
> +				sizeof(struct vring_packed_desc));
> +
> +	vhost_log_cache_sync(dev, vq);
> +}
> +
>  uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
>  		struct rte_mbuf **pkts, uint16_t count)
>  {
>  	struct virtio_net *dev = get_device(vid);
>  	struct vhost_virtqueue *vq;
> -	uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0;
> +	uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
>  	uint16_t start_idx, pkts_idx, vq_size;
>  	struct async_inflight_info *pkts_info;
>  	uint16_t from, i;
> @@ -1680,53 +2021,96 @@ uint16_t
> rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
>  		goto done;
>  	}
> 
> -	for (i = 0; i < n_pkts_put; i++) {
> -		from = (start_idx + i) & (vq_size - 1);
> -		n_descs += pkts_info[from].descs;
> -		pkts[i] = pkts_info[from].mbuf;
> +	if (vq_is_packed(dev)) {
> +		for (i = 0; i < n_pkts_put; i++) {
> +			from = (start_idx + i) & (vq_size - 1);
> +			n_buffers += pkts_info[from].nr_buffers;
> +			pkts[i] = pkts_info[from].mbuf;
> +		}
> +	} else {
> +		for (i = 0; i < n_pkts_put; i++) {
> +			from = (start_idx + i) & (vq_size - 1);
> +			n_descs += pkts_info[from].descs;
> +			pkts[i] = pkts_info[from].mbuf;
> +		}
>  	}
> +
>  	vq->async_last_pkts_n = n_pkts_cpl - n_pkts_put;
>  	vq->async_pkts_inflight_n -= n_pkts_put;
> 
>  	if (likely(vq->enabled && vq->access_ok)) {
> -		uint16_t nr_left = n_descs;
>  		uint16_t nr_copy;
>  		uint16_t to;
> 
>  		/* write back completed descriptors to used ring */
> -		do {
> -			from = vq->last_async_desc_idx & (vq->size - 1);
> -			nr_copy = nr_left + from <= vq->size ? nr_left :
> -				vq->size - from;
> -			to = vq->last_used_idx & (vq->size - 1);
> -
> -			if (to + nr_copy <= vq->size) {
> -				rte_memcpy(&vq->used->ring[to],
> +		if (vq_is_packed(dev)) {
> +			uint16_t nr_left = n_buffers;
> +			uint16_t to;
> +			do {
> +				from = vq->last_async_buffer_idx &
> +								(vq->size - 1);
> +				to = (from + nr_left) & (vq->size - 1);
> +
> +				if (to > from) {
> +					vhost_update_used_packed(dev, vq,
> +						vq->async_buffers_packed +
> from,
> +						to - from);
> +					vq->last_async_buffer_idx += nr_left;
> +					nr_left = 0;
> +				} else {
> +					vhost_update_used_packed(dev, vq,
> +						vq->async_buffers_packed +
> from,
> +						vq->size - from);
> +					vq->last_async_buffer_idx +=
> +								vq->size -
> from;
> +					nr_left -= vq->size - from;
> +				}
> +			} while (nr_left > 0);
> +			vhost_vring_call_packed(dev, vq);
> +		} else {
> +			uint16_t nr_left = n_descs;
> +			do {
> +				from = vq->last_async_desc_idx & (vq->size -
> 1);
> +				nr_copy = nr_left + from <= vq->size ? nr_left :
> +					vq->size - from;
> +				to = vq->last_used_idx & (vq->size - 1);
> +
> +				if (to + nr_copy <= vq->size) {
> +					rte_memcpy(&vq->used->ring[to],
>  						&vq-
> >async_descs_split[from],
>  						nr_copy *
>  						sizeof(struct
> vring_used_elem));
> -			} else {
> -				uint16_t size = vq->size - to;
> +				} else {
> +					uint16_t size = vq->size - to;
> 
> -				rte_memcpy(&vq->used->ring[to],
> +					rte_memcpy(&vq->used->ring[to],
>  						&vq-
> >async_descs_split[from],
>  						size *
>  						sizeof(struct
> vring_used_elem));
> -				rte_memcpy(vq->used->ring,
> +					rte_memcpy(vq->used->ring,
>  						&vq->async_descs_split[from
> +
>  						size], (nr_copy - size) *
>  						sizeof(struct
> vring_used_elem));
> -			}
> +				}
> +
> +				vq->last_async_desc_idx += nr_copy;
> +				vq->last_used_idx += nr_copy;
> +				nr_left -= nr_copy;
> +			} while (nr_left > 0);
> +
> +			__atomic_add_fetch(&vq->used->idx, n_descs,
> +					__ATOMIC_RELEASE);
> +			vhost_vring_call_split(dev, vq);
> +		}
> 
> -			vq->last_async_desc_idx += nr_copy;
> -			vq->last_used_idx += nr_copy;
> -			nr_left -= nr_copy;
> -		} while (nr_left > 0);
> 
> -		__atomic_add_fetch(&vq->used->idx, n_descs,
> __ATOMIC_RELEASE);
> -		vhost_vring_call_split(dev, vq);
> -	} else
> -		vq->last_async_desc_idx += n_descs;
> +
> +	} else {
> +		if (vq_is_packed(dev))
> +			vq->last_async_buffer_idx += n_buffers;
> +		else
> +			vq->last_async_desc_idx += n_descs;
> +	}
> 
>  done:
>  	rte_spinlock_unlock(&vq->access_lock);
> @@ -1767,9 +2151,10 @@ virtio_dev_rx_async_submit(struct virtio_net
> *dev, uint16_t queue_id,
>  	if (count == 0)
>  		goto out;
> 
> -	/* TODO: packed queue not implemented */
>  	if (vq_is_packed(dev))
> -		nb_tx = 0;
> +		nb_tx = virtio_dev_rx_async_submit_packed(dev,
> +				vq, queue_id, pkts, count, comp_pkts,
> +				comp_count);
>  	else
>  		nb_tx = virtio_dev_rx_async_submit_split(dev,
>  				vq, queue_id, pkts, count, comp_pkts,
> --
> 2.29.2


^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [dpdk-dev] [PATCH v2] vhost: add support for packed ring in async vhost
  2021-03-24  9:19   ` Liu, Yong
@ 2021-03-29 12:29     ` Jiang, Cheng1
  0 siblings, 0 replies; 13+ messages in thread
From: Jiang, Cheng1 @ 2021-03-29 12:29 UTC (permalink / raw)
  To: Liu, Yong, maxime.coquelin, Xia, Chenbo
  Cc: dev, Hu, Jiayu, Yang, YvonneX, Wang, Yinan

Hi,

> -----Original Message-----
> From: Liu, Yong <yong.liu@intel.com>
> Sent: Wednesday, March 24, 2021 5:19 PM
> To: Jiang, Cheng1 <cheng1.jiang@intel.com>; maxime.coquelin@redhat.com;
> Xia, Chenbo <chenbo.xia@intel.com>
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Yang, YvonneX
> <yvonnex.yang@intel.com>; Wang, Yinan <yinan.wang@intel.com>; Jiang,
> Cheng1 <cheng1.jiang@intel.com>
> Subject: RE: [dpdk-dev] [PATCH v2] vhost: add support for packed ring in
> async vhost
> 
> 
> 
> > -----Original Message-----
> > From: dev <dev-bounces@dpdk.org> On Behalf Of Cheng Jiang
> > Sent: Monday, March 22, 2021 2:15 PM
> > To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>
> > Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Yang, YvonneX
> > <yvonnex.yang@intel.com>; Wang, Yinan <yinan.wang@intel.com>; Jiang,
> > Cheng1 <cheng1.jiang@intel.com>
> > Subject: [dpdk-dev] [PATCH v2] vhost: add support for packed ring in
> > async vhost
> >
> > For now async vhost data path only supports split ring structure. In
> > order to make async vhost compatible with virtio 1.1 spec this patch
> > enables packed ring in async vhost data path.
> >
> > Signed-off-by: Cheng Jiang <Cheng1.jiang@intel.com>
> > ---
> > v2:
> >   * fix wrong buffer index in rte_vhost_poll_enqueue_completed()
> >   * add async_buffers_packed memory free in vhost_free_async_mem()
> >
> >  lib/librte_vhost/rte_vhost_async.h |   1 +
> >  lib/librte_vhost/vhost.c           |  24 +-
> >  lib/librte_vhost/vhost.h           |   7 +-
> >  lib/librte_vhost/virtio_net.c      | 447 +++++++++++++++++++++++++++--
> >  4 files changed, 441 insertions(+), 38 deletions(-)
> >
> > diff --git a/lib/librte_vhost/rte_vhost_async.h
> > b/lib/librte_vhost/rte_vhost_async.h
> > index c855ff875..6faa31f5a 100644
> > --- a/lib/librte_vhost/rte_vhost_async.h
> > +++ b/lib/librte_vhost/rte_vhost_async.h
> > @@ -89,6 +89,7 @@ struct rte_vhost_async_channel_ops {  struct
> > async_inflight_info {  struct rte_mbuf *mbuf;  uint16_t descs; /* num
> > of descs inflight */
> > +uint16_t nr_buffers; /* num of buffers inflight for packed ring */
> >  };
> >
> >  /**
> > diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c index
> > 52ab93d1e..51b44d6f2 100644
> > --- a/lib/librte_vhost/vhost.c
> > +++ b/lib/librte_vhost/vhost.c
> > @@ -330,15 +330,20 @@ vhost_free_async_mem(struct vhost_virtqueue
> > *vq)
> >  {
> >  if (vq->async_pkts_info)
> >  rte_free(vq->async_pkts_info);
> > -if (vq->async_descs_split)
> > +if (vq->async_buffers_packed) {
> > +rte_free(vq->async_buffers_packed);
> > +vq->async_buffers_packed = NULL;
> > +} else {
> >  rte_free(vq->async_descs_split);
> > +vq->async_descs_split = NULL;
> > +}
> > +
> >  if (vq->it_pool)
> >  rte_free(vq->it_pool);
> >  if (vq->vec_pool)
> >  rte_free(vq->vec_pool);
> >
> >  vq->async_pkts_info = NULL;
> > -vq->async_descs_split = NULL;
> >  vq->it_pool = NULL;
> >  vq->vec_pool = NULL;
> >  }
> > @@ -1603,9 +1608,9 @@ int rte_vhost_async_channel_register(int vid,
> > uint16_t queue_id,  return -1;
> >
> >  /* packed queue is not supported */
> > -if (unlikely(vq_is_packed(dev) || !f.async_inorder)) {
> > +if (unlikely(!f.async_inorder)) {
> >  VHOST_LOG_CONFIG(ERR,
> > -"async copy is not supported on packed queue or non-inorder mode "
> > +"async copy is not supported on non-inorder mode "
> >  "(vid %d, qid: %d)\n", vid, queue_id);  return -1;  } @@ -1643,10
> > +1648,17 @@ int rte_vhost_async_channel_register(int vid, uint16_t
> > queue_id,  vq->vec_pool = rte_malloc_socket(NULL,
> VHOST_MAX_ASYNC_VEC
> > * sizeof(struct iovec),  RTE_CACHE_LINE_SIZE, node);
> > -vq->async_descs_split = rte_malloc_socket(NULL,
> > +if (vq_is_packed(dev)) {
> > +vq->async_buffers_packed = rte_malloc_socket(NULL,
> > +vq->size * sizeof(struct vring_used_elem_packed),
> > +RTE_CACHE_LINE_SIZE, node);
> > +} else {
> > +vq->async_descs_split = rte_malloc_socket(NULL,
> >  vq->size * sizeof(struct vring_used_elem),  RTE_CACHE_LINE_SIZE,
> > node); -if (!vq->async_descs_split || !vq->async_pkts_info ||
> > +}
> > +
> > +if (!vq->async_pkts_info ||
> >  !vq->it_pool || !vq->vec_pool) {
> >  vhost_free_async_mem(vq);
> >  VHOST_LOG_CONFIG(ERR,
> > diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h index
> > 658f6fc28..d6324fbf8 100644
> > --- a/lib/librte_vhost/vhost.h
> > +++ b/lib/librte_vhost/vhost.h
> > @@ -206,9 +206,14 @@ struct vhost_virtqueue {  uint16_tasync_pkts_idx;
> > uint16_tasync_pkts_inflight_n;  uint16_tasync_last_pkts_n; -struct
> > vring_used_elem  *async_descs_split;
> > +union {
> > +struct vring_used_elem  *async_descs_split; struct
> > +vring_used_elem_packed *async_buffers_packed; };
> >  uint16_t async_desc_idx;
> > +uint16_t async_packed_buffer_idx;
> >  uint16_t last_async_desc_idx;
> > +uint16_t last_async_buffer_idx;
> >
> >  /* vq async features */
> >  boolasync_inorder;
> > diff --git a/lib/librte_vhost/virtio_net.c
> > b/lib/librte_vhost/virtio_net.c index 583bf379c..fa8c4f4fe 100644
> > --- a/lib/librte_vhost/virtio_net.c
> > +++ b/lib/librte_vhost/virtio_net.c
> > @@ -363,8 +363,7 @@
> > vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue
> *vq,
> > }
> >
> >  static __rte_always_inline void
> > -vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
> > -   struct vhost_virtqueue *vq,
> > +vhost_shadow_enqueue_packed(struct vhost_virtqueue *vq,
> >     uint32_t len[],
> >     uint16_t id[],
> >     uint16_t count[],
> > @@ -382,6 +381,17 @@ vhost_shadow_enqueue_single_packed(struct
> > virtio_net *dev,
> >  vq->shadow_aligned_idx += count[i];
> >  vq->shadow_used_idx++;
> >  }
> > +}
> > +
> > +static __rte_always_inline void
> > +vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
> > +   struct vhost_virtqueue *vq,
> > +   uint32_t len[],
> > +   uint16_t id[],
> > +   uint16_t count[],
> > +   uint16_t num_buffers)
> > +{
> > +vhost_shadow_enqueue_packed(vq, len, id, count, num_buffers);
> >
> >  if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
> > do_data_copy_enqueue(dev, vq); @@ -1633,12 +1643,343 @@
> > virtio_dev_rx_async_submit_split(struct
> > virtio_net *dev,
> >  return pkt_idx;
> >  }
> >
> > +static __rte_always_inline int
> > +vhost_enqueue_async_single_packed(struct virtio_net *dev,
> > +    struct vhost_virtqueue *vq,
> > +    struct rte_mbuf *pkt,
> > +    struct buf_vector *buf_vec,
> > +    uint16_t *nr_descs,
> > +    uint16_t *nr_buffers,
> > +    struct iovec *src_iovec, struct iovec *dst_iovec,
> > +    struct rte_vhost_iov_iter *src_it,
> > +    struct rte_vhost_iov_iter *dst_it) { uint16_t nr_vec = 0;
> > +uint16_t avail_idx = vq->last_avail_idx; uint16_t max_tries, tries =
> > +0; uint16_t buf_id = 0; uint32_t len = 0; uint16_t desc_count;
> > +uint32_t size = pkt->pkt_len + sizeof(struct
> > virtio_net_hdr_mrg_rxbuf);
> > +uint32_t buffer_len[vq->size];
> > +uint16_t buffer_buf_id[vq->size];
> > +uint16_t buffer_desc_count[vq->size]; *nr_buffers = 0;
> > +
> > +if (rxvq_is_mergeable(dev))
> > +max_tries = vq->size - 1;
> > +else
> > +max_tries = 1;
> > +
> > +while (size > 0) {
> > +/*
> > + * if we tried all available ring items, and still
> > + * can't get enough buf, it means something abnormal
> > + * happened.
> > + */
> > +if (unlikely(++tries > max_tries))
> > +return -1;
> > +
> > +if (unlikely(fill_vec_buf_packed(dev, vq, avail_idx, &desc_count,
> > +buf_vec, &nr_vec, &buf_id, &len,
> > +VHOST_ACCESS_RW) < 0))
> > +return -1;
> > +
> > +len = RTE_MIN(len, size);
> > +size -= len;
> > +
> > +buffer_len[*nr_buffers] = len;
> > +buffer_buf_id[*nr_buffers] = buf_id;
> > +buffer_desc_count[*nr_buffers] = desc_count; *nr_buffers += 1;
> > +
> > +*nr_descs += desc_count;
> > +avail_idx += desc_count;
> > +if (avail_idx >= vq->size)
> > +avail_idx -= vq->size;
> > +}
> > +
> > +if (async_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, *nr_buffers,
> > +src_iovec, dst_iovec, src_it, dst_it) < 0) return -1;
> > +
> > +vhost_shadow_enqueue_packed(vq, buffer_len, buffer_buf_id,
> > +   buffer_desc_count, *nr_buffers);
> > +
> > +return 0;
> > +}
> > +
> > +static __rte_always_inline int16_t
> > +virtio_dev_rx_async_single_packed(struct virtio_net *dev,
> > +    struct vhost_virtqueue *vq,
> > +    struct rte_mbuf *pkt,
> > +    uint16_t *nr_descs, uint16_t *nr_buffers,
> > +    struct iovec *src_iovec, struct iovec *dst_iovec,
> > +    struct rte_vhost_iov_iter *src_it,
> > +    struct rte_vhost_iov_iter *dst_it) { struct buf_vector
> > +buf_vec[BUF_VECTOR_MAX]; *nr_descs = 0; *nr_buffers = 0;
> > +
> > +if (unlikely(vhost_enqueue_async_single_packed(dev, vq, pkt,
> > buf_vec,
> > + nr_descs,
> > + nr_buffers,
> > + src_iovec, dst_iovec,
> > + src_it, dst_it) < 0)) {
> > +VHOST_LOG_DATA(DEBUG,
> > +"(%d) failed to get enough desc from vring\n",
> > +dev->vid);
> > +return -1;
> > +}
> > +
> > +VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end
> > index %d\n",
> > +dev->vid, vq->last_avail_idx,
> > +vq->last_avail_idx + *nr_descs);
> > +
> > +return 0;
> > +}
> > +
> > +static __rte_noinline uint32_t
> > +virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct
> > +vhost_virtqueue *vq, uint16_t queue_id, struct rte_mbuf **pkts,
> > +uint32_t count, struct rte_mbuf **comp_pkts, uint32_t *comp_count) {
> 
> Hi Cheng,
> There are some common parts in virtio_dev_rx_async_submit_packed and
> virtio_dev_rx_async_submit_split.
> We could abstract those common parts into shared functions, which would
> bring more clarity.

Sure, but the structures and variables used by the packed ring and the split ring are different, so they may not be very suitable for abstraction. I will consider it again, thank you.
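
One part that does look ring-agnostic is the point where buffered copy jobs are handed to the DMA channel and the shortfall is recorded. A minimal sketch of such a shared helper, reusing the transfer_data callback and variable names from this patch (the helper name itself is hypothetical, not part of the patch), could be:

static __rte_always_inline uint32_t
async_transfer_burst(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint16_t queue_id, struct rte_vhost_async_desc *tdes,
		uint32_t burst_nr)
{
	uint32_t n_pkts;

	/* hand the buffered copy jobs to the async (DMA) channel */
	n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id,
			tdes, 0, burst_nr);
	vq->async_pkts_inflight_n += n_pkts;

	/* return how many jobs were rejected; the caller does the recovery */
	return burst_nr - n_pkts;
}

Both submit paths could then call this at the batch-threshold check and at the tail flush, keeping only the error recovery ring-specific.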

> 
> Also, this patch may be too large to review in one piece; please split it into
> a few parts for better understanding.

I'll make it better in the next version.

> 
> Thanks,
> Marvin
> 
> > +uint32_t pkt_idx = 0, pkt_burst_idx = 0; uint16_t num_buffers;
> > +uint16_t num_desc;
> > +
> > +struct rte_vhost_iov_iter *it_pool = vq->it_pool; struct iovec
> > +*vec_pool = vq->vec_pool; struct rte_vhost_async_desc
> > +tdes[MAX_PKT_BURST]; struct iovec *src_iovec = vec_pool; struct iovec
> > +*dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1); struct
> > +rte_vhost_iov_iter *src_it = it_pool; struct rte_vhost_iov_iter
> > +*dst_it = it_pool + 1; uint16_t slot_idx = 0; uint16_t segs_await =
> > +0; struct async_inflight_info *pkts_info = vq->async_pkts_info;
> > +uint32_t n_pkts = 0, pkt_err = 0; uint32_t num_async_pkts = 0,
> > +num_done_pkts = 0; struct { uint16_t pkt_idx; uint16_t
> > +last_avail_idx; } async_pkts_log[MAX_PKT_BURST];
> > +
> > +rte_prefetch0(&vq->desc[vq->last_avail_idx & (vq->size - 1)]);
> > +
> > +for (pkt_idx = 0; pkt_idx < count; pkt_idx++) { if
> > +(unlikely(virtio_dev_rx_async_single_packed(dev, vq, pkts[pkt_idx],
> > +&num_desc, &num_buffers, src_iovec, dst_iovec, src_it, dst_it) < 0))
> > +{ break; }
> > +
> > +VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end
> > index %d\n",
> > +dev->vid, vq->last_avail_idx,
> > +vq->last_avail_idx + num_desc);
> > +
> > +slot_idx = (vq->async_pkts_idx + num_async_pkts) & (vq->size - 1); if
> > +(src_it->count) { uint16_t from, to;
> > +
> > +async_fill_desc(&tdes[pkt_burst_idx++], src_it, dst_it);
> > +pkts_info[slot_idx].descs = num_desc; pkts_info[slot_idx].nr_buffers
> > += num_buffers; pkts_info[slot_idx].mbuf = pkts[pkt_idx];
> > +async_pkts_log[num_async_pkts].pkt_idx = pkt_idx;
> > +async_pkts_log[num_async_pkts++].last_avail_idx =
> > +vq->last_avail_idx;
> > +src_iovec += src_it->nr_segs;
> > +dst_iovec += dst_it->nr_segs;
> > +src_it += 2;
> > +dst_it += 2;
> > +segs_await += src_it->nr_segs;
> > +
> > +/**
> > + * recover shadow used ring and keep DMA-occupied
> > + * descriptors.
> > + */
> > +from = vq->shadow_used_idx - num_buffers; to =
> > +vq->async_packed_buffer_idx & (vq->size - 1); if (num_buffers + to <=
> > +vq->size) { rte_memcpy(&vq->async_buffers_packed[to],
> > +&vq->shadow_used_packed[from],
> > +num_buffers *
> > +sizeof(struct
> > vring_used_elem_packed));
> > +} else {
> > +int size = vq->size - to;
> > +
> > +rte_memcpy(&vq->async_buffers_packed[to],
> > +&vq->shadow_used_packed[from],
> > +size *
> > +sizeof(struct
> > vring_used_elem_packed));
> > +rte_memcpy(vq->async_buffers_packed,
> > +&vq->shadow_used_packed[from +
> > +size], (num_buffers - size) *
> > +sizeof(struct
> > vring_used_elem_packed));
> > +}
> > +vq->async_packed_buffer_idx += num_buffers;
> > +vq->shadow_used_idx -= num_buffers;
> > +} else
> > +comp_pkts[num_done_pkts++] = pkts[pkt_idx];
> > +
> > +vq_inc_last_avail_packed(vq, num_desc);
> > +
> > +/*
> > + * conditions to trigger async device transfer:
> > + * - buffered packet number reaches transfer threshold
> > + * - unused async iov number is less than max vhost vector  */ if
> > +(unlikely(pkt_burst_idx >=
> > VHOST_ASYNC_BATCH_THRESHOLD ||
> > +((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
> > +BUF_VECTOR_MAX))) {
> > +n_pkts = vq->async_ops.transfer_data(dev->vid,
> > +queue_id, tdes, 0, pkt_burst_idx);
> > +src_iovec = vec_pool;
> > +dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >>
> > 1);
> > +src_it = it_pool;
> > +dst_it = it_pool + 1;
> > +segs_await = 0;
> > +vq->async_pkts_inflight_n += n_pkts;
> > +
> > +if (unlikely(n_pkts < pkt_burst_idx)) {
> > +/*
> > + * log error packets number here and do
> > actual
> > + * error processing when applications poll
> > + * completion
> > + */
> > +pkt_err = pkt_burst_idx - n_pkts;
> > +pkt_burst_idx = 0;
> > +break;
> > +}
> > +
> > +pkt_burst_idx = 0;
> > +}
> > +}
> > +
> > +if (pkt_burst_idx) {
> > +n_pkts = vq->async_ops.transfer_data(dev->vid,
> > +queue_id, tdes, 0, pkt_burst_idx);
> > +vq->async_pkts_inflight_n += n_pkts;
> > +
> > +if (unlikely(n_pkts < pkt_burst_idx)) pkt_err = pkt_burst_idx -
> > +n_pkts; }
> > +
> > +do_data_copy_enqueue(dev, vq);
> > +
> > +if (unlikely(pkt_err)) {
> > +uint16_t num_buffers = 0;
> > +
> > +num_async_pkts -= pkt_err;
> > +/* calculate the sum of descriptors of DMA-error packets. */ while
> > +(pkt_err-- > 0) { num_buffers += pkts_info[slot_idx & (vq->size -
> > +1)].nr_buffers; slot_idx--; }
> > +vq->async_packed_buffer_idx -= num_buffers;
> > +/* recover shadow used ring and available ring */
> > +vq->shadow_used_idx -= (vq->last_avail_idx -
> > +
> > async_pkts_log[num_async_pkts].last_avail_idx -
> > +num_buffers);
> 
> Could it be possible that vq->last_avail_idx is smaller than
> async_pkts_log[num_async_pkts].last_avail_idx when operating near the
> ring's boundary?

Yes, you are right. Will be fixed, thanks.
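
For illustration only: since the packed ring's avail index wraps at vq->size rather than at UINT16_MAX, the consumed-descriptor count has to be computed wrap-safely, along these lines (a sketch with a hypothetical helper name, not part of the patch):

static __rte_always_inline uint16_t
avail_idx_distance(struct vhost_virtqueue *vq, uint16_t old_idx)
{
	/* how far last_avail_idx has moved since old_idx was recorded,
	 * accounting for a possible wrap at vq->size
	 */
	if (vq->last_avail_idx >= old_idx)
		return vq->last_avail_idx - old_idx;

	return vq->size - old_idx + vq->last_avail_idx;
}

The v3 patch below sidesteps the subtraction entirely by rolling back the per-packet nr_buffers in the error path instead of relying on the logged avail index.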

Cheng

> 
> > +vq->last_avail_idx =
> > +async_pkts_log[num_async_pkts].last_avail_idx;
> > +pkt_idx = async_pkts_log[num_async_pkts].pkt_idx;
> > +num_done_pkts = pkt_idx - num_async_pkts; }
> > +
> > +vq->async_pkts_idx += num_async_pkts;
> > +*comp_count = num_done_pkts;
> > +
> > +if (likely(vq->shadow_used_idx)) {
> > +vhost_flush_enqueue_shadow_packed(dev, vq);
> > +vhost_vring_call_packed(dev, vq); }
> > +
> > +return pkt_idx;
> > +}
> > +
> > +static __rte_always_inline void
> > +vhost_update_used_packed(struct virtio_net *dev,
> > +  struct vhost_virtqueue *vq,
> > +  struct vring_used_elem_packed
> > *shadow_ring,
> > +  uint16_t count)
> > +{
> > +if (count == 0)
> > +return;
> > +int i;
> > +uint16_t used_idx = vq->last_used_idx; uint16_t head_idx =
> > +vq->last_used_idx; uint16_t head_flags = 0;
> > +
> > +/* Split loop in two to save memory barriers */ for (i = 0; i <
> > +count; i++) {
> > +vq->desc_packed[used_idx].id = shadow_ring[i].id;
> > +vq->desc_packed[used_idx].len = shadow_ring[i].len;
> > +
> > +used_idx += shadow_ring[i].count;
> > +if (used_idx >= vq->size)
> > +used_idx -= vq->size;
> > +}
> > +
> > +/* The ordering for storing desc flags needs to be enforced. */
> > +rte_atomic_thread_fence(__ATOMIC_RELEASE);
> > +
> > +for (i = 0; i < count; i++) {
> > +uint16_t flags;
> > +
> > +if (vq->shadow_used_packed[i].len)
> > +flags = VRING_DESC_F_WRITE;
> > +else
> > +flags = 0;
> > +
> > +if (vq->used_wrap_counter) {
> > +flags |= VRING_DESC_F_USED;
> > +flags |= VRING_DESC_F_AVAIL;
> > +} else {
> > +flags &= ~VRING_DESC_F_USED;
> > +flags &= ~VRING_DESC_F_AVAIL;
> > +}
> > +
> > +if (i > 0) {
> > +vq->desc_packed[vq->last_used_idx].flags = flags;
> > +
> > +vhost_log_cache_used_vring(dev, vq,
> > +vq->last_used_idx *
> > +sizeof(struct vring_packed_desc),
> > +sizeof(struct vring_packed_desc));
> > +} else {
> > +head_idx = vq->last_used_idx;
> > +head_flags = flags;
> > +}
> > +
> > +vq_inc_last_used_packed(vq, shadow_ring[i].count); }
> > +
> > +vq->desc_packed[head_idx].flags = head_flags;
> > +
> > +vhost_log_cache_used_vring(dev, vq,
> > +head_idx *
> > +sizeof(struct vring_packed_desc),
> > +sizeof(struct vring_packed_desc));
> > +
> > +vhost_log_cache_sync(dev, vq);
> > +}
> > +
> >  uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> > struct rte_mbuf **pkts, uint16_t count)  {  struct virtio_net *dev =
> > get_device(vid);  struct vhost_virtqueue *vq; -uint16_t n_pkts_cpl =
> > 0, n_pkts_put = 0, n_descs = 0;
> > +uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
> >  uint16_t start_idx, pkts_idx, vq_size;  struct async_inflight_info
> > *pkts_info;  uint16_t from, i; @@ -1680,53 +2021,96 @@ uint16_t
> > rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,  goto
> > done;  }
> >
> > -for (i = 0; i < n_pkts_put; i++) {
> > -from = (start_idx + i) & (vq_size - 1); -n_descs +=
> > pkts_info[from].descs; -pkts[i] = pkts_info[from].mbuf;
> > +if (vq_is_packed(dev)) {
> > +for (i = 0; i < n_pkts_put; i++) {
> > +from = (start_idx + i) & (vq_size - 1); n_buffers +=
> > +pkts_info[from].nr_buffers; pkts[i] = pkts_info[from].mbuf; } } else
> > +{ for (i = 0; i < n_pkts_put; i++) { from = (start_idx + i) &
> > +(vq_size - 1); n_descs += pkts_info[from].descs; pkts[i] =
> > +pkts_info[from].mbuf; }
> >  }
> > +
> >  vq->async_last_pkts_n = n_pkts_cpl - n_pkts_put;
> > vq->async_pkts_inflight_n -= n_pkts_put;
> >
> >  if (likely(vq->enabled && vq->access_ok)) { -uint16_t nr_left =
> > n_descs;  uint16_t nr_copy;  uint16_t to;
> >
> >  /* write back completed descriptors to used ring */ -do { -from =
> > vq->last_async_desc_idx & (vq->size - 1); -nr_copy = nr_left + from <=
> > vq->size ? nr_left :
> > -vq->size - from;
> > -to = vq->last_used_idx & (vq->size - 1);
> > -
> > -if (to + nr_copy <= vq->size) {
> > -rte_memcpy(&vq->used->ring[to],
> > +if (vq_is_packed(dev)) {
> > +uint16_t nr_left = n_buffers;
> > +uint16_t to;
> > +do {
> > +from = vq->last_async_buffer_idx &
> > +(vq->size - 1);
> > +to = (from + nr_left) & (vq->size - 1);
> > +
> > +if (to > from) {
> > +vhost_update_used_packed(dev, vq,
> > +vq->async_buffers_packed +
> > from,
> > +to - from);
> > +vq->last_async_buffer_idx += nr_left;
> > +nr_left = 0;
> > +} else {
> > +vhost_update_used_packed(dev, vq,
> > +vq->async_buffers_packed +
> > from,
> > +vq->size - from);
> > +vq->last_async_buffer_idx +=
> > +vq->size -
> > from;
> > +nr_left -= vq->size - from;
> > +}
> > +} while (nr_left > 0);
> > +vhost_vring_call_packed(dev, vq);
> > +} else {
> > +uint16_t nr_left = n_descs;
> > +do {
> > +from = vq->last_async_desc_idx & (vq->size -
> > 1);
> > +nr_copy = nr_left + from <= vq->size ? nr_left :
> > +vq->size - from;
> > +to = vq->last_used_idx & (vq->size - 1);
> > +
> > +if (to + nr_copy <= vq->size) {
> > +rte_memcpy(&vq->used->ring[to],
> >  &vq-
> > >async_descs_split[from],
> >  nr_copy *
> >  sizeof(struct
> > vring_used_elem));
> > -} else {
> > -uint16_t size = vq->size - to;
> > +} else {
> > +uint16_t size = vq->size - to;
> >
> > -rte_memcpy(&vq->used->ring[to],
> > +rte_memcpy(&vq->used->ring[to],
> >  &vq-
> > >async_descs_split[from],
> >  size *
> >  sizeof(struct
> > vring_used_elem));
> > -rte_memcpy(vq->used->ring,
> > +rte_memcpy(vq->used->ring,
> >  &vq->async_descs_split[from
> > +
> >  size], (nr_copy - size) *
> >  sizeof(struct
> > vring_used_elem));
> > -}
> > +}
> > +
> > +vq->last_async_desc_idx += nr_copy;
> > +vq->last_used_idx += nr_copy;
> > +nr_left -= nr_copy;
> > +} while (nr_left > 0);
> > +
> > +__atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
> > +vhost_vring_call_split(dev, vq); }
> >
> > -vq->last_async_desc_idx += nr_copy;
> > -vq->last_used_idx += nr_copy;
> > -nr_left -= nr_copy;
> > -} while (nr_left > 0);
> >
> > -__atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
> > -vhost_vring_call_split(dev, vq); -} else
> > -vq->last_async_desc_idx += n_descs;
> > +
> > +} else {
> > +if (vq_is_packed(dev))
> > +vq->last_async_buffer_idx += n_buffers;
> > +else
> > +vq->last_async_desc_idx += n_descs;
> > +}
> >
> >  done:
> >  rte_spinlock_unlock(&vq->access_lock);
> > @@ -1767,9 +2151,10 @@ virtio_dev_rx_async_submit(struct virtio_net
> > *dev, uint16_t queue_id,  if (count == 0)  goto out;
> >
> > -/* TODO: packed queue not implemented */  if (vq_is_packed(dev))
> > -nb_tx = 0;
> > +nb_tx = virtio_dev_rx_async_submit_packed(dev,
> > +vq, queue_id, pkts, count, comp_pkts, comp_count);
> >  else
> >  nb_tx = virtio_dev_rx_async_submit_split(dev,
> >  vq, queue_id, pkts, count, comp_pkts,
> > --
> > 2.29.2
> 


^ permalink raw reply	[flat|nested] 13+ messages in thread

* [dpdk-dev] [PATCH v3] vhost: add support for packed ring in async vhost
  2021-03-17  8:54 [dpdk-dev] [PATCH] vhost: add support for packed ring in async vhost Cheng Jiang
  2021-03-22  6:15 ` [dpdk-dev] [PATCH v2] " Cheng Jiang
@ 2021-03-31 14:06 ` Cheng Jiang
  2021-04-07  6:26   ` Hu, Jiayu
  2021-04-10 10:25 ` [dpdk-dev] [PATCH v4 0/4] " Cheng Jiang
  2 siblings, 1 reply; 13+ messages in thread
From: Cheng Jiang @ 2021-03-31 14:06 UTC (permalink / raw)
  To: maxime.coquelin, chenbo.xia
  Cc: dev, jiayu.hu, yvonnex.yang, yinan.wang, Cheng Jiang

For now async vhost data path only supports split ring structure. In
order to make async vhost compatible with virtio 1.1 spec this patch
enables packed ring in async vhost data path.

Signed-off-by: Cheng Jiang <Cheng1.jiang@intel.com>
---
v3:
  * fix error handler for DMA-copy packet
  * remove variables that are no longer needed
v2:
  * fix wrong buffer index in rte_vhost_poll_enqueue_completed()
  * add async_buffers_packed memory free in vhost_free_async_mem()

 lib/librte_vhost/rte_vhost_async.h |   1 +
 lib/librte_vhost/vhost.c           |  24 +-
 lib/librte_vhost/vhost.h           |   7 +-
 lib/librte_vhost/virtio_net.c      | 463 +++++++++++++++++++++++++++--
 4 files changed, 457 insertions(+), 38 deletions(-)

diff --git a/lib/librte_vhost/rte_vhost_async.h b/lib/librte_vhost/rte_vhost_async.h
index c855ff875..6faa31f5a 100644
--- a/lib/librte_vhost/rte_vhost_async.h
+++ b/lib/librte_vhost/rte_vhost_async.h
@@ -89,6 +89,7 @@ struct rte_vhost_async_channel_ops {
 struct async_inflight_info {
 	struct rte_mbuf *mbuf;
 	uint16_t descs; /* num of descs inflight */
+	uint16_t nr_buffers; /* num of buffers inflight for packed ring */
 };

 /**
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 52ab93d1e..51b44d6f2 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -330,15 +330,20 @@ vhost_free_async_mem(struct vhost_virtqueue *vq)
 {
 	if (vq->async_pkts_info)
 		rte_free(vq->async_pkts_info);
-	if (vq->async_descs_split)
+	if (vq->async_buffers_packed) {
+		rte_free(vq->async_buffers_packed);
+		vq->async_buffers_packed = NULL;
+	} else {
 		rte_free(vq->async_descs_split);
+		vq->async_descs_split = NULL;
+	}
+
 	if (vq->it_pool)
 		rte_free(vq->it_pool);
 	if (vq->vec_pool)
 		rte_free(vq->vec_pool);

 	vq->async_pkts_info = NULL;
-	vq->async_descs_split = NULL;
 	vq->it_pool = NULL;
 	vq->vec_pool = NULL;
 }
@@ -1603,9 +1608,9 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
 		return -1;

 	/* packed queue is not supported */
-	if (unlikely(vq_is_packed(dev) || !f.async_inorder)) {
+	if (unlikely(!f.async_inorder)) {
 		VHOST_LOG_CONFIG(ERR,
-			"async copy is not supported on packed queue or non-inorder mode "
+			"async copy is not supported on non-inorder mode "
 			"(vid %d, qid: %d)\n", vid, queue_id);
 		return -1;
 	}
@@ -1643,10 +1648,17 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
 	vq->vec_pool = rte_malloc_socket(NULL,
 			VHOST_MAX_ASYNC_VEC * sizeof(struct iovec),
 			RTE_CACHE_LINE_SIZE, node);
-	vq->async_descs_split = rte_malloc_socket(NULL,
+	if (vq_is_packed(dev)) {
+		vq->async_buffers_packed = rte_malloc_socket(NULL,
+			vq->size * sizeof(struct vring_used_elem_packed),
+			RTE_CACHE_LINE_SIZE, node);
+	} else {
+		vq->async_descs_split = rte_malloc_socket(NULL,
 			vq->size * sizeof(struct vring_used_elem),
 			RTE_CACHE_LINE_SIZE, node);
-	if (!vq->async_descs_split || !vq->async_pkts_info ||
+	}
+
+	if (!vq->async_pkts_info ||
 		!vq->it_pool || !vq->vec_pool) {
 		vhost_free_async_mem(vq);
 		VHOST_LOG_CONFIG(ERR,
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 658f6fc28..d6324fbf8 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -206,9 +206,14 @@ struct vhost_virtqueue {
 	uint16_t	async_pkts_idx;
 	uint16_t	async_pkts_inflight_n;
 	uint16_t	async_last_pkts_n;
-	struct vring_used_elem  *async_descs_split;
+	union {
+		struct vring_used_elem  *async_descs_split;
+		struct vring_used_elem_packed *async_buffers_packed;
+	};
 	uint16_t async_desc_idx;
+	uint16_t async_packed_buffer_idx;
 	uint16_t last_async_desc_idx;
+	uint16_t last_async_buffer_idx;

 	/* vq async features */
 	bool		async_inorder;
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 583bf379c..fa2dfde02 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -363,8 +363,7 @@ vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
 }

 static __rte_always_inline void
-vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
-				   struct vhost_virtqueue *vq,
+vhost_shadow_enqueue_packed(struct vhost_virtqueue *vq,
 				   uint32_t len[],
 				   uint16_t id[],
 				   uint16_t count[],
@@ -382,6 +381,17 @@ vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
 		vq->shadow_aligned_idx += count[i];
 		vq->shadow_used_idx++;
 	}
+}
+
+static __rte_always_inline void
+vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
+				   struct vhost_virtqueue *vq,
+				   uint32_t len[],
+				   uint16_t id[],
+				   uint16_t count[],
+				   uint16_t num_buffers)
+{
+	vhost_shadow_enqueue_packed(vq, len, id, count, num_buffers);

 	if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
 		do_data_copy_enqueue(dev, vq);
@@ -1452,6 +1462,73 @@ virtio_dev_rx_async_get_info_idx(uint16_t pkts_idx,
 		(vq_size - n_inflight + pkts_idx) & (vq_size - 1);
 }

+static __rte_always_inline void
+vhost_update_used_packed(struct virtio_net *dev,
+				  struct vhost_virtqueue *vq,
+				  struct vring_used_elem_packed *shadow_ring,
+				  uint16_t count)
+{
+	if (count == 0)
+		return;
+	int i;
+	uint16_t used_idx = vq->last_used_idx;
+	uint16_t head_idx = vq->last_used_idx;
+	uint16_t head_flags = 0;
+
+	/* Split loop in two to save memory barriers */
+	for (i = 0; i < count; i++) {
+		vq->desc_packed[used_idx].id = shadow_ring[i].id;
+		vq->desc_packed[used_idx].len = shadow_ring[i].len;
+
+		used_idx += shadow_ring[i].count;
+		if (used_idx >= vq->size)
+			used_idx -= vq->size;
+	}
+
+	/* The ordering for storing desc flags needs to be enforced. */
+	rte_atomic_thread_fence(__ATOMIC_RELEASE);
+
+	for (i = 0; i < count; i++) {
+		uint16_t flags;
+
+		if (vq->shadow_used_packed[i].len)
+			flags = VRING_DESC_F_WRITE;
+		else
+			flags = 0;
+
+		if (vq->used_wrap_counter) {
+			flags |= VRING_DESC_F_USED;
+			flags |= VRING_DESC_F_AVAIL;
+		} else {
+			flags &= ~VRING_DESC_F_USED;
+			flags &= ~VRING_DESC_F_AVAIL;
+		}
+
+		if (i > 0) {
+			vq->desc_packed[vq->last_used_idx].flags = flags;
+
+			vhost_log_cache_used_vring(dev, vq,
+					vq->last_used_idx *
+					sizeof(struct vring_packed_desc),
+					sizeof(struct vring_packed_desc));
+		} else {
+			head_idx = vq->last_used_idx;
+			head_flags = flags;
+		}
+
+		vq_inc_last_used_packed(vq, shadow_ring[i].count);
+	}
+
+	vq->desc_packed[head_idx].flags = head_flags;
+
+	vhost_log_cache_used_vring(dev, vq,
+				head_idx *
+				sizeof(struct vring_packed_desc),
+				sizeof(struct vring_packed_desc));
+
+	vhost_log_cache_sync(dev, vq);
+}
+
 static __rte_noinline uint32_t
 virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	struct vhost_virtqueue *vq, uint16_t queue_id,
@@ -1633,12 +1710,292 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	return pkt_idx;
 }

+static __rte_always_inline int
+vhost_enqueue_async_single_packed(struct virtio_net *dev,
+			    struct vhost_virtqueue *vq,
+			    struct rte_mbuf *pkt,
+			    struct buf_vector *buf_vec,
+			    uint16_t *nr_descs,
+			    uint16_t *nr_buffers,
+			    struct iovec *src_iovec, struct iovec *dst_iovec,
+			    struct rte_vhost_iov_iter *src_it,
+			    struct rte_vhost_iov_iter *dst_it)
+{
+	uint16_t nr_vec = 0;
+	uint16_t avail_idx = vq->last_avail_idx;
+	uint16_t max_tries, tries = 0;
+	uint16_t buf_id = 0;
+	uint32_t len = 0;
+	uint16_t desc_count;
+	uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
+	uint32_t buffer_len[vq->size];
+	uint16_t buffer_buf_id[vq->size];
+	uint16_t buffer_desc_count[vq->size];
+	*nr_buffers = 0;
+
+	if (rxvq_is_mergeable(dev))
+		max_tries = vq->size - 1;
+	else
+		max_tries = 1;
+
+	while (size > 0) {
+		/*
+		 * if we tried all available ring items, and still
+		 * can't get enough buf, it means something abnormal
+		 * happened.
+		 */
+		if (unlikely(++tries > max_tries))
+			return -1;
+
+		if (unlikely(fill_vec_buf_packed(dev, vq,
+						avail_idx, &desc_count,
+						buf_vec, &nr_vec,
+						&buf_id, &len,
+						VHOST_ACCESS_RW) < 0))
+			return -1;
+
+		len = RTE_MIN(len, size);
+		size -= len;
+
+		buffer_len[*nr_buffers] = len;
+		buffer_buf_id[*nr_buffers] = buf_id;
+		buffer_desc_count[*nr_buffers] = desc_count;
+		*nr_buffers += 1;
+
+		*nr_descs += desc_count;
+		avail_idx += desc_count;
+		if (avail_idx >= vq->size)
+			avail_idx -= vq->size;
+	}
+
+	if (async_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, *nr_buffers,
+		src_iovec, dst_iovec, src_it, dst_it) < 0)
+		return -1;
+
+	vhost_shadow_enqueue_packed(vq, buffer_len, buffer_buf_id,
+					   buffer_desc_count, *nr_buffers);
+
+	return 0;
+}
+
+static __rte_always_inline int16_t
+virtio_dev_rx_async_single_packed(struct virtio_net *dev,
+			    struct vhost_virtqueue *vq,
+			    struct rte_mbuf *pkt,
+			    uint16_t *nr_descs, uint16_t *nr_buffers,
+			    struct iovec *src_iovec, struct iovec *dst_iovec,
+			    struct rte_vhost_iov_iter *src_it,
+			    struct rte_vhost_iov_iter *dst_it)
+{
+	struct buf_vector buf_vec[BUF_VECTOR_MAX];
+	*nr_descs = 0;
+	*nr_buffers = 0;
+
+	if (unlikely(vhost_enqueue_async_single_packed(dev, vq, pkt, buf_vec,
+						 nr_descs,
+						 nr_buffers,
+						 src_iovec, dst_iovec,
+						 src_it, dst_it) < 0)) {
+		VHOST_LOG_DATA(DEBUG,
+				"(%d) failed to get enough desc from vring\n",
+				dev->vid);
+		return -1;
+	}
+
+	VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
+			dev->vid, vq->last_avail_idx,
+			vq->last_avail_idx + *nr_descs);
+
+	return 0;
+}
+
+static __rte_noinline uint32_t
+virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
+	struct vhost_virtqueue *vq, uint16_t queue_id,
+	struct rte_mbuf **pkts, uint32_t count,
+	struct rte_mbuf **comp_pkts, uint32_t *comp_count)
+{
+	uint32_t pkt_idx = 0, pkt_burst_idx = 0;
+	uint16_t num_buffers;
+	uint16_t num_desc;
+
+	struct rte_vhost_iov_iter *it_pool = vq->it_pool;
+	struct iovec *vec_pool = vq->vec_pool;
+	struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
+	struct iovec *src_iovec = vec_pool;
+	struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
+	struct rte_vhost_iov_iter *src_it = it_pool;
+	struct rte_vhost_iov_iter *dst_it = it_pool + 1;
+	uint16_t slot_idx = 0;
+	uint16_t segs_await = 0;
+	struct async_inflight_info *pkts_info = vq->async_pkts_info;
+	uint32_t n_pkts = 0, pkt_err = 0;
+	uint32_t num_async_pkts = 0, num_done_pkts = 0;
+
+	rte_prefetch0(&vq->desc[vq->last_avail_idx & (vq->size - 1)]);
+
+	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
+		if (unlikely(virtio_dev_rx_async_single_packed(dev, vq,
+						pkts[pkt_idx],
+						&num_desc, &num_buffers,
+						src_iovec, dst_iovec,
+						src_it, dst_it) < 0)) {
+			break;
+		}
+
+		VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
+			dev->vid, vq->last_avail_idx,
+			vq->last_avail_idx + num_desc);
+
+		slot_idx = (vq->async_pkts_idx + num_async_pkts) &
+			(vq->size - 1);
+		if (src_it->count) {
+			uint16_t from, to;
+
+			async_fill_desc(&tdes[pkt_burst_idx++], src_it, dst_it);
+			pkts_info[slot_idx].descs = num_desc;
+			pkts_info[slot_idx].nr_buffers = num_buffers;
+			pkts_info[slot_idx].mbuf = pkts[pkt_idx];
+			num_async_pkts++;
+			src_iovec += src_it->nr_segs;
+			dst_iovec += dst_it->nr_segs;
+			src_it += 2;
+			dst_it += 2;
+			segs_await += src_it->nr_segs;
+
+			/**
+			 * recover shadow used ring and keep DMA-occupied
+			 * descriptors.
+			 */
+			from = vq->shadow_used_idx - num_buffers;
+			to = vq->async_packed_buffer_idx & (vq->size - 1);
+			if (num_buffers + to <= vq->size) {
+				rte_memcpy(&vq->async_buffers_packed[to],
+					&vq->shadow_used_packed[from],
+					num_buffers *
+					sizeof(struct vring_used_elem_packed));
+			} else {
+				int size = vq->size - to;
+
+				rte_memcpy(&vq->async_buffers_packed[to],
+					&vq->shadow_used_packed[from],
+					size *
+					sizeof(struct vring_used_elem_packed));
+				rte_memcpy(vq->async_buffers_packed,
+					&vq->shadow_used_packed[from +
+					size], (num_buffers - size) *
+					sizeof(struct vring_used_elem_packed));
+			}
+			vq->async_packed_buffer_idx += num_buffers;
+			vq->shadow_used_idx -= num_buffers;
+		} else
+			comp_pkts[num_done_pkts++] = pkts[pkt_idx];
+
+		vq_inc_last_avail_packed(vq, num_desc);
+
+		/*
+		 * conditions to trigger async device transfer:
+		 * - buffered packet number reaches transfer threshold
+		 * - unused async iov number is less than max vhost vector
+		 */
+		if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
+			((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
+			BUF_VECTOR_MAX))) {
+			n_pkts = vq->async_ops.transfer_data(dev->vid,
+					queue_id, tdes, 0, pkt_burst_idx);
+			src_iovec = vec_pool;
+			dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
+			src_it = it_pool;
+			dst_it = it_pool + 1;
+			segs_await = 0;
+			vq->async_pkts_inflight_n += n_pkts;
+
+			if (unlikely(n_pkts < pkt_burst_idx)) {
+				/*
+				 * log error packets number here and do actual
+				 * error processing when applications poll
+				 * completion
+				 */
+				pkt_err = pkt_burst_idx - n_pkts;
+				pkt_burst_idx = 0;
+				pkt_idx++;
+				break;
+			}
+
+			pkt_burst_idx = 0;
+		}
+	}
+
+	if (pkt_burst_idx) {
+		n_pkts = vq->async_ops.transfer_data(dev->vid,
+				queue_id, tdes, 0, pkt_burst_idx);
+		vq->async_pkts_inflight_n += n_pkts;
+
+		if (unlikely(n_pkts < pkt_burst_idx))
+			pkt_err = pkt_burst_idx - n_pkts;
+	}
+
+	do_data_copy_enqueue(dev, vq);
+
+	if (unlikely(pkt_err)) {
+		uint16_t buffers_err = 0;
+		uint16_t async_buffer_idx;
+		uint16_t i;
+
+		num_async_pkts -= pkt_err;
+		pkt_idx -= pkt_err;
+		/* calculate the sum of buffers of DMA-error packets. */
+		while (pkt_err-- > 0) {
+			buffers_err +=
+				pkts_info[slot_idx & (vq->size - 1)].nr_buffers;
+			slot_idx--;
+		}
+
+		vq->async_packed_buffer_idx -= buffers_err;
+		async_buffer_idx = vq->async_packed_buffer_idx;
+		/* set 0 to the length of descriptors of DMA-error packets */
+		for (i = 0; i < buffers_err; i++) {
+			vq->async_buffers_packed[(async_buffer_idx + i)
+						& (vq->size - 1)].len = 0;
+		}
+		/* write back DMA-error descriptors to used ring */
+		do {
+			uint16_t from = async_buffer_idx & (vq->size - 1);
+			uint16_t to = (from + buffers_err) & (vq->size - 1);
+
+			if (to > from) {
+				vhost_update_used_packed(dev, vq,
+					vq->async_buffers_packed + from,
+					to - from);
+				buffers_err = 0;
+			} else {
+				vhost_update_used_packed(dev, vq,
+					vq->async_buffers_packed + from,
+					vq->size - from);
+				buffers_err -= vq->size - from;
+			}
+		} while (buffers_err > 0);
+		vhost_vring_call_packed(dev, vq);
+		num_done_pkts = pkt_idx - num_async_pkts;
+	}
+
+	vq->async_pkts_idx += num_async_pkts;
+	*comp_count = num_done_pkts;
+
+	if (likely(vq->shadow_used_idx)) {
+		vhost_flush_enqueue_shadow_packed(dev, vq);
+		vhost_vring_call_packed(dev, vq);
+	}
+
+	return pkt_idx;
+}
+
 uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 		struct rte_mbuf **pkts, uint16_t count)
 {
 	struct virtio_net *dev = get_device(vid);
 	struct vhost_virtqueue *vq;
-	uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0;
+	uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
 	uint16_t start_idx, pkts_idx, vq_size;
 	struct async_inflight_info *pkts_info;
 	uint16_t from, i;
@@ -1680,53 +2037,96 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 		goto done;
 	}

-	for (i = 0; i < n_pkts_put; i++) {
-		from = (start_idx + i) & (vq_size - 1);
-		n_descs += pkts_info[from].descs;
-		pkts[i] = pkts_info[from].mbuf;
+	if (vq_is_packed(dev)) {
+		for (i = 0; i < n_pkts_put; i++) {
+			from = (start_idx + i) & (vq_size - 1);
+			n_buffers += pkts_info[from].nr_buffers;
+			pkts[i] = pkts_info[from].mbuf;
+		}
+	} else {
+		for (i = 0; i < n_pkts_put; i++) {
+			from = (start_idx + i) & (vq_size - 1);
+			n_descs += pkts_info[from].descs;
+			pkts[i] = pkts_info[from].mbuf;
+		}
 	}
+
 	vq->async_last_pkts_n = n_pkts_cpl - n_pkts_put;
 	vq->async_pkts_inflight_n -= n_pkts_put;

 	if (likely(vq->enabled && vq->access_ok)) {
-		uint16_t nr_left = n_descs;
 		uint16_t nr_copy;
 		uint16_t to;

 		/* write back completed descriptors to used ring */
-		do {
-			from = vq->last_async_desc_idx & (vq->size - 1);
-			nr_copy = nr_left + from <= vq->size ? nr_left :
-				vq->size - from;
-			to = vq->last_used_idx & (vq->size - 1);
-
-			if (to + nr_copy <= vq->size) {
-				rte_memcpy(&vq->used->ring[to],
+		if (vq_is_packed(dev)) {
+			uint16_t nr_left = n_buffers;
+			uint16_t to;
+			do {
+				from = vq->last_async_buffer_idx &
+								(vq->size - 1);
+				to = (from + nr_left) & (vq->size - 1);
+
+				if (to > from) {
+					vhost_update_used_packed(dev, vq,
+						vq->async_buffers_packed + from,
+						to - from);
+					vq->last_async_buffer_idx += nr_left;
+					nr_left = 0;
+				} else {
+					vhost_update_used_packed(dev, vq,
+						vq->async_buffers_packed + from,
+						vq->size - from);
+					vq->last_async_buffer_idx +=
+								vq->size - from;
+					nr_left -= vq->size - from;
+				}
+			} while (nr_left > 0);
+			vhost_vring_call_packed(dev, vq);
+		} else {
+			uint16_t nr_left = n_descs;
+			do {
+				from = vq->last_async_desc_idx & (vq->size - 1);
+				nr_copy = nr_left + from <= vq->size ? nr_left :
+					vq->size - from;
+				to = vq->last_used_idx & (vq->size - 1);
+
+				if (to + nr_copy <= vq->size) {
+					rte_memcpy(&vq->used->ring[to],
 						&vq->async_descs_split[from],
 						nr_copy *
 						sizeof(struct vring_used_elem));
-			} else {
-				uint16_t size = vq->size - to;
+				} else {
+					uint16_t size = vq->size - to;

-				rte_memcpy(&vq->used->ring[to],
+					rte_memcpy(&vq->used->ring[to],
 						&vq->async_descs_split[from],
 						size *
 						sizeof(struct vring_used_elem));
-				rte_memcpy(vq->used->ring,
+					rte_memcpy(vq->used->ring,
 						&vq->async_descs_split[from +
 						size], (nr_copy - size) *
 						sizeof(struct vring_used_elem));
-			}
+				}
+
+				vq->last_async_desc_idx += nr_copy;
+				vq->last_used_idx += nr_copy;
+				nr_left -= nr_copy;
+			} while (nr_left > 0);
+
+			__atomic_add_fetch(&vq->used->idx, n_descs,
+					__ATOMIC_RELEASE);
+			vhost_vring_call_split(dev, vq);
+		}

-			vq->last_async_desc_idx += nr_copy;
-			vq->last_used_idx += nr_copy;
-			nr_left -= nr_copy;
-		} while (nr_left > 0);

-		__atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
-		vhost_vring_call_split(dev, vq);
-	} else
-		vq->last_async_desc_idx += n_descs;
+
+	} else {
+		if (vq_is_packed(dev))
+			vq->last_async_buffer_idx += n_buffers;
+		else
+			vq->last_async_desc_idx += n_descs;
+	}

 done:
 	rte_spinlock_unlock(&vq->access_lock);
@@ -1767,9 +2167,10 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
 	if (count == 0)
 		goto out;

-	/* TODO: packed queue not implemented */
 	if (vq_is_packed(dev))
-		nb_tx = 0;
+		nb_tx = virtio_dev_rx_async_submit_packed(dev,
+				vq, queue_id, pkts, count, comp_pkts,
+				comp_count);
 	else
 		nb_tx = virtio_dev_rx_async_submit_split(dev,
 				vq, queue_id, pkts, count, comp_pkts,
--
2.29.2


^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [dpdk-dev] [PATCH v3] vhost: add support for packed ring in async vhost
  2021-03-31 14:06 ` [dpdk-dev] [PATCH v3] " Cheng Jiang
@ 2021-04-07  6:26   ` Hu, Jiayu
  2021-04-08 12:01     ` Jiang, Cheng1
  0 siblings, 1 reply; 13+ messages in thread
From: Hu, Jiayu @ 2021-04-07  6:26 UTC (permalink / raw)
  To: Jiang, Cheng1, maxime.coquelin, Xia, Chenbo
  Cc: dev, Yang, YvonneX, Wang, Yinan

Hi Cheng,

Some comments are inline.

> -----Original Message-----
> From: Jiang, Cheng1 <cheng1.jiang@intel.com>
> Sent: Wednesday, March 31, 2021 10:06 PM
> To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Yang, YvonneX
> <yvonnex.yang@intel.com>; Wang, Yinan <yinan.wang@intel.com>; Jiang,
> Cheng1 <cheng1.jiang@intel.com>
> Subject: [PATCH v3] vhost: add support for packed ring in async vhost
> 
> For now async vhost data path only supports split ring structure. In
> order to make async vhost compatible with virtio 1.1 spec this patch
> enables packed ring in async vhost data path.
> 
> Signed-off-by: Cheng Jiang <Cheng1.jiang@intel.com>
> ---
> v3:
>   * fix error handler for DMA-copy packet
>   * remove variables that are no longer needed
> v2:
>   * fix wrong buffer index in rte_vhost_poll_enqueue_completed()
>   * add async_buffers_packed memory free in vhost_free_async_mem()
> 
>  lib/librte_vhost/rte_vhost_async.h |   1 +
>  lib/librte_vhost/vhost.c           |  24 +-
>  lib/librte_vhost/vhost.h           |   7 +-
>  lib/librte_vhost/virtio_net.c      | 463 +++++++++++++++++++++++++++--
>  4 files changed, 457 insertions(+), 38 deletions(-)
> 
> diff --git a/lib/librte_vhost/rte_vhost_async.h
> b/lib/librte_vhost/rte_vhost_async.h
> index c855ff875..6faa31f5a 100644
> --- a/lib/librte_vhost/rte_vhost_async.h
> +++ b/lib/librte_vhost/rte_vhost_async.h
> @@ -89,6 +89,7 @@ struct rte_vhost_async_channel_ops {
>  struct async_inflight_info {
>  	struct rte_mbuf *mbuf;
>  	uint16_t descs; /* num of descs inflight */
> +	uint16_t nr_buffers; /* num of buffers inflight for packed ring */
>  };
> 
>  /**
> diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
> index 52ab93d1e..51b44d6f2 100644
> --- a/lib/librte_vhost/vhost.c
> +++ b/lib/librte_vhost/vhost.c
> @@ -330,15 +330,20 @@ vhost_free_async_mem(struct vhost_virtqueue
> *vq)
>  {
>  	if (vq->async_pkts_info)
>  		rte_free(vq->async_pkts_info);
> -	if (vq->async_descs_split)
> +	if (vq->async_buffers_packed) {
> +		rte_free(vq->async_buffers_packed);
> +		vq->async_buffers_packed = NULL;
> +	} else {
>  		rte_free(vq->async_descs_split);
> +		vq->async_descs_split = NULL;
> +	}
> +
>  	if (vq->it_pool)
>  		rte_free(vq->it_pool);
>  	if (vq->vec_pool)
>  		rte_free(vq->vec_pool);
> 
>  	vq->async_pkts_info = NULL;
> -	vq->async_descs_split = NULL;
>  	vq->it_pool = NULL;
>  	vq->vec_pool = NULL;
>  }
> @@ -1603,9 +1608,9 @@ int rte_vhost_async_channel_register(int vid,
> uint16_t queue_id,
>  		return -1;
> 
>  	/* packed queue is not supported */
> -	if (unlikely(vq_is_packed(dev) || !f.async_inorder)) {
> +	if (unlikely(!f.async_inorder)) {
>  		VHOST_LOG_CONFIG(ERR,
> -			"async copy is not supported on packed queue or
> non-inorder mode "
> +			"async copy is not supported on non-inorder mode "
>  			"(vid %d, qid: %d)\n", vid, queue_id);
>  		return -1;
>  	}
> @@ -1643,10 +1648,17 @@ int rte_vhost_async_channel_register(int vid,
> uint16_t queue_id,
>  	vq->vec_pool = rte_malloc_socket(NULL,
>  			VHOST_MAX_ASYNC_VEC * sizeof(struct iovec),
>  			RTE_CACHE_LINE_SIZE, node);
> -	vq->async_descs_split = rte_malloc_socket(NULL,
> +	if (vq_is_packed(dev)) {
> +		vq->async_buffers_packed = rte_malloc_socket(NULL,
> +			vq->size * sizeof(struct vring_used_elem_packed),
> +			RTE_CACHE_LINE_SIZE, node);
> +	} else {
> +		vq->async_descs_split = rte_malloc_socket(NULL,
>  			vq->size * sizeof(struct vring_used_elem),
>  			RTE_CACHE_LINE_SIZE, node);
> -	if (!vq->async_descs_split || !vq->async_pkts_info ||
> +	}
> +
> +	if (!vq->async_pkts_info ||

The allocation of async_buffers_packed also needs a failure check here.
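
For example, the check could cover whichever array was actually allocated; a sketch along the lines of the surrounding code (error logging kept as in the existing code):

	if (!vq->async_pkts_info || !vq->it_pool || !vq->vec_pool ||
		(vq_is_packed(dev) ? !vq->async_buffers_packed :
					!vq->async_descs_split)) {
		vhost_free_async_mem(vq);
		/* log the allocation failure as the existing code does */
		return -1;
	}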

>  		!vq->it_pool || !vq->vec_pool) {
>  		vhost_free_async_mem(vq);
>  		VHOST_LOG_CONFIG(ERR,
> diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
> index 658f6fc28..d6324fbf8 100644
> --- a/lib/librte_vhost/vhost.h
> +++ b/lib/librte_vhost/vhost.h
> @@ -206,9 +206,14 @@ struct vhost_virtqueue {
>  	uint16_t	async_pkts_idx;
>  	uint16_t	async_pkts_inflight_n;
>  	uint16_t	async_last_pkts_n;
> -	struct vring_used_elem  *async_descs_split;
> +	union {
> +		struct vring_used_elem  *async_descs_split;
> +		struct vring_used_elem_packed *async_buffers_packed;
> +	};
>  	uint16_t async_desc_idx;
> +	uint16_t async_packed_buffer_idx;
>  	uint16_t last_async_desc_idx;
> +	uint16_t last_async_buffer_idx;
> 
>  	/* vq async features */
>  	bool		async_inorder;
> diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
> index 583bf379c..fa2dfde02 100644
> --- a/lib/librte_vhost/virtio_net.c
> +++ b/lib/librte_vhost/virtio_net.c
> @@ -363,8 +363,7 @@
> vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
>  }
> 
>  static __rte_always_inline void
> -vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
> -				   struct vhost_virtqueue *vq,
> +vhost_shadow_enqueue_packed(struct vhost_virtqueue *vq,
>  				   uint32_t len[],
>  				   uint16_t id[],
>  				   uint16_t count[],
> @@ -382,6 +381,17 @@ vhost_shadow_enqueue_single_packed(struct
> virtio_net *dev,
>  		vq->shadow_aligned_idx += count[i];
>  		vq->shadow_used_idx++;
>  	}
> +}
> +
> +static __rte_always_inline void
> +vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
> +				   struct vhost_virtqueue *vq,
> +				   uint32_t len[],
> +				   uint16_t id[],
> +				   uint16_t count[],
> +				   uint16_t num_buffers)
> +{
> +	vhost_shadow_enqueue_packed(vq, len, id, count, num_buffers);
> 
>  	if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
>  		do_data_copy_enqueue(dev, vq);
> @@ -1452,6 +1462,73 @@ virtio_dev_rx_async_get_info_idx(uint16_t
> pkts_idx,
>  		(vq_size - n_inflight + pkts_idx) & (vq_size - 1);
>  }
> 
> +static __rte_always_inline void
> +vhost_update_used_packed(struct virtio_net *dev,
> +				  struct vhost_virtqueue *vq,
> +				  struct vring_used_elem_packed
> *shadow_ring,
> +				  uint16_t count)
> +{
> +	if (count == 0)
> +		return;
> +	int i;
> +	uint16_t used_idx = vq->last_used_idx;
> +	uint16_t head_idx = vq->last_used_idx;
> +	uint16_t head_flags = 0;
> +
> +	/* Split loop in two to save memory barriers */
> +	for (i = 0; i < count; i++) {
> +		vq->desc_packed[used_idx].id = shadow_ring[i].id;
> +		vq->desc_packed[used_idx].len = shadow_ring[i].len;
> +
> +		used_idx += shadow_ring[i].count;
> +		if (used_idx >= vq->size)
> +			used_idx -= vq->size;
> +	}
> +
> +	/* The ordering for storing desc flags needs to be enforced. */
> +	rte_atomic_thread_fence(__ATOMIC_RELEASE);
> +
> +	for (i = 0; i < count; i++) {
> +		uint16_t flags;
> +
> +		if (vq->shadow_used_packed[i].len)
> +			flags = VRING_DESC_F_WRITE;
> +		else
> +			flags = 0;
> +
> +		if (vq->used_wrap_counter) {
> +			flags |= VRING_DESC_F_USED;
> +			flags |= VRING_DESC_F_AVAIL;
> +		} else {
> +			flags &= ~VRING_DESC_F_USED;
> +			flags &= ~VRING_DESC_F_AVAIL;
> +		}
> +
> +		if (i > 0) {
> +			vq->desc_packed[vq->last_used_idx].flags = flags;
> +
> +			vhost_log_cache_used_vring(dev, vq,
> +					vq->last_used_idx *
> +					sizeof(struct vring_packed_desc),
> +					sizeof(struct vring_packed_desc));
> +		} else {
> +			head_idx = vq->last_used_idx;
> +			head_flags = flags;
> +		}
> +
> +		vq_inc_last_used_packed(vq, shadow_ring[i].count);
> +	}
> +
> +	vq->desc_packed[head_idx].flags = head_flags;
> +
> +	vhost_log_cache_used_vring(dev, vq,
> +				head_idx *
> +				sizeof(struct vring_packed_desc),
> +				sizeof(struct vring_packed_desc));
> +
> +	vhost_log_cache_sync(dev, vq);

Async enqueue for packed ring does not support live migration, so the
logging above is not needed.
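
If the dirty-page logging is dropped, the flag write-back loop reduces to roughly the following sketch (derived from the quoted code with the vhost_log_cache_* calls removed; note the quoted code reads vq->shadow_used_packed[i].len where shadow_ring[i].len appears to be intended):

	for (i = 0; i < count; i++) {
		uint16_t flags = shadow_ring[i].len ? VRING_DESC_F_WRITE : 0;

		if (vq->used_wrap_counter)
			flags |= VRING_DESC_F_USED | VRING_DESC_F_AVAIL;

		if (i > 0) {
			vq->desc_packed[vq->last_used_idx].flags = flags;
		} else {
			head_idx = vq->last_used_idx;
			head_flags = flags;
		}

		vq_inc_last_used_packed(vq, shadow_ring[i].count);
	}

	/* head flags are written last so the guest sees a complete chain */
	vq->desc_packed[head_idx].flags = head_flags;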

> +}
> +
>  static __rte_noinline uint32_t
>  virtio_dev_rx_async_submit_split(struct virtio_net *dev,
>  	struct vhost_virtqueue *vq, uint16_t queue_id,
> @@ -1633,12 +1710,292 @@ virtio_dev_rx_async_submit_split(struct
> virtio_net *dev,
>  	return pkt_idx;
>  }
> 
> +static __rte_always_inline int
> +vhost_enqueue_async_single_packed(struct virtio_net *dev,
> +			    struct vhost_virtqueue *vq,
> +			    struct rte_mbuf *pkt,
> +			    struct buf_vector *buf_vec,
> +			    uint16_t *nr_descs,
> +			    uint16_t *nr_buffers,
> +			    struct iovec *src_iovec, struct iovec *dst_iovec,
> +			    struct rte_vhost_iov_iter *src_it,
> +			    struct rte_vhost_iov_iter *dst_it)
> +{
> +	uint16_t nr_vec = 0;
> +	uint16_t avail_idx = vq->last_avail_idx;
> +	uint16_t max_tries, tries = 0;
> +	uint16_t buf_id = 0;
> +	uint32_t len = 0;
> +	uint16_t desc_count;
> +	uint32_t size = pkt->pkt_len + sizeof(struct
> virtio_net_hdr_mrg_rxbuf);
> +	uint32_t buffer_len[vq->size];
> +	uint16_t buffer_buf_id[vq->size];
> +	uint16_t buffer_desc_count[vq->size];
> +	*nr_buffers = 0;
> +
> +	if (rxvq_is_mergeable(dev))
> +		max_tries = vq->size - 1;
> +	else
> +		max_tries = 1;
> +
> +	while (size > 0) {
> +		/*
> +		 * if we tried all available ring items, and still
> +		 * can't get enough buf, it means something abnormal
> +		 * happened.
> +		 */
> +		if (unlikely(++tries > max_tries))
> +			return -1;
> +
> +		if (unlikely(fill_vec_buf_packed(dev, vq,
> +						avail_idx, &desc_count,
> +						buf_vec, &nr_vec,
> +						&buf_id, &len,
> +						VHOST_ACCESS_RW) < 0))
> +			return -1;
> +
> +		len = RTE_MIN(len, size);
> +		size -= len;
> +
> +		buffer_len[*nr_buffers] = len;
> +		buffer_buf_id[*nr_buffers] = buf_id;
> +		buffer_desc_count[*nr_buffers] = desc_count;
> +		*nr_buffers += 1;
> +
> +		*nr_descs += desc_count;
> +		avail_idx += desc_count;
> +		if (avail_idx >= vq->size)
> +			avail_idx -= vq->size;
> +	}
> +
> +	if (async_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, *nr_buffers,
> +		src_iovec, dst_iovec, src_it, dst_it) < 0)
> +		return -1;
> +
> +	vhost_shadow_enqueue_packed(vq, buffer_len, buffer_buf_id,
> +					   buffer_desc_count, *nr_buffers);
> +
> +	return 0;
> +}
> +
> +static __rte_always_inline int16_t
> +virtio_dev_rx_async_single_packed(struct virtio_net *dev,
> +			    struct vhost_virtqueue *vq,
> +			    struct rte_mbuf *pkt,
> +			    uint16_t *nr_descs, uint16_t *nr_buffers,
> +			    struct iovec *src_iovec, struct iovec *dst_iovec,
> +			    struct rte_vhost_iov_iter *src_it,
> +			    struct rte_vhost_iov_iter *dst_it)
> +{
> +	struct buf_vector buf_vec[BUF_VECTOR_MAX];
> +	*nr_descs = 0;
> +	*nr_buffers = 0;
> +
> +	if (unlikely(vhost_enqueue_async_single_packed(dev, vq, pkt,
> buf_vec,
> +						 nr_descs,
> +						 nr_buffers,
> +						 src_iovec, dst_iovec,
> +						 src_it, dst_it) < 0)) {
> +		VHOST_LOG_DATA(DEBUG,
> +				"(%d) failed to get enough desc from vring\n",
> +				dev->vid);
> +		return -1;
> +	}
> +
> +	VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end
> index %d\n",
> +			dev->vid, vq->last_avail_idx,
> +			vq->last_avail_idx + *nr_descs);
> +
> +	return 0;
> +}
> +
> +static __rte_noinline uint32_t
> +virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
> +	struct vhost_virtqueue *vq, uint16_t queue_id,
> +	struct rte_mbuf **pkts, uint32_t count,
> +	struct rte_mbuf **comp_pkts, uint32_t *comp_count)
> +{
> +	uint32_t pkt_idx = 0, pkt_burst_idx = 0;
> +	uint16_t num_buffers;
> +	uint16_t num_desc;
> +
> +	struct rte_vhost_iov_iter *it_pool = vq->it_pool;
> +	struct iovec *vec_pool = vq->vec_pool;
> +	struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
> +	struct iovec *src_iovec = vec_pool;
> +	struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
> +	struct rte_vhost_iov_iter *src_it = it_pool;
> +	struct rte_vhost_iov_iter *dst_it = it_pool + 1;
> +	uint16_t slot_idx = 0;
> +	uint16_t segs_await = 0;
> +	struct async_inflight_info *pkts_info = vq->async_pkts_info;
> +	uint32_t n_pkts = 0, pkt_err = 0;
> +	uint32_t num_async_pkts = 0, num_done_pkts = 0;
> +
> +	rte_prefetch0(&vq->desc[vq->last_avail_idx & (vq->size - 1)]);
> +
> +	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
> +		if (unlikely(virtio_dev_rx_async_single_packed(dev, vq,
> +						pkts[pkt_idx],
> +						&num_desc, &num_buffers,
> +						src_iovec, dst_iovec,
> +						src_it, dst_it) < 0)) {
> +			break;
> +		}
> +
> +		VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end
> index %d\n",
> +			dev->vid, vq->last_avail_idx,
> +			vq->last_avail_idx + num_desc);
> +
> +		slot_idx = (vq->async_pkts_idx + num_async_pkts) &
> +			(vq->size - 1);
> +		if (src_it->count) {
> +			uint16_t from, to;
> +
> +			async_fill_desc(&tdes[pkt_burst_idx++], src_it, dst_it);
> +			pkts_info[slot_idx].descs = num_desc;
> +			pkts_info[slot_idx].nr_buffers = num_buffers;
> +			pkts_info[slot_idx].mbuf = pkts[pkt_idx];
> +			num_async_pkts++;
> +			src_iovec += src_it->nr_segs;
> +			dst_iovec += dst_it->nr_segs;
> +			src_it += 2;
> +			dst_it += 2;
> +			segs_await += src_it->nr_segs;
> +
> +			/**
> +			 * recover shadow used ring and keep DMA-occupied
> +			 * descriptors.
> +			 */
> +			from = vq->shadow_used_idx - num_buffers;
> +			to = vq->async_packed_buffer_idx & (vq->size - 1);
> +			if (num_buffers + to <= vq->size) {
> +				rte_memcpy(&vq->async_buffers_packed[to],
> +					&vq->shadow_used_packed[from],
> +					num_buffers *
> +					sizeof(struct
> vring_used_elem_packed));
> +			} else {
> +				int size = vq->size - to;
> +
> +				rte_memcpy(&vq->async_buffers_packed[to],
> +					&vq->shadow_used_packed[from],
> +					size *
> +					sizeof(struct
> vring_used_elem_packed));
> +				rte_memcpy(vq->async_buffers_packed,
> +					&vq->shadow_used_packed[from +
> +					size], (num_buffers - size) *
> +					sizeof(struct
> vring_used_elem_packed));
> +			}
> +			vq->async_packed_buffer_idx += num_buffers;
> +			vq->shadow_used_idx -= num_buffers;
> +		} else
> +			comp_pkts[num_done_pkts++] = pkts[pkt_idx];
> +
> +		vq_inc_last_avail_packed(vq, num_desc);
> +
> +		/*
> +		 * conditions to trigger async device transfer:
> +		 * - buffered packet number reaches transfer threshold
> +		 * - unused async iov number is less than max vhost vector
> +		 */
> +		if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
> +			((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
> +			BUF_VECTOR_MAX))) {
> +			n_pkts = vq->async_ops.transfer_data(dev->vid,
> +					queue_id, tdes, 0, pkt_burst_idx);
> +			src_iovec = vec_pool;
> +			dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
> +			src_it = it_pool;
> +			dst_it = it_pool + 1;
> +			segs_await = 0;
> +			vq->async_pkts_inflight_n += n_pkts;
> +
> +			if (unlikely(n_pkts < pkt_burst_idx)) {
> +				/*
> +				 * log error packets number here and do actual
> +				 * error processing when applications poll
> +				 * completion
> +				 */
> +				pkt_err = pkt_burst_idx - n_pkts;
> +				pkt_burst_idx = 0;
> +				pkt_idx++;
> +				break;
> +			}
> +
> +			pkt_burst_idx = 0;
> +		}
> +	}
> +
> +	if (pkt_burst_idx) {
> +		n_pkts = vq->async_ops.transfer_data(dev->vid,
> +				queue_id, tdes, 0, pkt_burst_idx);
> +		vq->async_pkts_inflight_n += n_pkts;
> +
> +		if (unlikely(n_pkts < pkt_burst_idx))
> +			pkt_err = pkt_burst_idx - n_pkts;
> +	}
> +
> +	do_data_copy_enqueue(dev, vq);
> +
> +	if (unlikely(pkt_err)) {
> +		uint16_t buffers_err = 0;
> +		uint16_t async_buffer_idx;
> +		uint16_t i;
> +
> +		num_async_pkts -= pkt_err;
> +		pkt_idx -= pkt_err;
> +		/* calculate the sum of buffers of DMA-error packets. */
> +		while (pkt_err-- > 0) {
> +			buffers_err +=
> +				pkts_info[slot_idx & (vq->size - 1)].nr_buffers;
> +			slot_idx--;
> +		}
> +
> +		vq->async_packed_buffer_idx -= buffers_err;
> +		async_buffer_idx = vq->async_packed_buffer_idx;
> +		/* set 0 to the length of descriptors of DMA-error packets */
> +		for (i = 0; i < buffers_err; i++) {
> +			vq->async_buffers_packed[(async_buffer_idx + i)
> +						& (vq->size - 1)].len = 0;
> +		}
> +		/* write back DMA-error descriptors to used ring */
> +		do {
> +			uint16_t from = async_buffer_idx & (vq->size - 1);
> +			uint16_t to = (from + buffers_err) & (vq->size - 1);
> +
> +			if (to > from) {
> +				vhost_update_used_packed(dev, vq,
> +					vq->async_buffers_packed + from,
> +					to - from);
> +				buffers_err = 0;
> +			} else {
> +				vhost_update_used_packed(dev, vq,
> +					vq->async_buffers_packed + from,
> +					vq->size - from);
> +				buffers_err -= vq->size - from;
> +			}
> +		} while (buffers_err > 0);
> +		vhost_vring_call_packed(dev, vq);

Why notify front-end here?

> +		num_done_pkts = pkt_idx - num_async_pkts;
> +	}
> +
> +	vq->async_pkts_idx += num_async_pkts;
> +	*comp_count = num_done_pkts;
> +
> +	if (likely(vq->shadow_used_idx)) {
> +		vhost_flush_enqueue_shadow_packed(dev, vq);
> +		vhost_vring_call_packed(dev, vq);
> +	}
> +
> +	return pkt_idx;
> +}

virtio_dev_rx_async_submit_packed is too long, and several parts of it are
similar to the split ring code. I think you need to abstract the common parts
into inline functions to make the code easier to read.
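For example, the duplicated wrap-around copy from the shadow ring into the
async descriptor/buffer array could become one small helper shared by both
paths. Just a sketch of the idea; the name and exact signature below are
illustrative, not a final implementation:

static __rte_always_inline void
store_dma_desc_info(void *dst_ring, void *shadow_ring, uint16_t ring_size,
		uint16_t d_idx, uint16_t s_idx, uint16_t count,
		uint16_t elem_size)
{
	/* copy 'count' elements from the shadow ring into the destination
	 * ring, handling wrap-around on the destination index
	 */
	if (d_idx + count <= ring_size) {
		rte_memcpy((uint8_t *)dst_ring + d_idx * elem_size,
			(uint8_t *)shadow_ring + s_idx * elem_size,
			count * elem_size);
	} else {
		uint16_t size = ring_size - d_idx;

		rte_memcpy((uint8_t *)dst_ring + d_idx * elem_size,
			(uint8_t *)shadow_ring + s_idx * elem_size,
			size * elem_size);
		rte_memcpy(dst_ring,
			(uint8_t *)shadow_ring + (s_idx + size) * elem_size,
			(count - size) * elem_size);
	}
}

The split path would then call it with sizeof(struct vring_used_elem) and the
packed path with sizeof(struct vring_used_elem_packed), instead of open-coding
the two rte_memcpy() branches in each function.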

> +
>  uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
>  		struct rte_mbuf **pkts, uint16_t count)
>  {
>  	struct virtio_net *dev = get_device(vid);
>  	struct vhost_virtqueue *vq;
> -	uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0;
> +	uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
>  	uint16_t start_idx, pkts_idx, vq_size;
>  	struct async_inflight_info *pkts_info;
>  	uint16_t from, i;
> @@ -1680,53 +2037,96 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
>  		goto done;
>  	}
> 
> -	for (i = 0; i < n_pkts_put; i++) {
> -		from = (start_idx + i) & (vq_size - 1);
> -		n_descs += pkts_info[from].descs;
> -		pkts[i] = pkts_info[from].mbuf;
> +	if (vq_is_packed(dev)) {
> +		for (i = 0; i < n_pkts_put; i++) {
> +			from = (start_idx + i) & (vq_size - 1);
> +			n_buffers += pkts_info[from].nr_buffers;
> +			pkts[i] = pkts_info[from].mbuf;
> +		}
> +	} else {
> +		for (i = 0; i < n_pkts_put; i++) {
> +			from = (start_idx + i) & (vq_size - 1);
> +			n_descs += pkts_info[from].descs;
> +			pkts[i] = pkts_info[from].mbuf;
> +		}
>  	}
> +
>  	vq->async_last_pkts_n = n_pkts_cpl - n_pkts_put;
>  	vq->async_pkts_inflight_n -= n_pkts_put;
> 
>  	if (likely(vq->enabled && vq->access_ok)) {
> -		uint16_t nr_left = n_descs;
>  		uint16_t nr_copy;
>  		uint16_t to;
> 
>  		/* write back completed descriptors to used ring */
> -		do {
> -			from = vq->last_async_desc_idx & (vq->size - 1);
> -			nr_copy = nr_left + from <= vq->size ? nr_left :
> -				vq->size - from;
> -			to = vq->last_used_idx & (vq->size - 1);
> -
> -			if (to + nr_copy <= vq->size) {
> -				rte_memcpy(&vq->used->ring[to],
> +		if (vq_is_packed(dev)) {
> +			uint16_t nr_left = n_buffers;
> +			uint16_t to;
> +			do {
> +				from = vq->last_async_buffer_idx &
> +								(vq->size - 1);
> +				to = (from + nr_left) & (vq->size - 1);
> +
> +				if (to > from) {
> +					vhost_update_used_packed(dev, vq,
> +						vq->async_buffers_packed + from,
> +						to - from);
> +					vq->last_async_buffer_idx += nr_left;
> +					nr_left = 0;
> +				} else {
> +					vhost_update_used_packed(dev, vq,
> +						vq->async_buffers_packed + from,
> +						vq->size - from);
> +					vq->last_async_buffer_idx +=
> +								vq->size - from;
> +					nr_left -= vq->size - from;
> +				}
> +			} while (nr_left > 0);
> +			vhost_vring_call_packed(dev, vq);
> +		} else {
> +			uint16_t nr_left = n_descs;
> +			do {
> +				from = vq->last_async_desc_idx & (vq->size - 1);
> +				nr_copy = nr_left + from <= vq->size ? nr_left :
> +					vq->size - from;
> +				to = vq->last_used_idx & (vq->size - 1);
> +
> +				if (to + nr_copy <= vq->size) {
> +					rte_memcpy(&vq->used->ring[to],
>  						&vq->async_descs_split[from],
>  						nr_copy *
>  						sizeof(struct vring_used_elem));
> -			} else {
> -				uint16_t size = vq->size - to;
> +				} else {
> +					uint16_t size = vq->size - to;
> 
> -				rte_memcpy(&vq->used->ring[to],
> +					rte_memcpy(&vq->used->ring[to],
>  						&vq->async_descs_split[from],
>  						size *
>  						sizeof(struct vring_used_elem));
> -				rte_memcpy(vq->used->ring,
> +					rte_memcpy(vq->used->ring,
>  						&vq->async_descs_split[from +
>  						size], (nr_copy - size) *
>  						sizeof(struct vring_used_elem));
> -			}
> +				}
> +
> +				vq->last_async_desc_idx += nr_copy;
> +				vq->last_used_idx += nr_copy;
> +				nr_left -= nr_copy;
> +			} while (nr_left > 0);
> +
> +			__atomic_add_fetch(&vq->used->idx, n_descs,
> +					__ATOMIC_RELEASE);
> +			vhost_vring_call_split(dev, vq);
> +		}
> 
> -			vq->last_async_desc_idx += nr_copy;
> -			vq->last_used_idx += nr_copy;
> -			nr_left -= nr_copy;
> -		} while (nr_left > 0);
> 
> -		__atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
> -		vhost_vring_call_split(dev, vq);
> -	} else
> -		vq->last_async_desc_idx += n_descs;
> +
> +	} else {
> +		if (vq_is_packed(dev))
> +			vq->last_async_buffer_idx += n_buffers;
> +		else
> +			vq->last_async_desc_idx += n_descs;
> +	}

rte_vhost_poll_enqueue_completed is too long and not easy to read. Same
suggestion as above.
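For instance, the packed-ring used ring write-back above could move into a
helper, mirroring a split-ring counterpart. A rough sketch based on the loop
in this patch (the helper name is illustrative, not a final implementation):

static __rte_always_inline void
write_back_completed_descs_packed(struct virtio_net *dev,
		struct vhost_virtqueue *vq, uint16_t n_buffers)
{
	uint16_t nr_left = n_buffers;
	uint16_t from, to;

	do {
		from = vq->last_async_buffer_idx & (vq->size - 1);
		to = (from + nr_left) & (vq->size - 1);

		if (to > from) {
			/* completed buffers are contiguous in the ring */
			vhost_update_used_packed(dev, vq,
				vq->async_buffers_packed + from, to - from);
			vq->last_async_buffer_idx += nr_left;
			nr_left = 0;
		} else {
			/* write back up to the end of the ring, then wrap */
			vhost_update_used_packed(dev, vq,
				vq->async_buffers_packed + from,
				vq->size - from);
			vq->last_async_buffer_idx += vq->size - from;
			nr_left -= vq->size - from;
		}
	} while (nr_left > 0);
}

rte_vhost_poll_enqueue_completed() would then only keep the ring-type
dispatch and the call to vhost_vring_call_packed()/vhost_vring_call_split().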

Thanks,
Jiayu

> 
>  done:
>  	rte_spinlock_unlock(&vq->access_lock);
> @@ -1767,9 +2167,10 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
>  	if (count == 0)
>  		goto out;
> 
> -	/* TODO: packed queue not implemented */
>  	if (vq_is_packed(dev))
> -		nb_tx = 0;
> +		nb_tx = virtio_dev_rx_async_submit_packed(dev,
> +				vq, queue_id, pkts, count, comp_pkts,
> +				comp_count);
>  	else
>  		nb_tx = virtio_dev_rx_async_submit_split(dev,
>  				vq, queue_id, pkts, count, comp_pkts,
> --
> 2.29.2


^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [dpdk-dev] [PATCH v3] vhost: add support for packed ring in async vhost
  2021-04-07  6:26   ` Hu, Jiayu
@ 2021-04-08 12:01     ` Jiang, Cheng1
  0 siblings, 0 replies; 13+ messages in thread
From: Jiang, Cheng1 @ 2021-04-08 12:01 UTC (permalink / raw)
  To: Hu, Jiayu, maxime.coquelin, Xia, Chenbo; +Cc: dev, Yang, YvonneX, Wang, Yinan

Hi Jiayu,

> -----Original Message-----
> From: Hu, Jiayu <jiayu.hu@intel.com>
> Sent: Wednesday, April 7, 2021 2:27 PM
> To: Jiang, Cheng1 <cheng1.jiang@intel.com>; maxime.coquelin@redhat.com;
> Xia, Chenbo <chenbo.xia@intel.com>
> Cc: dev@dpdk.org; Yang, YvonneX <yvonnex.yang@intel.com>; Wang, Yinan
> <yinan.wang@intel.com>
> Subject: RE: [PATCH v3] vhost: add support for packed ring in async vhost
> 
> Hi Cheng,
> 
> Some comments are inline.
> 
> > -----Original Message-----
> > From: Jiang, Cheng1 <cheng1.jiang@intel.com>
> > Sent: Wednesday, March 31, 2021 10:06 PM
> > To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>
> > Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Yang, YvonneX
> > <yvonnex.yang@intel.com>; Wang, Yinan <yinan.wang@intel.com>; Jiang,
> > Cheng1 <cheng1.jiang@intel.com>
> > Subject: [PATCH v3] vhost: add support for packed ring in async vhost
> >
> > For now async vhost data path only supports split ring structure. In
> > order to make async vhost compatible with virtio 1.1 spec this patch
> > enables packed ring in async vhost data path.
> >
> > Signed-off-by: Cheng Jiang <Cheng1.jiang@intel.com>
> > ---
> > v3:
> >   * fix error handler for DMA-copy packet
> >   * remove variables that are no longer needed
> > v2:
> >   * fix wrong buffer index in rte_vhost_poll_enqueue_completed()
> >   * add async_buffers_packed memory free in vhost_free_async_mem()
> >
> >  lib/librte_vhost/rte_vhost_async.h |   1 +
> >  lib/librte_vhost/vhost.c           |  24 +-
> >  lib/librte_vhost/vhost.h           |   7 +-
> >  lib/librte_vhost/virtio_net.c      | 463 +++++++++++++++++++++++++++--
> >  4 files changed, 457 insertions(+), 38 deletions(-)
> >
> > diff --git a/lib/librte_vhost/rte_vhost_async.h
> > b/lib/librte_vhost/rte_vhost_async.h
> > index c855ff875..6faa31f5a 100644
> > --- a/lib/librte_vhost/rte_vhost_async.h
> > +++ b/lib/librte_vhost/rte_vhost_async.h
> > @@ -89,6 +89,7 @@ struct rte_vhost_async_channel_ops {  struct
> > async_inflight_info {  struct rte_mbuf *mbuf;  uint16_t descs; /* num
> > of descs inflight */
> > +uint16_t nr_buffers; /* num of buffers inflight for packed ring */
> >  };
> >
> >  /**
> > diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c index
> > 52ab93d1e..51b44d6f2 100644
> > --- a/lib/librte_vhost/vhost.c
> > +++ b/lib/librte_vhost/vhost.c
> > @@ -330,15 +330,20 @@ vhost_free_async_mem(struct vhost_virtqueue
> > *vq)
> >  {
> >  if (vq->async_pkts_info)
> >  rte_free(vq->async_pkts_info);
> > -if (vq->async_descs_split)
> > +if (vq->async_buffers_packed) {
> > +rte_free(vq->async_buffers_packed);
> > +vq->async_buffers_packed = NULL;
> > +} else {
> >  rte_free(vq->async_descs_split);
> > +vq->async_descs_split = NULL;
> > +}
> > +
> >  if (vq->it_pool)
> >  rte_free(vq->it_pool);
> >  if (vq->vec_pool)
> >  rte_free(vq->vec_pool);
> >
> >  vq->async_pkts_info = NULL;
> > -vq->async_descs_split = NULL;
> >  vq->it_pool = NULL;
> >  vq->vec_pool = NULL;
> >  }
> > @@ -1603,9 +1608,9 @@ int rte_vhost_async_channel_register(int vid,
> > uint16_t queue_id,  return -1;
> >
> >  /* packed queue is not supported */
> > -if (unlikely(vq_is_packed(dev) || !f.async_inorder)) {
> > +if (unlikely(!f.async_inorder)) {
> >  VHOST_LOG_CONFIG(ERR,
> > -"async copy is not supported on packed queue or non-inorder mode "
> > +"async copy is not supported on non-inorder mode "
> >  "(vid %d, qid: %d)\n", vid, queue_id);  return -1;  } @@ -1643,10
> > +1648,17 @@ int rte_vhost_async_channel_register(int vid, uint16_t
> > queue_id,  vq->vec_pool = rte_malloc_socket(NULL,
> VHOST_MAX_ASYNC_VEC
> > * sizeof(struct iovec),  RTE_CACHE_LINE_SIZE, node);
> > -vq->async_descs_split = rte_malloc_socket(NULL,
> > +if (vq_is_packed(dev)) {
> > +vq->async_buffers_packed = rte_malloc_socket(NULL, size *
> > +vq->sizeof(struct vring_used_elem_packed),
> > +RTE_CACHE_LINE_SIZE, node);
> > +} else {
> > +vq->async_descs_split = rte_malloc_socket(NULL,
> >  vq->size * sizeof(struct vring_used_elem),  RTE_CACHE_LINE_SIZE,
> > node); -if (!vq->async_descs_split || !vq->async_pkts_info ||
> > +}
> > +
> > +if (!vq->async_pkts_info ||
> 
> Need to check if malloc fails for async_buffers_packed.

Sure, it will be fixed in the next version.
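Something like below, I think. Only a sketch to show the intent (error log
omitted for brevity); the exact form may look different in the next version:

	if (vq_is_packed(dev)) {
		vq->async_buffers_packed = rte_malloc_socket(NULL,
			vq->size * sizeof(struct vring_used_elem_packed),
			RTE_CACHE_LINE_SIZE, node);
		if (!vq->async_buffers_packed) {
			/* same cleanup path as the other allocation failures */
			vhost_free_async_mem(vq);
			return -1;
		}
	} else {
		vq->async_descs_split = rte_malloc_socket(NULL,
			vq->size * sizeof(struct vring_used_elem),
			RTE_CACHE_LINE_SIZE, node);
		if (!vq->async_descs_split) {
			vhost_free_async_mem(vq);
			return -1;
		}
	}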

> 
> >  !vq->it_pool || !vq->vec_pool) {
> >  vhost_free_async_mem(vq);
> >  VHOST_LOG_CONFIG(ERR,
> > diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h index
> > 658f6fc28..d6324fbf8 100644
> > --- a/lib/librte_vhost/vhost.h
> > +++ b/lib/librte_vhost/vhost.h
> > @@ -206,9 +206,14 @@ struct vhost_virtqueue {  uint16_tasync_pkts_idx;
> > uint16_tasync_pkts_inflight_n;  uint16_tasync_last_pkts_n; -struct
> > vring_used_elem  *async_descs_split;
> > +union {
> > +struct vring_used_elem  *async_descs_split; struct
> > +vring_used_elem_packed *async_buffers_packed; };
> >  uint16_t async_desc_idx;
> > +uint16_t async_packed_buffer_idx;
> >  uint16_t last_async_desc_idx;
> > +uint16_t last_async_buffer_idx;
> >
> >  /* vq async features */
> >  boolasync_inorder;
> > diff --git a/lib/librte_vhost/virtio_net.c
> > b/lib/librte_vhost/virtio_net.c index 583bf379c..fa2dfde02 100644
> > --- a/lib/librte_vhost/virtio_net.c
> > +++ b/lib/librte_vhost/virtio_net.c
> > @@ -363,8 +363,7 @@
> > vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue
> *vq,
> > }
> >
> >  static __rte_always_inline void
> > -vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
> > -   struct vhost_virtqueue *vq,
> > +vhost_shadow_enqueue_packed(struct vhost_virtqueue *vq,
> >     uint32_t len[],
> >     uint16_t id[],
> >     uint16_t count[],
> > @@ -382,6 +381,17 @@ vhost_shadow_enqueue_single_packed(struct
> > virtio_net *dev,
> >  vq->shadow_aligned_idx += count[i];
> >  vq->shadow_used_idx++;
> >  }
> > +}
> > +
> > +static __rte_always_inline void
> > +vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
> > +   struct vhost_virtqueue *vq,
> > +   uint32_t len[],
> > +   uint16_t id[],
> > +   uint16_t count[],
> > +   uint16_t num_buffers)
> > +{
> > +vhost_shadow_enqueue_packed(vq, len, id, count, num_buffers);
> >
> >  if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
> > do_data_copy_enqueue(dev, vq); @@ -1452,6 +1462,73 @@
> > virtio_dev_rx_async_get_info_idx(uint16_t
> > pkts_idx,
> >  (vq_size - n_inflight + pkts_idx) & (vq_size - 1);  }
> >
> > +static __rte_always_inline void
> > +vhost_update_used_packed(struct virtio_net *dev,
> > +  struct vhost_virtqueue *vq,
> > +  struct vring_used_elem_packed
> > *shadow_ring,
> > +  uint16_t count)
> > +{
> > +if (count == 0)
> > +return;
> > +int i;
> > +uint16_t used_idx = vq->last_used_idx; uint16_t head_idx =
> > +vq->last_used_idx; uint16_t head_flags = 0;
> > +
> > +/* Split loop in two to save memory barriers */ for (i = 0; i <
> > +count; i++) {
> > +vq->desc_packed[used_idx].id = shadow_ring[i].id;
> > +vq->desc_packed[used_idx].len = shadow_ring[i].len;
> > +
> > +used_idx += shadow_ring[i].count;
> > +if (used_idx >= vq->size)
> > +used_idx -= vq->size;
> > +}
> > +
> > +/* The ordering for storing desc flags needs to be enforced. */
> > +rte_atomic_thread_fence(__ATOMIC_RELEASE);
> > +
> > +for (i = 0; i < count; i++) {
> > +uint16_t flags;
> > +
> > +if (vq->shadow_used_packed[i].len)
> > +flags = VRING_DESC_F_WRITE;
> > +else
> > +flags = 0;
> > +
> > +if (vq->used_wrap_counter) {
> > +flags |= VRING_DESC_F_USED;
> > +flags |= VRING_DESC_F_AVAIL;
> > +} else {
> > +flags &= ~VRING_DESC_F_USED;
> > +flags &= ~VRING_DESC_F_AVAIL;
> > +}
> > +
> > +if (i > 0) {
> > +vq->desc_packed[vq->last_used_idx].flags = flags;
> > +
> > +vhost_log_cache_used_vring(dev, vq,
> > +vq->last_used_idx *
> > +sizeof(struct vring_packed_desc),
> > +sizeof(struct vring_packed_desc));
> > +} else {
> > +head_idx = vq->last_used_idx;
> > +head_flags = flags;
> > +}
> > +
> > +vq_inc_last_used_packed(vq, shadow_ring[i].count); }
> > +
> > +vq->desc_packed[head_idx].flags = head_flags;
> > +
> > +vhost_log_cache_used_vring(dev, vq,
> > +head_idx *
> > +sizeof(struct vring_packed_desc),
> > +sizeof(struct vring_packed_desc));
> > +
> > +vhost_log_cache_sync(dev, vq);
> 
> Async enqueue of packed ring has no support for live migration.
> The above code is not needed.

It will be removed.

> 
> > +}
> > +
> >  static __rte_noinline uint32_t
> >  virtio_dev_rx_async_submit_split(struct virtio_net *dev,  struct
> > vhost_virtqueue *vq, uint16_t queue_id, @@ -1633,12 +1710,292 @@
> > virtio_dev_rx_async_submit_split(struct
> > virtio_net *dev,
> >  return pkt_idx;
> >  }
> >
> > +static __rte_always_inline int
> > +vhost_enqueue_async_single_packed(struct virtio_net *dev,
> > +    struct vhost_virtqueue *vq,
> > +    struct rte_mbuf *pkt,
> > +    struct buf_vector *buf_vec,
> > +    uint16_t *nr_descs,
> > +    uint16_t *nr_buffers,
> > +    struct iovec *src_iovec, struct iovec *dst_iovec,
> > +    struct rte_vhost_iov_iter *src_it,
> > +    struct rte_vhost_iov_iter *dst_it) { uint16_t nr_vec = 0;
> > +uint16_t avail_idx = vq->last_avail_idx; uint16_t max_tries, tries =
> > +0; uint16_t buf_id = 0; uint32_t len = 0; uint16_t desc_count;
> > +uint32_t size = pkt->pkt_len + sizeof(struct
> > virtio_net_hdr_mrg_rxbuf);
> > +uint32_t buffer_len[vq->size];
> > +uint16_t buffer_buf_id[vq->size];
> > +uint16_t buffer_desc_count[vq->size]; *nr_buffers = 0;
> > +
> > +if (rxvq_is_mergeable(dev))
> > +max_tries = vq->size - 1;
> > +else
> > +max_tries = 1;
> > +
> > +while (size > 0) {
> > +/*
> > + * if we tried all available ring items, and still
> > + * can't get enough buf, it means something abnormal
> > + * happened.
> > + */
> > +if (unlikely(++tries > max_tries))
> > +return -1;
> > +
> > +if (unlikely(fill_vec_buf_packed(dev, vq, avail_idx, &desc_count,
> > +buf_vec, &nr_vec, &buf_id, &len,
> > +VHOST_ACCESS_RW) < 0))
> > +return -1;
> > +
> > +len = RTE_MIN(len, size);
> > +size -= len;
> > +
> > +buffer_len[*nr_buffers] = len;
> > +buffer_buf_id[*nr_buffers] = buf_id;
> > +buffer_desc_count[*nr_buffers] = desc_count; *nr_buffers += 1;
> > +
> > +*nr_descs += desc_count;
> > +avail_idx += desc_count;
> > +if (avail_idx >= vq->size)
> > +avail_idx -= vq->size;
> > +}
> > +
> > +if (async_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, *nr_buffers,
> > +src_iovec, dst_iovec, src_it, dst_it) < 0) return -1;
> > +
> > +vhost_shadow_enqueue_packed(vq, buffer_len, buffer_buf_id,
> > +   buffer_desc_count, *nr_buffers);
> > +
> > +return 0;
> > +}
> > +
> > +static __rte_always_inline int16_t
> > +virtio_dev_rx_async_single_packed(struct virtio_net *dev,
> > +    struct vhost_virtqueue *vq,
> > +    struct rte_mbuf *pkt,
> > +    uint16_t *nr_descs, uint16_t *nr_buffers,
> > +    struct iovec *src_iovec, struct iovec *dst_iovec,
> > +    struct rte_vhost_iov_iter *src_it,
> > +    struct rte_vhost_iov_iter *dst_it) { struct buf_vector
> > +buf_vec[BUF_VECTOR_MAX]; *nr_descs = 0; *nr_buffers = 0;
> > +
> > +if (unlikely(vhost_enqueue_async_single_packed(dev, vq, pkt,
> > buf_vec,
> > + nr_descs,
> > + nr_buffers,
> > + src_iovec, dst_iovec,
> > + src_it, dst_it) < 0)) {
> > +VHOST_LOG_DATA(DEBUG,
> > +"(%d) failed to get enough desc from vring\n",
> > +dev->vid);
> > +return -1;
> > +}
> > +
> > +VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end
> > index %d\n",
> > +dev->vid, vq->last_avail_idx,
> > +vq->last_avail_idx + *nr_descs);
> > +
> > +return 0;
> > +}
> > +
> > +static __rte_noinline uint32_t
> > +virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct
> > +vhost_virtqueue *vq, uint16_t queue_id, struct rte_mbuf **pkts,
> > +uint32_t count, struct rte_mbuf **comp_pkts, uint32_t *comp_count) {
> > +uint32_t pkt_idx = 0, pkt_burst_idx = 0; uint16_t num_buffers;
> > +uint16_t num_desc;
> > +
> > +struct rte_vhost_iov_iter *it_pool = vq->it_pool; struct iovec
> > +*vec_pool = vq->vec_pool; struct rte_vhost_async_desc
> > +tdes[MAX_PKT_BURST]; struct iovec *src_iovec = vec_pool; struct iovec
> > +*dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1); struct
> > +rte_vhost_iov_iter *src_it = it_pool; struct rte_vhost_iov_iter
> > +*dst_it = it_pool + 1; uint16_t slot_idx = 0; uint16_t segs_await =
> > +0; struct async_inflight_info *pkts_info = vq->async_pkts_info;
> > +uint32_t n_pkts = 0, pkt_err = 0; uint32_t num_async_pkts = 0,
> > +num_done_pkts = 0;
> > +
> > +rte_prefetch0(&vq->desc[vq->last_avail_idx & (vq->size - 1)]);
> > +
> > +for (pkt_idx = 0; pkt_idx < count; pkt_idx++) { if
> > +(unlikely(virtio_dev_rx_async_single_packed(dev, vq, pkts[pkt_idx],
> > +&num_desc, &num_buffers, src_iovec, dst_iovec, src_it, dst_it) < 0))
> > +{ break; }
> > +
> > +VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end
> > index %d\n",
> > +dev->vid, vq->last_avail_idx,
> > +vq->last_avail_idx + num_desc);
> > +
> > +slot_idx = (vq->async_pkts_idx + num_async_pkts) & (vq->size - 1); if
> > +(src_it->count) { uint16_t from, to;
> > +
> > +async_fill_desc(&tdes[pkt_burst_idx++], src_it, dst_it);
> > +pkts_info[slot_idx].descs = num_desc; pkts_info[slot_idx].nr_buffers
> > += num_buffers; pkts_info[slot_idx].mbuf = pkts[pkt_idx];
> > +num_async_pkts++; src_iovec += src_it->nr_segs; dst_iovec +=
> > +dst_it->nr_segs; src_it += 2; dst_it += 2; segs_await +=
> > +src_it->nr_segs;
> > +
> > +/**
> > + * recover shadow used ring and keep DMA-occupied
> > + * descriptors.
> > + */
> > +from = vq->shadow_used_idx - num_buffers;
> > +to = vq->async_packed_buffer_idx & (vq->size - 1);
> > +if (num_buffers + to <= vq->size) {
> > +rte_memcpy(&vq->async_buffers_packed[to],
> > +&vq->shadow_used_packed[from],
> > +num_buffers *
> > +sizeof(struct
> > vring_used_elem_packed));
> > +} else {
> > +int size = vq->size - to;
> > +
> > +rte_memcpy(&vq->async_buffers_packed[to],
> > +&vq->shadow_used_packed[from],
> > +size *
> > +sizeof(struct
> > vring_used_elem_packed));
> > +rte_memcpy(vq->async_buffers_packed,
> > +&vq->shadow_used_packed[from +
> > +size], (num_buffers - size) *
> > +sizeof(struct
> > vring_used_elem_packed));
> > +}
> > +vq->async_packed_buffer_idx += num_buffers;
> > +vq->shadow_used_idx -= num_buffers;
> > +} else
> > +comp_pkts[num_done_pkts++] = pkts[pkt_idx];
> > +
> > +vq_inc_last_avail_packed(vq, num_desc);
> > +
> > +/*
> > + * conditions to trigger async device transfer:
> > + * - buffered packet number reaches transfer threshold
> > + * - unused async iov number is less than max vhost vector
> > + */
> > +if (unlikely(pkt_burst_idx >=
> > VHOST_ASYNC_BATCH_THRESHOLD ||
> > +((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
> > +BUF_VECTOR_MAX))) {
> > +n_pkts = vq->async_ops.transfer_data(dev->vid,
> > +queue_id, tdes, 0, pkt_burst_idx);
> > +src_iovec = vec_pool;
> > +dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >>
> > 1);
> > +src_it = it_pool;
> > +dst_it = it_pool + 1;
> > +segs_await = 0;
> > +vq->async_pkts_inflight_n += n_pkts;
> > +
> > +if (unlikely(n_pkts < pkt_burst_idx)) {
> > +/*
> > + * log error packets number here and do
> > actual
> > + * error processing when applications poll
> > + * completion
> > + */
> > +pkt_err = pkt_burst_idx - n_pkts;
> > +pkt_burst_idx = 0;
> > +pkt_idx++;
> > +break;
> > +}
> > +
> > +pkt_burst_idx = 0;
> > +}
> > +}
> > +
> > +if (pkt_burst_idx) {
> > +n_pkts = vq->async_ops.transfer_data(dev->vid,
> > +queue_id, tdes, 0, pkt_burst_idx);
> > +vq->async_pkts_inflight_n += n_pkts;
> > +
> > +if (unlikely(n_pkts < pkt_burst_idx))
> > +pkt_err = pkt_burst_idx - n_pkts;
> > +}
> > +
> > +do_data_copy_enqueue(dev, vq);
> > +
> > +if (unlikely(pkt_err)) {
> > +uint16_t buffers_err = 0;
> > +uint16_t async_buffer_idx;
> > +uint16_t i;
> > +
> > +num_async_pkts -= pkt_err;
> > +pkt_idx -= pkt_err;
> > +/* calculate the sum of buffers of DMA-error packets. */
> > +while (pkt_err-- > 0) {
> > +buffers_err +=
> > +pkts_info[slot_idx & (vq->size - 1)].nr_buffers;
> > +slot_idx--;
> > +}
> > +
> > +vq->async_packed_buffer_idx -= buffers_err;
> > +async_buffer_idx = vq->async_packed_buffer_idx;
> > +/* set 0 to the length of descriptors of DMA-error packets */
> > +for (i = 0; i < buffers_err; i++) {
> > +vq->async_buffers_packed[(async_buffer_idx + i)
> > +& (vq->size - 1)].len = 0;
> > +}
> > +/* write back DMA-error descriptors to used ring */
> > +do {
> > +uint16_t from = async_buffer_idx & (vq->size - 1);
> > +uint16_t to = (from + buffers_err) & (vq->size - 1);
> > +
> > +if (to > from) {
> > +vhost_update_used_packed(dev, vq,
> > +vq->async_buffers_packed + from,
> > +to - from);
> > +buffers_err = 0;
> > +} else {
> > +vhost_update_used_packed(dev, vq,
> > +vq->async_buffers_packed + from,
> > +vq->size - from);
> > +buffers_err -= vq->size - from;
> > +}
> > +} while (buffers_err > 0);
> > +vhost_vring_call_packed(dev, vq);
> 
> Why notify front-end here?

The error handling method will be changed in the next version, so this notification will be removed.

> 
> > +num_done_pkts = pkt_idx - num_async_pkts;
> > +}
> > +
> > +vq->async_pkts_idx += num_async_pkts;
> > +*comp_count = num_done_pkts;
> > +
> > +if (likely(vq->shadow_used_idx)) {
> > +vhost_flush_enqueue_shadow_packed(dev, vq);
> > +vhost_vring_call_packed(dev, vq);
> > +}
> > +
> > +return pkt_idx;
> > +}
> 
> virtio_dev_rx_async_submit_packed is too long, and several parts of it are
> similar to the split ring code. I think you need to abstract the common parts
> into inline functions to make the code easier to read.

I'm not sure which parts can be easily factored into functions. Maybe we can have a discussion offline.

> 
> > +
> >  uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> >  struct rte_mbuf **pkts, uint16_t count)
> >  {
> >  struct virtio_net *dev = get_device(vid);
> >  struct vhost_virtqueue *vq;
> > -uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0;
> > +uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
> >  uint16_t start_idx, pkts_idx, vq_size;
> >  struct async_inflight_info *pkts_info;
> >  uint16_t from, i;
> > @@ -1680,53 +2037,96 @@ uint16_t
> > rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
> >  goto done;
> >  }
> >
> > -for (i = 0; i < n_pkts_put; i++) {
> > -from = (start_idx + i) & (vq_size - 1);
> > -n_descs += pkts_info[from].descs;
> > -pkts[i] = pkts_info[from].mbuf;
> > +if (vq_is_packed(dev)) {
> > +for (i = 0; i < n_pkts_put; i++) {
> > +from = (start_idx + i) & (vq_size - 1);
> > +n_buffers += pkts_info[from].nr_buffers;
> > +pkts[i] = pkts_info[from].mbuf;
> > +}
> > +} else {
> > +for (i = 0; i < n_pkts_put; i++) {
> > +from = (start_idx + i) & (vq_size - 1);
> > +n_descs += pkts_info[from].descs;
> > +pkts[i] = pkts_info[from].mbuf;
> > +}
> >  }
> > +
> >  vq->async_last_pkts_n = n_pkts_cpl - n_pkts_put;
> >  vq->async_pkts_inflight_n -= n_pkts_put;
> >
> >  if (likely(vq->enabled && vq->access_ok)) {
> > -uint16_t nr_left = n_descs;
> >  uint16_t nr_copy;
> >  uint16_t to;
> >
> >  /* write back completed descriptors to used ring */
> > -do {
> > -from = vq->last_async_desc_idx & (vq->size - 1);
> > -nr_copy = nr_left + from <= vq->size ? nr_left :
> > -vq->size - from;
> > -to = vq->last_used_idx & (vq->size - 1);
> > -
> > -if (to + nr_copy <= vq->size) {
> > -rte_memcpy(&vq->used->ring[to],
> > +if (vq_is_packed(dev)) {
> > +uint16_t nr_left = n_buffers;
> > +uint16_t to;
> > +do {
> > +from = vq->last_async_buffer_idx &
> > +(vq->size - 1);
> > +to = (from + nr_left) & (vq->size - 1);
> > +
> > +if (to > from) {
> > +vhost_update_used_packed(dev, vq,
> > +vq->async_buffers_packed +
> > from,
> > +to - from);
> > +vq->last_async_buffer_idx += nr_left;
> > +nr_left = 0;
> > +} else {
> > +vhost_update_used_packed(dev, vq,
> > +vq->async_buffers_packed +
> > from,
> > +vq->size - from);
> > +vq->last_async_buffer_idx +=
> > +vq->size -
> > from;
> > +nr_left -= vq->size - from;
> > +}
> > +} while (nr_left > 0);
> > +vhost_vring_call_packed(dev, vq);
> > +} else {
> > +uint16_t nr_left = n_descs;
> > +do {
> > +from = vq->last_async_desc_idx & (vq->size -
> > 1);
> > +nr_copy = nr_left + from <= vq->size ? nr_left :
> > +vq->size - from;
> > +to = vq->last_used_idx & (vq->size - 1);
> > +
> > +if (to + nr_copy <= vq->size) {
> > +rte_memcpy(&vq->used->ring[to],
> >  &vq-
> > >async_descs_split[from],
> >  nr_copy *
> >  sizeof(struct
> > vring_used_elem));
> > -} else {
> > -uint16_t size = vq->size - to;
> > +} else {
> > +uint16_t size = vq->size - to;
> >
> > -rte_memcpy(&vq->used->ring[to],
> > +rte_memcpy(&vq->used->ring[to],
> >  &vq-
> > >async_descs_split[from],
> >  size *
> >  sizeof(struct
> > vring_used_elem));
> > -rte_memcpy(vq->used->ring,
> > +rte_memcpy(vq->used->ring,
> >  &vq->async_descs_split[from
> > +
> >  size], (nr_copy - size) *
> >  sizeof(struct
> > vring_used_elem));
> > -}
> > +}
> > +
> > +vq->last_async_desc_idx += nr_copy;
> > +vq->last_used_idx += nr_copy;
> > +nr_left -= nr_copy;
> > +} while (nr_left > 0);
> > +
> > +__atomic_add_fetch(&vq->used->idx, n_descs,
> > +__ATOMIC_RELEASE);
> > +vhost_vring_call_split(dev, vq);
> > +}
> >
> > -vq->last_async_desc_idx += nr_copy;
> > -vq->last_used_idx += nr_copy;
> > -nr_left -= nr_copy;
> > -} while (nr_left > 0);
> >
> > -__atomic_add_fetch(&vq->used->idx, n_descs,
> > __ATOMIC_RELEASE);
> > -vhost_vring_call_split(dev, vq);
> > -} else
> > -vq->last_async_desc_idx += n_descs;
> > +
> > +} else {
> > +if (vq_is_packed(dev))
> > +vq->last_async_buffer_idx += n_buffers;
> > +else
> > +vq->last_async_desc_idx += n_descs;
> > +}
> 
> rte_vhost_poll_enqueue_completed is too long and not easy to read. Same
> suggestion as above.
> 

I can try to factor some of the code into functions, but I'm not sure if this is necessary. I will discuss it with you later.

Thanks,
Cheng

> Thanks,
> Jiayu
> 
> >
> >  done:
> >  rte_spinlock_unlock(&vq->access_lock);
> > @@ -1767,9 +2167,10 @@ virtio_dev_rx_async_submit(struct virtio_net
> > *dev, uint16_t queue_id,
> >  if (count == 0)
> >  goto out;
> >
> > -/* TODO: packed queue not implemented */
> >  if (vq_is_packed(dev))
> > -nb_tx = 0;
> > +nb_tx = virtio_dev_rx_async_submit_packed(dev,
> > +vq, queue_id, pkts, count, comp_pkts,
> > +comp_count);
> >  else
> >  nb_tx = virtio_dev_rx_async_submit_split(dev,
> >  vq, queue_id, pkts, count, comp_pkts,
> > --
> > 2.29.2
> 


^ permalink raw reply	[flat|nested] 13+ messages in thread

* [dpdk-dev] [PATCH v4 0/4] add support for packed ring in async vhost
  2021-03-17  8:54 [dpdk-dev] [PATCH] vhost: add support for packed ring in async vhost Cheng Jiang
  2021-03-22  6:15 ` [dpdk-dev] [PATCH v2] " Cheng Jiang
  2021-03-31 14:06 ` [dpdk-dev] [PATCH v3] " Cheng Jiang
@ 2021-04-10 10:25 ` Cheng Jiang
  2021-04-10 10:25   ` [dpdk-dev] [PATCH v4 1/4] vhost: abstract and reorganize async split ring code Cheng Jiang
                     ` (4 more replies)
  2 siblings, 5 replies; 13+ messages in thread
From: Cheng Jiang @ 2021-04-10 10:25 UTC (permalink / raw)
  To: maxime.coquelin, chenbo.xia
  Cc: dev, jiayu.hu, yvonnex.yang, yinan.wang, Cheng Jiang

For now the async vhost data path only supports the split ring structure.
In order to make async vhost compatible with the virtio 1.1 spec, this
patch set cleans up the async split ring code and enables packed ring in
the async vhost data path. The batch data path is also enabled for the
packed ring.

v4:
  * change the patch structure
  * clean code for async split ring
  * reuse some code from split ring
  * change the error handler for DMA-copy packet
  * add check for malloc
  * remove useless code
  * add doc update
v3:
  * fix error handler for DMA-copy packet
v2:
  * fix wrong buffer index in rte_vhost_poll_enqueue_completed()
  * add async_buffers_packed memory free in vhost_free_async_mem()

Cheng Jiang (4):
  vhost: abstract and reorganize async split ring code
  vhost: add support for packed ring in async vhost
  vhost: add batch datapath for async vhost packed ring
  doc: add release note for vhost async packed ring

 doc/guides/rel_notes/release_21_05.rst |   4 +
 lib/librte_vhost/rte_vhost_async.h     |   1 +
 lib/librte_vhost/vhost.c               |  27 +-
 lib/librte_vhost/vhost.h               |   7 +-
 lib/librte_vhost/virtio_net.c          | 603 ++++++++++++++++++++++---
 5 files changed, 560 insertions(+), 82 deletions(-)

--
2.29.2


^ permalink raw reply	[flat|nested] 13+ messages in thread

* [dpdk-dev] [PATCH v4 1/4] vhost: abstract and reorganize async split ring code
  2021-04-10 10:25 ` [dpdk-dev] [PATCH v4 0/4] " Cheng Jiang
@ 2021-04-10 10:25   ` Cheng Jiang
  2021-04-10 10:25   ` Cheng Jiang
                     ` (3 subsequent siblings)
  4 siblings, 0 replies; 13+ messages in thread
From: Cheng Jiang @ 2021-04-10 10:25 UTC (permalink / raw)
  To: maxime.coquelin, chenbo.xia
  Cc: dev, jiayu.hu, yvonnex.yang, yinan.wang, Cheng Jiang

In order to improve code efficiency and readability when async packed
ring support is enabled, this patch abstracts some functions like
shadow_ring_store and write_back_completed_descs_split, and improves
the efficiency of some pointer offset calculations.

Signed-off-by: Cheng Jiang <Cheng1.jiang@intel.com>
---
 lib/librte_vhost/virtio_net.c | 146 +++++++++++++++++++---------------
 1 file changed, 84 insertions(+), 62 deletions(-)

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index ff3987860..69553e7c3 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1458,6 +1458,29 @@ virtio_dev_rx_async_get_info_idx(uint16_t pkts_idx,
 		(vq_size - n_inflight + pkts_idx) & (vq_size - 1);
 }
 
+static __rte_always_inline void
+shadow_ring_store(struct vhost_virtqueue *vq,  void *shadow_ring, void *d_ring,
+		uint16_t s_idx, uint16_t d_idx,
+		uint16_t count, uint16_t elem_size)
+{
+	if (s_idx + count <= vq->size) {
+		rte_memcpy((void *)((uintptr_t)d_ring + d_idx * elem_size),
+			(void *)((uintptr_t)shadow_ring + s_idx * elem_size),
+			count * elem_size);
+	} else {
+		uint16_t size = vq->size - d_idx;
+
+		rte_memcpy((void *)((uintptr_t)d_ring + d_idx * elem_size),
+			(void *)((uintptr_t)shadow_ring + s_idx * elem_size),
+			size * elem_size);
+
+		rte_memcpy((void *)((uintptr_t)d_ring),
+			(void *)((uintptr_t)shadow_ring +
+				(s_idx + size) * elem_size),
+			(count - size) * elem_size);
+	}
+}
+
 static __rte_noinline uint32_t
 virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	struct vhost_virtqueue *vq, uint16_t queue_id,
@@ -1478,6 +1501,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	struct rte_vhost_iov_iter *dst_it = it_pool + 1;
 	uint16_t slot_idx = 0;
 	uint16_t segs_await = 0;
+	uint16_t iovec_idx = 0, it_idx = 0;
 	struct async_inflight_info *pkts_info = vq->async_pkts_info;
 	uint32_t n_pkts = 0, pkt_err = 0;
 	uint32_t num_async_pkts = 0, num_done_pkts = 0;
@@ -1513,27 +1537,32 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 
 		if (async_mbuf_to_desc(dev, vq, pkts[pkt_idx],
 				buf_vec, nr_vec, num_buffers,
-				src_iovec, dst_iovec, src_it, dst_it) < 0) {
+				&src_iovec[iovec_idx],
+				&dst_iovec[iovec_idx],
+				&src_it[it_idx],
+				&dst_it[it_idx]) < 0) {
 			vq->shadow_used_idx -= num_buffers;
 			break;
 		}
 
 		slot_idx = (vq->async_pkts_idx + num_async_pkts) &
 			(vq->size - 1);
-		if (src_it->count) {
+		if (src_it[it_idx].count) {
 			uint16_t from, to;
 
-			async_fill_desc(&tdes[pkt_burst_idx++], src_it, dst_it);
+			async_fill_desc(&tdes[pkt_burst_idx++],
+				&src_it[it_idx],
+				&dst_it[it_idx]);
 			pkts_info[slot_idx].descs = num_buffers;
 			pkts_info[slot_idx].mbuf = pkts[pkt_idx];
 			async_pkts_log[num_async_pkts].pkt_idx = pkt_idx;
 			async_pkts_log[num_async_pkts++].last_avail_idx =
 				vq->last_avail_idx;
-			src_iovec += src_it->nr_segs;
-			dst_iovec += dst_it->nr_segs;
-			src_it += 2;
-			dst_it += 2;
-			segs_await += src_it->nr_segs;
+
+			iovec_idx += src_it[it_idx].nr_segs;
+			it_idx += 2;
+
+			segs_await += src_it[it_idx].nr_segs;
 
 			/**
 			 * recover shadow used ring and keep DMA-occupied
@@ -1541,23 +1570,12 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 			 */
 			from = vq->shadow_used_idx - num_buffers;
 			to = vq->async_desc_idx & (vq->size - 1);
-			if (num_buffers + to <= vq->size) {
-				rte_memcpy(&vq->async_descs_split[to],
-						&vq->shadow_used_split[from],
-						num_buffers *
-						sizeof(struct vring_used_elem));
-			} else {
-				int size = vq->size - to;
-
-				rte_memcpy(&vq->async_descs_split[to],
-						&vq->shadow_used_split[from],
-						size *
-						sizeof(struct vring_used_elem));
-				rte_memcpy(vq->async_descs_split,
-						&vq->shadow_used_split[from +
-						size], (num_buffers - size) *
-					   sizeof(struct vring_used_elem));
-			}
+
+			shadow_ring_store(vq, vq->shadow_used_split,
+					vq->async_descs_split,
+					from, to, num_buffers,
+					sizeof(struct vring_used_elem));
+
 			vq->async_desc_idx += num_buffers;
 			vq->shadow_used_idx -= num_buffers;
 		} else
@@ -1575,10 +1593,9 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 			BUF_VECTOR_MAX))) {
 			n_pkts = vq->async_ops.transfer_data(dev->vid,
 					queue_id, tdes, 0, pkt_burst_idx);
-			src_iovec = vec_pool;
-			dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
-			src_it = it_pool;
-			dst_it = it_pool + 1;
+			iovec_idx = 0;
+			it_idx = 0;
+
 			segs_await = 0;
 			vq->async_pkts_inflight_n += n_pkts;
 
@@ -1639,6 +1656,43 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	return pkt_idx;
 }
 
+static __rte_always_inline void
+write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
+{
+	uint16_t nr_left = n_descs;
+	uint16_t nr_copy;
+	uint16_t to, from;
+
+	do {
+		from = vq->last_async_desc_idx & (vq->size - 1);
+		nr_copy = nr_left + from <= vq->size ? nr_left :
+			vq->size - from;
+		to = vq->last_used_idx & (vq->size - 1);
+
+		if (to + nr_copy <= vq->size) {
+			rte_memcpy(&vq->used->ring[to],
+					&vq->async_descs_split[from],
+					nr_copy *
+					sizeof(struct vring_used_elem));
+		} else {
+			uint16_t size = vq->size - to;
+
+			rte_memcpy(&vq->used->ring[to],
+					&vq->async_descs_split[from],
+					size *
+					sizeof(struct vring_used_elem));
+			rte_memcpy(vq->used->ring,
+					&vq->async_descs_split[from +
+					size], (nr_copy - size) *
+					sizeof(struct vring_used_elem));
+		}
+
+		vq->last_async_desc_idx += nr_copy;
+		vq->last_used_idx += nr_copy;
+		nr_left -= nr_copy;
+	} while (nr_left > 0);
+}
+
 uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 		struct rte_mbuf **pkts, uint16_t count)
 {
@@ -1695,39 +1749,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 	vq->async_pkts_inflight_n -= n_pkts_put;
 
 	if (likely(vq->enabled && vq->access_ok)) {
-		uint16_t nr_left = n_descs;
-		uint16_t nr_copy;
-		uint16_t to;
-
-		/* write back completed descriptors to used ring */
-		do {
-			from = vq->last_async_desc_idx & (vq->size - 1);
-			nr_copy = nr_left + from <= vq->size ? nr_left :
-				vq->size - from;
-			to = vq->last_used_idx & (vq->size - 1);
-
-			if (to + nr_copy <= vq->size) {
-				rte_memcpy(&vq->used->ring[to],
-						&vq->async_descs_split[from],
-						nr_copy *
-						sizeof(struct vring_used_elem));
-			} else {
-				uint16_t size = vq->size - to;
-
-				rte_memcpy(&vq->used->ring[to],
-						&vq->async_descs_split[from],
-						size *
-						sizeof(struct vring_used_elem));
-				rte_memcpy(vq->used->ring,
-						&vq->async_descs_split[from +
-						size], (nr_copy - size) *
-						sizeof(struct vring_used_elem));
-			}
-
-			vq->last_async_desc_idx += nr_copy;
-			vq->last_used_idx += nr_copy;
-			nr_left -= nr_copy;
-		} while (nr_left > 0);
+		write_back_completed_descs_split(vq, n_descs);
 
 		__atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
 		vhost_vring_call_split(dev, vq);
-- 
2.29.2


^ permalink raw reply	[flat|nested] 13+ messages in thread

* [dpdk-dev] [PATCH v4 2/4] vhost: add support for packed ring in async vhost
  2021-04-10 10:25 ` [dpdk-dev] [PATCH v4 0/4] " Cheng Jiang
  2021-04-10 10:25   ` [dpdk-dev] [PATCH v4 1/4] vhost: abstract and reorganize async split ring code Cheng Jiang
  2021-04-10 10:25   ` Cheng Jiang
@ 2021-04-10 10:25   ` Cheng Jiang
  2021-04-10 10:25   ` [dpdk-dev] [PATCH v4 3/4] vhost: add batch datapath for async vhost packed ring Cheng Jiang
  2021-04-10 10:25   ` [dpdk-dev] [PATCH v4 4/4] doc: add release note for vhost async " Cheng Jiang
  4 siblings, 0 replies; 13+ messages in thread
From: Cheng Jiang @ 2021-04-10 10:25 UTC (permalink / raw)
  To: maxime.coquelin, chenbo.xia
  Cc: dev, jiayu.hu, yvonnex.yang, yinan.wang, Cheng Jiang

For now async vhost data path only supports split ring structure. In
order to make async vhost compatible with virtio 1.1 spec this patch
enables packed ring in async vhost data path.

Signed-off-by: Cheng Jiang <Cheng1.jiang@intel.com>
---
 lib/librte_vhost/rte_vhost_async.h |   1 +
 lib/librte_vhost/vhost.c           |  27 +-
 lib/librte_vhost/vhost.h           |   7 +-
 lib/librte_vhost/virtio_net.c      | 428 ++++++++++++++++++++++++++++-
 4 files changed, 441 insertions(+), 22 deletions(-)

diff --git a/lib/librte_vhost/rte_vhost_async.h b/lib/librte_vhost/rte_vhost_async.h
index c855ff875..6faa31f5a 100644
--- a/lib/librte_vhost/rte_vhost_async.h
+++ b/lib/librte_vhost/rte_vhost_async.h
@@ -89,6 +89,7 @@ struct rte_vhost_async_channel_ops {
 struct async_inflight_info {
 	struct rte_mbuf *mbuf;
 	uint16_t descs; /* num of descs inflight */
+	uint16_t nr_buffers; /* num of buffers inflight for packed ring */
 };
 
 /**
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index a70fe01d8..8c9935c0f 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -342,15 +342,21 @@ vhost_free_async_mem(struct vhost_virtqueue *vq)
 {
 	if (vq->async_pkts_info)
 		rte_free(vq->async_pkts_info);
-	if (vq->async_descs_split)
+	if (vq->async_buffers_packed) {
+		rte_free(vq->async_buffers_packed);
+		vq->async_buffers_packed = NULL;
+	}
+	if (vq->async_descs_split) {
 		rte_free(vq->async_descs_split);
+		vq->async_descs_split = NULL;
+	}
+
 	if (vq->it_pool)
 		rte_free(vq->it_pool);
 	if (vq->vec_pool)
 		rte_free(vq->vec_pool);
 
 	vq->async_pkts_info = NULL;
-	vq->async_descs_split = NULL;
 	vq->it_pool = NULL;
 	vq->vec_pool = NULL;
 }
@@ -1627,9 +1633,9 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
 		return -1;
 
 	/* packed queue is not supported */
-	if (unlikely(vq_is_packed(dev) || !f.async_inorder)) {
+	if (unlikely(!f.async_inorder)) {
 		VHOST_LOG_CONFIG(ERR,
-			"async copy is not supported on packed queue or non-inorder mode "
+			"async copy is not supported on non-inorder mode "
 			"(vid %d, qid: %d)\n", vid, queue_id);
 		return -1;
 	}
@@ -1667,11 +1673,18 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
 	vq->vec_pool = rte_malloc_socket(NULL,
 			VHOST_MAX_ASYNC_VEC * sizeof(struct iovec),
 			RTE_CACHE_LINE_SIZE, node);
-	vq->async_descs_split = rte_malloc_socket(NULL,
+	if (vq_is_packed(dev)) {
+		vq->async_buffers_packed = rte_malloc_socket(NULL,
+			vq->size * sizeof(struct vring_used_elem_packed),
+			RTE_CACHE_LINE_SIZE, node);
+	} else {
+		vq->async_descs_split = rte_malloc_socket(NULL,
 			vq->size * sizeof(struct vring_used_elem),
 			RTE_CACHE_LINE_SIZE, node);
-	if (!vq->async_descs_split || !vq->async_pkts_info ||
-		!vq->it_pool || !vq->vec_pool) {
+	}
+
+	if (!vq->async_buffers_packed || !vq->async_descs_split ||
+		!vq->async_pkts_info || !vq->it_pool || !vq->vec_pool) {
 		vhost_free_async_mem(vq);
 		VHOST_LOG_CONFIG(ERR,
 				"async register failed: cannot allocate memory for vq data "
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index f628714c2..fe131ae8f 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -201,9 +201,14 @@ struct vhost_virtqueue {
 	uint16_t	async_pkts_idx;
 	uint16_t	async_pkts_inflight_n;
 	uint16_t	async_last_pkts_n;
-	struct vring_used_elem  *async_descs_split;
+	union {
+		struct vring_used_elem  *async_descs_split;
+		struct vring_used_elem_packed *async_buffers_packed;
+	};
 	uint16_t async_desc_idx;
+	uint16_t async_packed_buffer_idx;
 	uint16_t last_async_desc_idx;
+	uint16_t last_async_buffer_idx;
 
 	/* vq async features */
 	bool		async_inorder;
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 69553e7c3..2b8b873ca 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -363,8 +363,7 @@ vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
 }
 
 static __rte_always_inline void
-vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
-				   struct vhost_virtqueue *vq,
+vhost_shadow_enqueue_packed(struct vhost_virtqueue *vq,
 				   uint32_t len[],
 				   uint16_t id[],
 				   uint16_t count[],
@@ -382,6 +381,17 @@ vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
 		vq->shadow_aligned_idx += count[i];
 		vq->shadow_used_idx++;
 	}
+}
+
+static __rte_always_inline void
+vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
+				   struct vhost_virtqueue *vq,
+				   uint32_t len[],
+				   uint16_t id[],
+				   uint16_t count[],
+				   uint16_t num_buffers)
+{
+	vhost_shadow_enqueue_packed(vq, len, id, count, num_buffers);
 
 	if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
 		do_data_copy_enqueue(dev, vq);
@@ -1481,6 +1491,61 @@ shadow_ring_store(struct vhost_virtqueue *vq,  void *shadow_ring, void *d_ring,
 	}
 }
 
+static __rte_always_inline void
+vhost_update_used_packed(struct vhost_virtqueue *vq,
+			struct vring_used_elem_packed *shadow_ring,
+			uint16_t count)
+{
+	if (count == 0)
+		return;
+	int i;
+	uint16_t used_idx = vq->last_used_idx;
+	uint16_t head_idx = vq->last_used_idx;
+	uint16_t head_flags = 0;
+
+	/* Split loop in two to save memory barriers */
+	for (i = 0; i < count; i++) {
+		vq->desc_packed[used_idx].id = shadow_ring[i].id;
+		vq->desc_packed[used_idx].len = shadow_ring[i].len;
+
+		used_idx += shadow_ring[i].count;
+		if (used_idx >= vq->size)
+			used_idx -= vq->size;
+	}
+
+	/* The ordering for storing desc flags needs to be enforced. */
+	rte_atomic_thread_fence(__ATOMIC_RELEASE);
+
+	for (i = 0; i < count; i++) {
+		uint16_t flags;
+
+		if (vq->shadow_used_packed[i].len)
+			flags = VRING_DESC_F_WRITE;
+		else
+			flags = 0;
+
+		if (vq->used_wrap_counter) {
+			flags |= VRING_DESC_F_USED;
+			flags |= VRING_DESC_F_AVAIL;
+		} else {
+			flags &= ~VRING_DESC_F_USED;
+			flags &= ~VRING_DESC_F_AVAIL;
+		}
+
+		if (i > 0) {
+			vq->desc_packed[vq->last_used_idx].flags = flags;
+
+		} else {
+			head_idx = vq->last_used_idx;
+			head_flags = flags;
+		}
+
+		vq_inc_last_used_packed(vq, shadow_ring[i].count);
+	}
+
+	vq->desc_packed[head_idx].flags = head_flags;
+}
+
 static __rte_noinline uint32_t
 virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	struct vhost_virtqueue *vq, uint16_t queue_id,
@@ -1656,6 +1721,293 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	return pkt_idx;
 }
 
+static __rte_always_inline int
+vhost_enqueue_async_single_packed(struct virtio_net *dev,
+			    struct vhost_virtqueue *vq,
+			    struct rte_mbuf *pkt,
+			    struct buf_vector *buf_vec,
+			    uint16_t *nr_descs,
+			    uint16_t *nr_buffers,
+			    struct vring_packed_desc *async_descs,
+			    struct iovec *src_iovec, struct iovec *dst_iovec,
+			    struct rte_vhost_iov_iter *src_it,
+			    struct rte_vhost_iov_iter *dst_it)
+{
+	uint16_t nr_vec = 0;
+	uint16_t avail_idx = vq->last_avail_idx;
+	uint16_t max_tries, tries = 0;
+	uint16_t buf_id = 0;
+	uint32_t len = 0;
+	uint16_t desc_count = 0;
+	uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
+	uint32_t buffer_len[vq->size];
+	uint16_t buffer_buf_id[vq->size];
+	uint16_t buffer_desc_count[vq->size];
+	*nr_buffers = 0;
+
+	if (rxvq_is_mergeable(dev))
+		max_tries = vq->size - 1;
+	else
+		max_tries = 1;
+
+	while (size > 0) {
+		/*
+		 * if we tried all available ring items, and still
+		 * can't get enough buf, it means something abnormal
+		 * happened.
+		 */
+		if (unlikely(++tries > max_tries))
+			return -1;
+
+		if (unlikely(fill_vec_buf_packed(dev, vq,
+						avail_idx, &desc_count,
+						buf_vec, &nr_vec,
+						&buf_id, &len,
+						VHOST_ACCESS_RW) < 0))
+			return -1;
+
+		len = RTE_MIN(len, size);
+		size -= len;
+
+		buffer_len[*nr_buffers] = len;
+		buffer_buf_id[*nr_buffers] = buf_id;
+		buffer_desc_count[*nr_buffers] = desc_count;
+		*nr_buffers += 1;
+
+		*nr_descs += desc_count;
+		avail_idx += desc_count;
+		if (avail_idx >= vq->size)
+			avail_idx -= vq->size;
+	}
+
+	if (async_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, *nr_buffers,
+		src_iovec, dst_iovec, src_it, dst_it) < 0)
+		return -1;
+	/* store descriptors for DMA */
+	if (avail_idx >= *nr_descs)
+		rte_memcpy(async_descs,
+			&vq->desc_packed[vq->last_avail_idx],
+			*nr_descs * sizeof(struct vring_packed_desc));
+	else {
+		uint16_t nr_copy = vq->size - vq->last_avail_idx;
+		rte_memcpy(async_descs,
+			&vq->desc_packed[vq->last_avail_idx],
+			nr_copy * sizeof(struct vring_packed_desc));
+		rte_memcpy(async_descs + nr_copy,
+			vq->desc_packed, (*nr_descs - nr_copy) *
+			sizeof(struct vring_packed_desc));
+	}
+
+	vhost_shadow_enqueue_packed(vq, buffer_len, buffer_buf_id,
+					   buffer_desc_count, *nr_buffers);
+
+	return 0;
+}
+
+static __rte_always_inline int16_t
+virtio_dev_rx_async_single_packed(struct virtio_net *dev,
+			    struct vhost_virtqueue *vq,
+			    struct rte_mbuf *pkt,
+			    uint16_t *nr_descs, uint16_t *nr_buffers,
+			    struct vring_packed_desc *async_descs,
+			    struct iovec *src_iovec, struct iovec *dst_iovec,
+			    struct rte_vhost_iov_iter *src_it,
+			    struct rte_vhost_iov_iter *dst_it)
+{
+	struct buf_vector buf_vec[BUF_VECTOR_MAX];
+	*nr_descs = 0;
+	*nr_buffers = 0;
+
+	if (unlikely(vhost_enqueue_async_single_packed(dev, vq, pkt, buf_vec,
+						 nr_descs,
+						 nr_buffers,
+						 async_descs,
+						 src_iovec, dst_iovec,
+						 src_it, dst_it) < 0)) {
+		VHOST_LOG_DATA(DEBUG,
+				"(%d) failed to get enough desc from vring\n",
+				dev->vid);
+		return -1;
+	}
+
+	VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
+			dev->vid, vq->last_avail_idx,
+			vq->last_avail_idx + *nr_descs);
+
+	return 0;
+}
+
+static __rte_noinline uint32_t
+virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
+	struct vhost_virtqueue *vq, uint16_t queue_id,
+	struct rte_mbuf **pkts, uint32_t count,
+	struct rte_mbuf **comp_pkts, uint32_t *comp_count)
+{
+	uint32_t pkt_idx = 0, pkt_burst_idx = 0;
+	uint16_t async_descs_idx = 0;
+	uint16_t num_buffers;
+	uint16_t num_desc;
+
+	struct rte_vhost_iov_iter *it_pool = vq->it_pool;
+	struct iovec *vec_pool = vq->vec_pool;
+	struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
+	struct iovec *src_iovec = vec_pool;
+	struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
+	struct rte_vhost_iov_iter *src_it = it_pool;
+	struct rte_vhost_iov_iter *dst_it = it_pool + 1;
+	uint16_t slot_idx = 0;
+	uint16_t segs_await = 0;
+	uint16_t iovec_idx = 0, it_idx = 0;
+	struct async_inflight_info *pkts_info = vq->async_pkts_info;
+	uint32_t n_pkts = 0, pkt_err = 0;
+	uint32_t num_async_pkts = 0, num_done_pkts = 0;
+	struct vring_packed_desc async_descs[vq->size];
+
+	rte_prefetch0(&vq->desc_packed[vq->last_avail_idx & (vq->size - 1)]);
+
+	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
+		if (unlikely(virtio_dev_rx_async_single_packed(dev, vq,
+						pkts[pkt_idx],
+						&num_desc, &num_buffers,
+						&async_descs[async_descs_idx],
+						&src_iovec[iovec_idx],
+						&dst_iovec[iovec_idx],
+						&src_it[it_idx],
+						&dst_it[it_idx]) < 0))
+			break;
+
+		VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
+			dev->vid, vq->last_avail_idx,
+			vq->last_avail_idx + num_desc);
+
+		slot_idx = (vq->async_pkts_idx + num_async_pkts) &
+			(vq->size - 1);
+		if (src_it[it_idx].count) {
+			uint16_t from, to;
+
+			async_descs_idx += num_desc;
+			async_fill_desc(&tdes[pkt_burst_idx++], &src_it[it_idx],
+					&dst_it[it_idx]);
+			pkts_info[slot_idx].descs = num_desc;
+			pkts_info[slot_idx].nr_buffers = num_buffers;
+			pkts_info[slot_idx].mbuf = pkts[pkt_idx];
+			num_async_pkts++;
+			iovec_idx += src_it[it_idx].nr_segs;
+			segs_await += src_it[it_idx].nr_segs;
+			it_idx += 2;
+
+			/**
+			 * recover shadow used ring and keep DMA-occupied
+			 * descriptors.
+			 */
+			from = vq->shadow_used_idx - num_buffers;
+			to = vq->async_packed_buffer_idx & (vq->size - 1);
+			shadow_ring_store(vq, vq->shadow_used_packed,
+					vq->async_buffers_packed,
+					from, to, num_buffers,
+					sizeof(struct vring_used_elem_packed));
+
+			vq->async_packed_buffer_idx += num_buffers;
+			vq->shadow_used_idx -= num_buffers;
+		} else
+			comp_pkts[num_done_pkts++] = pkts[pkt_idx];
+
+		vq_inc_last_avail_packed(vq, num_desc);
+
+		/*
+		 * conditions to trigger async device transfer:
+		 * - buffered packet number reaches transfer threshold
+		 * - unused async iov number is less than max vhost vector
+		 */
+		if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
+			((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
+			BUF_VECTOR_MAX))) {
+			n_pkts = vq->async_ops.transfer_data(dev->vid,
+					queue_id, tdes, 0, pkt_burst_idx);
+			iovec_idx = 0;
+			it_idx = 0;
+			segs_await = 0;
+			vq->async_pkts_inflight_n += n_pkts;
+
+			if (unlikely(n_pkts < pkt_burst_idx)) {
+				/*
+				 * log error packets number here and do actual
+				 * error processing when applications poll
+				 * completion
+				 */
+				pkt_err = pkt_burst_idx - n_pkts;
+				pkt_burst_idx = 0;
+				pkt_idx++;
+				break;
+			}
+
+			pkt_burst_idx = 0;
+		}
+	}
+
+	if (pkt_burst_idx) {
+		n_pkts = vq->async_ops.transfer_data(dev->vid,
+				queue_id, tdes, 0, pkt_burst_idx);
+		vq->async_pkts_inflight_n += n_pkts;
+
+		if (unlikely(n_pkts < pkt_burst_idx))
+			pkt_err = pkt_burst_idx - n_pkts;
+	}
+
+	do_data_copy_enqueue(dev, vq);
+
+	if (unlikely(pkt_err)) {
+		uint16_t descs_err = 0;
+		uint16_t buffers_err = 0;
+
+		num_async_pkts -= pkt_err;
+		pkt_idx -= pkt_err;
+		/* calculate the sum of buffers and descs of DMA-error packets. */
+		while (pkt_err-- > 0) {
+			descs_err +=
+				pkts_info[slot_idx & (vq->size - 1)].descs;
+			buffers_err +=
+				pkts_info[slot_idx & (vq->size - 1)].nr_buffers;
+			slot_idx--;
+		}
+
+		vq->async_packed_buffer_idx -= buffers_err;
+
+		if (vq->last_avail_idx >= descs_err) {
+			vq->last_avail_idx -= descs_err;
+
+			rte_memcpy(&vq->desc_packed[vq->last_avail_idx],
+				&async_descs[async_descs_idx - descs_err],
+				descs_err * sizeof(struct vring_packed_desc));
+		} else {
+			uint16_t nr_copy;
+
+			vq->last_avail_idx = vq->last_avail_idx + vq->size
+						- descs_err;
+			nr_copy = vq->size - vq->last_avail_idx;
+			rte_memcpy(&vq->desc_packed[vq->last_avail_idx],
+				&async_descs[async_descs_idx - descs_err],
+				nr_copy * sizeof(struct vring_packed_desc));
+			descs_err -= nr_copy;
+			rte_memcpy(vq->desc_packed,
+				&async_descs[async_descs_idx - descs_err],
+				descs_err * sizeof(struct vring_packed_desc));
+			vq->avail_wrap_counter ^= 1;
+		}
+
+		num_done_pkts = pkt_idx - num_async_pkts;
+	}
+	vq->async_pkts_idx += num_async_pkts;
+	*comp_count = num_done_pkts;
+
+	if (likely(vq->shadow_used_idx)) {
+		vhost_flush_enqueue_shadow_packed(dev, vq);
+		vhost_vring_call_packed(dev, vq);
+	}
+
+	return pkt_idx;
+}
+
 static __rte_always_inline void
 write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
 {
@@ -1693,12 +2045,39 @@ write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
 	} while (nr_left > 0);
 }
 
+static __rte_always_inline void
+write_back_completed_descs_packed(struct vhost_virtqueue *vq,
+				uint16_t n_buffers)
+{
+	uint16_t nr_left = n_buffers;
+	uint16_t from, to;
+	do {
+		from = vq->last_async_buffer_idx & (vq->size - 1);
+		to = (from + nr_left) & (vq->size - 1);
+		if (to > from) {
+			vhost_update_used_packed(vq,
+				vq->async_buffers_packed + from,
+				to - from);
+			vq->last_async_buffer_idx += nr_left;
+			nr_left = 0;
+		} else {
+			vhost_update_used_packed(vq,
+				vq->async_buffers_packed + from,
+				vq->size - from);
+			vq->last_async_buffer_idx += vq->size - from;
+			nr_left -= vq->size - from;
+		}
+	} while (nr_left > 0);
+}
+
 uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 		struct rte_mbuf **pkts, uint16_t count)
 {
 	struct virtio_net *dev = get_device(vid);
 	struct vhost_virtqueue *vq;
-	uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0;
+	uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
 	uint16_t start_idx, pkts_idx, vq_size;
 	struct async_inflight_info *pkts_info;
 	uint16_t from, i;
@@ -1740,21 +2119,41 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 		goto done;
 	}
 
-	for (i = 0; i < n_pkts_put; i++) {
-		from = (start_idx + i) & (vq_size - 1);
-		n_descs += pkts_info[from].descs;
-		pkts[i] = pkts_info[from].mbuf;
+	if (vq_is_packed(dev)) {
+		for (i = 0; i < n_pkts_put; i++) {
+			from = (start_idx + i) & (vq_size - 1);
+			n_buffers += pkts_info[from].nr_buffers;
+			pkts[i] = pkts_info[from].mbuf;
+		}
+	} else {
+		for (i = 0; i < n_pkts_put; i++) {
+			from = (start_idx + i) & (vq_size - 1);
+			n_descs += pkts_info[from].descs;
+			pkts[i] = pkts_info[from].mbuf;
+		}
 	}
+
 	vq->async_last_pkts_n = n_pkts_cpl - n_pkts_put;
 	vq->async_pkts_inflight_n -= n_pkts_put;
 
 	if (likely(vq->enabled && vq->access_ok)) {
-		write_back_completed_descs_split(vq, n_descs);
+		if (vq_is_packed(dev)) {
+			write_back_completed_descs_packed(vq, n_buffers);
 
-		__atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
-		vhost_vring_call_split(dev, vq);
-	} else
-		vq->last_async_desc_idx += n_descs;
+			vhost_vring_call_packed(dev, vq);
+		} else {
+			write_back_completed_descs_split(vq, n_descs);
+
+			__atomic_add_fetch(&vq->used->idx, n_descs,
+					__ATOMIC_RELEASE);
+			vhost_vring_call_split(dev, vq);
+		}
+	} else {
+		if (vq_is_packed(dev))
+			vq->last_async_buffer_idx += n_buffers;
+		else
+			vq->last_async_desc_idx += n_descs;
+	}
 
 done:
 	rte_spinlock_unlock(&vq->access_lock);
@@ -1795,9 +2194,10 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
 	if (count == 0)
 		goto out;
 
-	/* TODO: packed queue not implemented */
 	if (vq_is_packed(dev))
-		nb_tx = 0;
+		nb_tx = virtio_dev_rx_async_submit_packed(dev,
+				vq, queue_id, pkts, count, comp_pkts,
+				comp_count);
 	else
 		nb_tx = virtio_dev_rx_async_submit_split(dev,
 				vq, queue_id, pkts, count, comp_pkts,
-- 
2.29.2


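For readers less familiar with the packed-ring layout, the write-back in
vhost_update_used_packed() above relies on one convention: a descriptor is
flagged as used by making its AVAIL and USED bits both match the device's
current used wrap counter, and WRITE is kept only when the device actually
wrote into the buffer (len != 0 on RX). A minimal standalone sketch of that
flag computation, with an illustrative helper name rather than the
librte_vhost internals:

#include <stdbool.h>
#include <stdint.h>

#define VRING_DESC_F_WRITE	(1 << 1)
#define VRING_DESC_F_AVAIL	(1 << 7)
#define VRING_DESC_F_USED	(1 << 15)

/*
 * Flags for one used descriptor in a packed ring: AVAIL and USED are
 * both set (or both cleared) so that they match the device's current
 * used wrap counter; WRITE marks a buffer the device wrote into.
 */
static inline uint16_t
packed_used_flags(bool used_wrap_counter, uint32_t len)
{
	uint16_t flags = len ? VRING_DESC_F_WRITE : 0;

	if (used_wrap_counter)
		flags |= VRING_DESC_F_AVAIL | VRING_DESC_F_USED;

	return flags;
}

This is also why the patch stores the head descriptor's flags last, after the
release fence: the head flags are what the driver polls, so all ids and
lengths must be visible before the chain appears used.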

* [dpdk-dev] [PATCH v4 3/4] vhost: add batch datapath for async vhost packed ring
  2021-04-10 10:25 ` [dpdk-dev] [PATCH v4 0/4] " Cheng Jiang
                     ` (2 preceding siblings ...)
  2021-04-10 10:25   ` [dpdk-dev] [PATCH v4 2/4] vhost: add support for packed ring in async vhost Cheng Jiang
@ 2021-04-10 10:25   ` Cheng Jiang
  2021-04-10 10:25   ` [dpdk-dev] [PATCH v4 4/4] doc: add release note for vhost async " Cheng Jiang
  4 siblings, 0 replies; 13+ messages in thread
From: Cheng Jiang @ 2021-04-10 10:25 UTC (permalink / raw)
  To: maxime.coquelin, chenbo.xia
  Cc: dev, jiayu.hu, yvonnex.yang, yinan.wang, Cheng Jiang

Add a batch datapath for the async vhost packed ring to improve the
performance of small packets.

Signed-off-by: Cheng Jiang <Cheng1.jiang@intel.com>
---
 lib/librte_vhost/virtio_net.c | 43 +++++++++++++++++++++++++++++++----
 1 file changed, 38 insertions(+), 5 deletions(-)

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 2b8b873ca..c98fe6dbb 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1721,6 +1721,29 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	return pkt_idx;
 }
 
+static __rte_always_inline int
+virtio_dev_rx_async_batch_packed(struct virtio_net *dev,
+			   struct vhost_virtqueue *vq,
+			   struct rte_mbuf **pkts,
+			   struct rte_mbuf **comp_pkts, uint32_t *pkt_done)
+{
+	uint16_t i;
+	uint32_t cpy_threshold = vq->async_threshold;
+
+	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+		if (unlikely(pkts[i]->pkt_len >= cpy_threshold))
+			return -1;
+	}
+	if (!virtio_dev_rx_batch_packed(dev, vq, pkts)) {
+		vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+			comp_pkts[(*pkt_done)++] = pkts[i];
+
+		return 0;
+	}
+
+	return -1;
+}
+
 static __rte_always_inline int
 vhost_enqueue_async_single_packed(struct virtio_net *dev,
 			    struct vhost_virtqueue *vq,
@@ -1844,6 +1867,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 	struct rte_mbuf **comp_pkts, uint32_t *comp_count)
 {
 	uint32_t pkt_idx = 0, pkt_burst_idx = 0;
+	uint32_t remained = count;
 	uint16_t async_descs_idx = 0;
 	uint16_t num_buffers;
 	uint16_t num_desc;
@@ -1863,9 +1887,17 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 	uint32_t num_async_pkts = 0, num_done_pkts = 0;
 	struct vring_packed_desc async_descs[vq->size];
 
-	rte_prefetch0(&vq->desc_packed[vq->last_avail_idx & (vq->size - 1)]);
-
-	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
+	do {
+		rte_prefetch0(&vq->desc_packed[vq->last_avail_idx &
+							(vq->size - 1)]);
+		if (remained >= PACKED_BATCH_SIZE) {
+			if (!virtio_dev_rx_async_batch_packed(dev, vq,
+				&pkts[pkt_idx], comp_pkts, &num_done_pkts)) {
+				pkt_idx += PACKED_BATCH_SIZE;
+				remained -= PACKED_BATCH_SIZE;
+				continue;
+			}
+		}
 		if (unlikely(virtio_dev_rx_async_single_packed(dev, vq,
 						pkts[pkt_idx],
 						&num_desc, &num_buffers,
@@ -1912,6 +1944,8 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 		} else
 			comp_pkts[num_done_pkts++] = pkts[pkt_idx];
 
+		pkt_idx++;
+		remained--;
 		vq_inc_last_avail_packed(vq, num_desc);
 
 		/*
@@ -1937,13 +1971,12 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 				 */
 				pkt_err = pkt_burst_idx - n_pkts;
 				pkt_burst_idx = 0;
-				pkt_idx++;
 				break;
 			}
 
 			pkt_burst_idx = 0;
 		}
-	}
+	} while (pkt_idx < count);
 
 	if (pkt_burst_idx) {
 		n_pkts = vq->async_ops.transfer_data(dev->vid,
-- 
2.29.2


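The batch path added here only fires when every packet in a PACKED_BATCH_SIZE
burst is smaller than the async copy threshold; such bursts are copied by the
CPU through the existing sync batch routine and reported as completed
immediately, while larger packets continue down the per-packet async path. A
rough standalone sketch of that eligibility check (hypothetical helper, not
the actual vhost code):

#include <stdbool.h>
#include <stdint.h>

#include <rte_mbuf.h>

/*
 * Return true when every packet in the burst is below the async copy
 * threshold, i.e. the whole burst can be completed by a CPU copy
 * instead of being handed to the DMA engine.
 */
static inline bool
burst_fits_cpu_batch(struct rte_mbuf **pkts, uint16_t nb_pkts,
		uint32_t cpy_threshold)
{
	uint16_t i;

	for (i = 0; i < nb_pkts; i++)
		if (pkts[i]->pkt_len >= cpy_threshold)
			return false;

	return true;
}

Keeping short packets on the CPU path avoids paying DMA setup and
completion-polling overhead for copies that are cheaper to do inline.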

* [dpdk-dev] [PATCH v4 4/4] doc: add release note for vhost async packed ring
  2021-04-10 10:25 ` [dpdk-dev] [PATCH v4 0/4] " Cheng Jiang
                     ` (3 preceding siblings ...)
  2021-04-10 10:25   ` [dpdk-dev] [PATCH v4 3/4] vhost: add batch datapath for async vhost packed ring Cheng Jiang
@ 2021-04-10 10:25   ` Cheng Jiang
  4 siblings, 0 replies; 13+ messages in thread
From: Cheng Jiang @ 2021-04-10 10:25 UTC (permalink / raw)
  To: maxime.coquelin, chenbo.xia
  Cc: dev, jiayu.hu, yvonnex.yang, yinan.wang, Cheng Jiang

Add a release note for vhost async packed ring support.

Signed-off-by: Cheng Jiang <Cheng1.jiang@intel.com>
---
 doc/guides/rel_notes/release_21_05.rst | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/doc/guides/rel_notes/release_21_05.rst b/doc/guides/rel_notes/release_21_05.rst
index 374d6d98e..eb5200669 100644
--- a/doc/guides/rel_notes/release_21_05.rst
+++ b/doc/guides/rel_notes/release_21_05.rst
@@ -131,6 +131,10 @@ New Features
   * Added command to display Rx queue used descriptor count.
     ``show port (port_id) rxq (queue_id) desc used count``
 
+* **Added support for vhost async packed ring data path.**
+
+  Added packed ring support for async vhost.
+
 
 Removed Items
 -------------
-- 
2.29.2



