DPDK patches and discussions
From: Cheng Jiang <Cheng1.jiang@intel.com>
To: maxime.coquelin@redhat.com, chenbo.xia@intel.com
Cc: dev@dpdk.org, jiayu.hu@intel.com, yvonnex.yang@intel.com,
	yinan.wang@intel.com, yong.liu@intel.com,
	Cheng Jiang <Cheng1.jiang@intel.com>
Subject: [dpdk-dev] [PATCH v9 2/4] vhost: add support for packed ring in async vhost
Date: Tue, 27 Apr 2021 08:03:33 +0000
Message-ID: <20210427080335.20246-3-Cheng1.jiang@intel.com>
In-Reply-To: <20210427080335.20246-1-Cheng1.jiang@intel.com>

Until now, the async vhost data path has only supported split ring. This
patch enables packed ring in the async vhost data path, making async
vhost compatible with the virtio 1.1 spec.

Signed-off-by: Cheng Jiang <Cheng1.jiang@intel.com>
---
 lib/vhost/rte_vhost_async.h |   1 +
 lib/vhost/vhost.c           |  79 +++++--
 lib/vhost/vhost.h           |  15 +-
 lib/vhost/virtio_net.c      | 442 ++++++++++++++++++++++++++++++++++--
 4 files changed, 489 insertions(+), 48 deletions(-)
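
For readers new to the async data path, here is a minimal sketch of the
application-side flow that this patch extends to packed virtqueues. It is
illustrative only and not part of the patch: the API calls
(rte_vhost_async_channel_register(), rte_vhost_submit_enqueue_burst(),
rte_vhost_poll_enqueue_completed()) follow my understanding of the async
API in this release cycle, while my_dma_ops and BURST_SZ are hypothetical
application-side names.

/* Illustrative sketch only -- assumes the async vhost API of this release
 * cycle; my_dma_ops and BURST_SZ are hypothetical application-side names. */
#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_vhost.h>
#include <rte_vhost_async.h>

#define BURST_SZ 32

/* Defined elsewhere by the application: wraps its copy/DMA engine in the
 * transfer_data() and check_completed_copies() callbacks. */
extern struct rte_vhost_async_channel_ops my_dma_ops;

static int
setup_async_queue(int vid, uint16_t qid)
{
	struct rte_vhost_async_features f;

	f.intval = 0;
	f.async_inorder = 1;       /* only in-order completion is supported */
	f.async_threshold = 256;   /* packets below this length are copied by CPU */

	/* With this patch, registration also succeeds on packed virtqueues. */
	return rte_vhost_async_channel_register(vid, qid, f.intval, &my_dma_ops);
}

static void
async_enqueue_burst(int vid, uint16_t qid, struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	struct rte_mbuf *done[BURST_SZ];
	uint32_t n_cpu_done = 0;
	uint16_t n_dma_done;

	/* Packets below the threshold are copied synchronously and returned
	 * in done[]; the rest are handed to the DMA callbacks. */
	rte_vhost_submit_enqueue_burst(vid, qid, pkts, nb_pkts,
			done, &n_cpu_done);
	/* ... free done[0 .. n_cpu_done - 1] here ... */

	/* On a later polling round, reclaim mbufs whose DMA copies finished. */
	n_dma_done = rte_vhost_poll_enqueue_completed(vid, qid, done, BURST_SZ);
	/* ... free done[0 .. n_dma_done - 1] here ... */
	(void)n_dma_done;
}

With the packed-ring support added in the diff below, this same flow works
regardless of whether the guest negotiated a split or a packed virtqueue.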

diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
index c855ff875e..6faa31f5ad 100644
--- a/lib/vhost/rte_vhost_async.h
+++ b/lib/vhost/rte_vhost_async.h
@@ -89,6 +89,7 @@ struct rte_vhost_async_channel_ops {
 struct async_inflight_info {
 	struct rte_mbuf *mbuf;
 	uint16_t descs; /* num of descs inflight */
+	uint16_t nr_buffers; /* num of buffers inflight for packed ring */
 };
 
 /**
diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
index a70fe01d8f..2e3f9eb095 100644
--- a/lib/vhost/vhost.c
+++ b/lib/vhost/vhost.c
@@ -340,17 +340,17 @@ cleanup_device(struct virtio_net *dev, int destroy)
 static void
 vhost_free_async_mem(struct vhost_virtqueue *vq)
 {
-	if (vq->async_pkts_info)
-		rte_free(vq->async_pkts_info);
-	if (vq->async_descs_split)
-		rte_free(vq->async_descs_split);
-	if (vq->it_pool)
-		rte_free(vq->it_pool);
-	if (vq->vec_pool)
-		rte_free(vq->vec_pool);
+	rte_free(vq->async_pkts_info);
 
-	vq->async_pkts_info = NULL;
+	rte_free(vq->async_buffers_packed);
+	vq->async_buffers_packed = NULL;
+	rte_free(vq->async_descs_split);
 	vq->async_descs_split = NULL;
+
+	rte_free(vq->it_pool);
+	rte_free(vq->vec_pool);
+
+	vq->async_pkts_info = NULL;
 	vq->it_pool = NULL;
 	vq->vec_pool = NULL;
 }
@@ -360,10 +360,10 @@ free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
 	if (vq_is_packed(dev))
 		rte_free(vq->shadow_used_packed);
-	else {
+	else
 		rte_free(vq->shadow_used_split);
-		vhost_free_async_mem(vq);
-	}
+
+	vhost_free_async_mem(vq);
 	rte_free(vq->batch_copy_elems);
 	if (vq->iotlb_pool)
 		rte_mempool_free(vq->iotlb_pool);
@@ -1626,10 +1626,9 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
 	if (unlikely(vq == NULL || !dev->async_copy))
 		return -1;
 
-	/* packed queue is not supported */
-	if (unlikely(vq_is_packed(dev) || !f.async_inorder)) {
+	if (unlikely(!f.async_inorder)) {
 		VHOST_LOG_CONFIG(ERR,
-			"async copy is not supported on packed queue or non-inorder mode "
+			"async copy is not supported on non-inorder mode "
 			"(vid %d, qid: %d)\n", vid, queue_id);
 		return -1;
 	}
@@ -1661,24 +1660,60 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
 	vq->async_pkts_info = rte_malloc_socket(NULL,
 			vq->size * sizeof(struct async_inflight_info),
 			RTE_CACHE_LINE_SIZE, node);
+	if (!vq->async_pkts_info) {
+		vhost_free_async_mem(vq);
+		VHOST_LOG_CONFIG(ERR,
+			"async register failed: cannot allocate memory for async_pkts_info "
+			"(vid %d, qid: %d)\n", vid, queue_id);
+		goto reg_out;
+	}
+
 	vq->it_pool = rte_malloc_socket(NULL,
 			VHOST_MAX_ASYNC_IT * sizeof(struct rte_vhost_iov_iter),
 			RTE_CACHE_LINE_SIZE, node);
+	if (!vq->it_pool) {
+		vhost_free_async_mem(vq);
+		VHOST_LOG_CONFIG(ERR,
+			"async register failed: cannot allocate memory for it_pool "
+			"(vid %d, qid: %d)\n", vid, queue_id);
+		goto reg_out;
+	}
+
 	vq->vec_pool = rte_malloc_socket(NULL,
 			VHOST_MAX_ASYNC_VEC * sizeof(struct iovec),
 			RTE_CACHE_LINE_SIZE, node);
-	vq->async_descs_split = rte_malloc_socket(NULL,
-			vq->size * sizeof(struct vring_used_elem),
-			RTE_CACHE_LINE_SIZE, node);
-	if (!vq->async_descs_split || !vq->async_pkts_info ||
-		!vq->it_pool || !vq->vec_pool) {
+	if (!vq->vec_pool) {
 		vhost_free_async_mem(vq);
 		VHOST_LOG_CONFIG(ERR,
-				"async register failed: cannot allocate memory for vq data "
-				"(vid %d, qid: %d)\n", vid, queue_id);
+			"async register failed: cannot allocate memory for vec_pool "
+			"(vid %d, qid: %d)\n", vid, queue_id);
 		goto reg_out;
 	}
 
+	if (vq_is_packed(dev)) {
+		vq->async_buffers_packed = rte_malloc_socket(NULL,
+			vq->size * sizeof(struct vring_used_elem_packed),
+			RTE_CACHE_LINE_SIZE, node);
+		if (!vq->async_buffers_packed) {
+			vhost_free_async_mem(vq);
+			VHOST_LOG_CONFIG(ERR,
+				"async register failed: cannot allocate memory for async buffers "
+				"(vid %d, qid: %d)\n", vid, queue_id);
+			goto reg_out;
+		}
+	} else {
+		vq->async_descs_split = rte_malloc_socket(NULL,
+			vq->size * sizeof(struct vring_used_elem),
+			RTE_CACHE_LINE_SIZE, node);
+		if (!vq->async_descs_split) {
+			vhost_free_async_mem(vq);
+			VHOST_LOG_CONFIG(ERR,
+				"async register failed: cannot allocate memory for async descs "
+				"(vid %d, qid: %d)\n", vid, queue_id);
+			goto reg_out;
+		}
+	}
+
 	vq->async_ops.check_completed_copies = ops->check_completed_copies;
 	vq->async_ops.transfer_data = ops->transfer_data;
 
diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index f628714c24..b303635645 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -201,9 +201,18 @@ struct vhost_virtqueue {
 	uint16_t	async_pkts_idx;
 	uint16_t	async_pkts_inflight_n;
 	uint16_t	async_last_pkts_n;
-	struct vring_used_elem  *async_descs_split;
-	uint16_t async_desc_idx;
-	uint16_t last_async_desc_idx;
+	union {
+		struct vring_used_elem  *async_descs_split;
+		struct vring_used_elem_packed *async_buffers_packed;
+	};
+	union {
+		uint16_t async_desc_idx_split;
+		uint16_t async_buffer_idx_packed;
+	};
+	union {
+		uint16_t last_async_desc_idx_split;
+		uint16_t last_async_buffer_idx_packed;
+	};
 
 	/* vq async features */
 	bool		async_inorder;
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 438bdafd14..5d540e5599 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -363,14 +363,14 @@ vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
 }
 
 static __rte_always_inline void
-vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
-				   struct vhost_virtqueue *vq,
-				   uint32_t len[],
-				   uint16_t id[],
-				   uint16_t count[],
+vhost_shadow_enqueue_packed(struct vhost_virtqueue *vq,
+				   uint32_t *len,
+				   uint16_t *id,
+				   uint16_t *count,
 				   uint16_t num_buffers)
 {
 	uint16_t i;
+
 	for (i = 0; i < num_buffers; i++) {
 		/* enqueue shadow flush action aligned with batch num */
 		if (!vq->shadow_used_idx)
@@ -382,6 +382,17 @@ vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
 		vq->shadow_aligned_idx += count[i];
 		vq->shadow_used_idx++;
 	}
+}
+
+static __rte_always_inline void
+vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
+				   struct vhost_virtqueue *vq,
+				   uint32_t *len,
+				   uint16_t *id,
+				   uint16_t *count,
+				   uint16_t num_buffers)
+{
+	vhost_shadow_enqueue_packed(vq, len, id, count, num_buffers);
 
 	if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
 		do_data_copy_enqueue(dev, vq);
@@ -1474,6 +1485,23 @@ store_dma_desc_info_split(struct vring_used_elem *s_ring, struct vring_used_elem
 	}
 }
 
+static __rte_always_inline void
+store_dma_desc_info_packed(struct vring_used_elem_packed *s_ring,
+		struct vring_used_elem_packed *d_ring,
+		uint16_t ring_size, uint16_t s_idx, uint16_t d_idx, uint16_t count)
+{
+	uint16_t elem_size = sizeof(struct vring_used_elem_packed);
+
+	if (d_idx + count <= ring_size) {
+		rte_memcpy(d_ring + d_idx, s_ring + s_idx, count * elem_size);
+	} else {
+		uint16_t size = ring_size - d_idx;
+
+		rte_memcpy(d_ring + d_idx, s_ring + s_idx, size * elem_size);
+		rte_memcpy(d_ring, s_ring + s_idx + size, (count - size) * elem_size);
+	}
+}
+
 static __rte_noinline uint32_t
 virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	struct vhost_virtqueue *vq, uint16_t queue_id,
@@ -1556,12 +1584,12 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 			 * descriptors.
 			 */
 			from = vq->shadow_used_idx - num_buffers;
-			to = vq->async_desc_idx & (vq->size - 1);
+			to = vq->async_desc_idx_split & (vq->size - 1);
 
 			store_dma_desc_info_split(vq->shadow_used_split,
 					vq->async_descs_split, vq->size, from, to, num_buffers);
 
-			vq->async_desc_idx += num_buffers;
+			vq->async_desc_idx_split += num_buffers;
 			vq->shadow_used_idx -= num_buffers;
 		} else
 			comp_pkts[num_done_pkts++] = pkts[pkt_idx];
@@ -1619,7 +1647,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 			num_descs += pkts_info[slot_idx & (vq->size - 1)].descs;
 			slot_idx--;
 		}
-		vq->async_desc_idx -= num_descs;
+		vq->async_desc_idx_split -= num_descs;
 		/* recover shadow used ring and available ring */
 		vq->shadow_used_idx -= (vq->last_avail_idx -
 				async_pkts_log[num_async_pkts].last_avail_idx -
@@ -1641,6 +1669,330 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	return pkt_idx;
 }
 
+static __rte_always_inline void
+vhost_update_used_packed(struct vhost_virtqueue *vq,
+			struct vring_used_elem_packed *shadow_ring,
+			uint16_t count)
+{
+	int i;
+	uint16_t used_idx = vq->last_used_idx;
+	uint16_t head_idx = vq->last_used_idx;
+	uint16_t head_flags = 0;
+
+	if (count == 0)
+		return;
+
+	/* Split loop in two to save memory barriers */
+	for (i = 0; i < count; i++) {
+		vq->desc_packed[used_idx].id = shadow_ring[i].id;
+		vq->desc_packed[used_idx].len = shadow_ring[i].len;
+
+		used_idx += shadow_ring[i].count;
+		if (used_idx >= vq->size)
+			used_idx -= vq->size;
+	}
+
+	/* The ordering for storing desc flags needs to be enforced. */
+	rte_atomic_thread_fence(__ATOMIC_RELEASE);
+
+	for (i = 0; i < count; i++) {
+		uint16_t flags;
+
+		if (vq->shadow_used_packed[i].len)
+			flags = VRING_DESC_F_WRITE;
+		else
+			flags = 0;
+
+		if (vq->used_wrap_counter) {
+			flags |= VRING_DESC_F_USED;
+			flags |= VRING_DESC_F_AVAIL;
+		} else {
+			flags &= ~VRING_DESC_F_USED;
+			flags &= ~VRING_DESC_F_AVAIL;
+		}
+
+		if (i > 0) {
+			vq->desc_packed[vq->last_used_idx].flags = flags;
+		} else {
+			head_idx = vq->last_used_idx;
+			head_flags = flags;
+		}
+
+		vq_inc_last_used_packed(vq, shadow_ring[i].count);
+	}
+
+	vq->desc_packed[head_idx].flags = head_flags;
+}
+
+static __rte_always_inline int
+vhost_enqueue_async_single_packed(struct virtio_net *dev,
+			    struct vhost_virtqueue *vq,
+			    struct rte_mbuf *pkt,
+			    struct buf_vector *buf_vec,
+			    uint16_t *nr_descs,
+			    uint16_t *nr_buffers,
+			    struct vring_packed_desc *async_descs,
+			    struct iovec *src_iovec, struct iovec *dst_iovec,
+			    struct rte_vhost_iov_iter *src_it,
+			    struct rte_vhost_iov_iter *dst_it)
+{
+	uint16_t nr_vec = 0;
+	uint16_t avail_idx = vq->last_avail_idx;
+	uint16_t max_tries, tries = 0;
+	uint16_t buf_id = 0;
+	uint32_t len = 0;
+	uint16_t desc_count = 0;
+	uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
+	uint32_t buffer_len[vq->size];
+	uint16_t buffer_buf_id[vq->size];
+	uint16_t buffer_desc_count[vq->size];
+
+	if (rxvq_is_mergeable(dev))
+		max_tries = vq->size - 1;
+	else
+		max_tries = 1;
+
+	while (size > 0) {
+		/*
+		 * if we tried all available ring items, and still
+		 * can't get enough buf, it means something abnormal
+		 * happened.
+		 */
+		if (unlikely(++tries > max_tries))
+			return -1;
+
+		if (unlikely(fill_vec_buf_packed(dev, vq, avail_idx, &desc_count, buf_vec, &nr_vec,
+						&buf_id, &len, VHOST_ACCESS_RW) < 0))
+			return -1;
+
+		len = RTE_MIN(len, size);
+		size -= len;
+
+		buffer_len[*nr_buffers] = len;
+		buffer_buf_id[*nr_buffers] = buf_id;
+		buffer_desc_count[*nr_buffers] = desc_count;
+		*nr_buffers += 1;
+
+		*nr_descs += desc_count;
+		avail_idx += desc_count;
+		if (avail_idx >= vq->size)
+			avail_idx -= vq->size;
+	}
+
+	if (async_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, *nr_buffers, src_iovec, dst_iovec,
+			src_it, dst_it) < 0)
+		return -1;
+	/* store descriptors for DMA */
+	if (avail_idx >= *nr_descs) {
+		rte_memcpy(async_descs, &vq->desc_packed[vq->last_avail_idx],
+			*nr_descs * sizeof(struct vring_packed_desc));
+	} else {
+		uint16_t nr_copy = vq->size - vq->last_avail_idx;
+
+		rte_memcpy(async_descs, &vq->desc_packed[vq->last_avail_idx],
+			nr_copy * sizeof(struct vring_packed_desc));
+		rte_memcpy(async_descs + nr_copy, vq->desc_packed,
+			(*nr_descs - nr_copy) * sizeof(struct vring_packed_desc));
+	}
+
+	vhost_shadow_enqueue_packed(vq, buffer_len, buffer_buf_id, buffer_desc_count, *nr_buffers);
+
+	return 0;
+}
+
+static __rte_always_inline int16_t
+virtio_dev_rx_async_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+			    struct rte_mbuf *pkt, uint16_t *nr_descs, uint16_t *nr_buffers,
+			    struct vring_packed_desc *async_descs,
+			    struct iovec *src_iovec, struct iovec *dst_iovec,
+			    struct rte_vhost_iov_iter *src_it, struct rte_vhost_iov_iter *dst_it)
+{
+	struct buf_vector buf_vec[BUF_VECTOR_MAX];
+
+	if (unlikely(vhost_enqueue_async_single_packed(dev, vq, pkt, buf_vec, nr_descs, nr_buffers,
+						 async_descs, src_iovec, dst_iovec,
+						 src_it, dst_it) < 0)) {
+		VHOST_LOG_DATA(DEBUG, "(%d) failed to get enough desc from vring\n", dev->vid);
+		return -1;
+	}
+
+	VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
+			dev->vid, vq->last_avail_idx, vq->last_avail_idx + *nr_descs);
+
+	return 0;
+}
+
+static __rte_always_inline void
+dma_error_handler_packed(struct vhost_virtqueue *vq, struct vring_packed_desc *async_descs,
+			uint16_t async_descs_idx, uint16_t slot_idx, uint32_t nr_err,
+			uint32_t *pkt_idx, uint32_t *num_async_pkts, uint32_t *num_done_pkts)
+{
+	uint16_t descs_err = 0;
+	uint16_t buffers_err = 0;
+	struct async_inflight_info *pkts_info = vq->async_pkts_info;
+
+	*num_async_pkts -= nr_err;
+	*pkt_idx -= nr_err;
+	/* calculate the sum of buffers and descs of DMA-error packets. */
+	while (nr_err-- > 0) {
+		descs_err += pkts_info[slot_idx % vq->size].descs;
+		buffers_err += pkts_info[slot_idx % vq->size].nr_buffers;
+		slot_idx--;
+	}
+
+	vq->async_buffer_idx_packed -= buffers_err;
+
+	if (vq->last_avail_idx >= descs_err) {
+		vq->last_avail_idx -= descs_err;
+
+		rte_memcpy(&vq->desc_packed[vq->last_avail_idx],
+			&async_descs[async_descs_idx - descs_err],
+			descs_err * sizeof(struct vring_packed_desc));
+	} else {
+		uint16_t nr_copy;
+
+		vq->last_avail_idx = vq->last_avail_idx + vq->size - descs_err;
+		nr_copy = vq->size - vq->last_avail_idx;
+		rte_memcpy(&vq->desc_packed[vq->last_avail_idx],
+			&async_descs[async_descs_idx - descs_err],
+			nr_copy * sizeof(struct vring_packed_desc));
+		descs_err -= nr_copy;
+		rte_memcpy(&vq->desc_packed[0], &async_descs[async_descs_idx - descs_err],
+			descs_err * sizeof(struct vring_packed_desc));
+		vq->avail_wrap_counter ^= 1;
+	}
+
+	*num_done_pkts = *pkt_idx - *num_async_pkts;
+}
+
+static __rte_noinline uint32_t
+virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
+	struct vhost_virtqueue *vq, uint16_t queue_id,
+	struct rte_mbuf **pkts, uint32_t count,
+	struct rte_mbuf **comp_pkts, uint32_t *comp_count)
+{
+	uint32_t pkt_idx = 0, pkt_burst_idx = 0;
+	uint16_t async_descs_idx = 0;
+	uint16_t num_buffers;
+	uint16_t num_descs;
+
+	struct rte_vhost_iov_iter *it_pool = vq->it_pool;
+	struct iovec *vec_pool = vq->vec_pool;
+	struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
+	struct iovec *src_iovec = vec_pool;
+	struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
+	uint16_t slot_idx = 0;
+	uint16_t segs_await = 0;
+	uint16_t iovec_idx = 0, it_idx = 0;
+	struct async_inflight_info *pkts_info = vq->async_pkts_info;
+	uint32_t n_pkts = 0, pkt_err = 0;
+	uint32_t num_async_pkts = 0, num_done_pkts = 0;
+	struct vring_packed_desc async_descs[vq->size];
+
+	rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
+
+	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
+		num_buffers = 0;
+		num_descs = 0;
+
+		if (unlikely(virtio_dev_rx_async_single_packed(dev, vq, pkts[pkt_idx],
+						&num_descs, &num_buffers,
+						&async_descs[async_descs_idx],
+						&src_iovec[iovec_idx], &dst_iovec[iovec_idx],
+						&it_pool[it_idx], &it_pool[it_idx + 1]) < 0))
+			break;
+
+		VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
+			dev->vid, vq->last_avail_idx,
+			vq->last_avail_idx + num_descs);
+
+		slot_idx = (vq->async_pkts_idx + num_async_pkts) % vq->size;
+		if (it_pool[it_idx].count) {
+			uint16_t from, to;
+
+			async_descs_idx += num_descs;
+			async_fill_desc(&tdes[pkt_burst_idx++],
+				&it_pool[it_idx], &it_pool[it_idx + 1]);
+			pkts_info[slot_idx].descs = num_descs;
+			pkts_info[slot_idx].nr_buffers = num_buffers;
+			pkts_info[slot_idx].mbuf = pkts[pkt_idx];
+			num_async_pkts++;
+			iovec_idx += it_pool[it_idx].nr_segs;
+			it_idx += 2;
+
+			segs_await += it_pool[it_idx].nr_segs;
+
+			/**
+			 * recover shadow used ring and keep DMA-occupied
+			 * descriptors.
+			 */
+			from = vq->shadow_used_idx - num_buffers;
+			to = vq->async_buffer_idx_packed % vq->size;
+			store_dma_desc_info_packed(vq->shadow_used_packed,
+					vq->async_buffers_packed, vq->size, from, to, num_buffers);
+
+			vq->async_buffer_idx_packed += num_buffers;
+			vq->shadow_used_idx -= num_buffers;
+		} else {
+			comp_pkts[num_done_pkts++] = pkts[pkt_idx];
+		}
+
+		vq_inc_last_avail_packed(vq, num_descs);
+
+		/*
+		 * conditions to trigger async device transfer:
+		 * - buffered packet number reaches transfer threshold
+		 * - unused async iov number is less than max vhost vector
+		 */
+		if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
+			((VHOST_MAX_ASYNC_VEC >> 1) - segs_await < BUF_VECTOR_MAX))) {
+			n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id,
+				tdes, 0, pkt_burst_idx);
+			iovec_idx = 0;
+			it_idx = 0;
+			segs_await = 0;
+			vq->async_pkts_inflight_n += n_pkts;
+
+			if (unlikely(n_pkts < pkt_burst_idx)) {
+				/*
+				 * log error packets number here and do actual
+				 * error processing when applications poll
+				 * completion
+				 */
+				pkt_err = pkt_burst_idx - n_pkts;
+				pkt_burst_idx = 0;
+				pkt_idx++;
+				break;
+			}
+
+			pkt_burst_idx = 0;
+		}
+	}
+
+	if (pkt_burst_idx) {
+		n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+		vq->async_pkts_inflight_n += n_pkts;
+
+		if (unlikely(n_pkts < pkt_burst_idx))
+			pkt_err = pkt_burst_idx - n_pkts;
+	}
+
+	do_data_copy_enqueue(dev, vq);
+
+	if (unlikely(pkt_err))
+		dma_error_handler_packed(vq, async_descs, async_descs_idx, slot_idx, pkt_err,
+					&pkt_idx, &num_async_pkts, &num_done_pkts);
+	vq->async_pkts_idx += num_async_pkts;
+	*comp_count = num_done_pkts;
+
+	if (likely(vq->shadow_used_idx)) {
+		vhost_flush_enqueue_shadow_packed(dev, vq);
+		vhost_vring_call_packed(dev, vq);
+	}
+
+	return pkt_idx;
+}
+
 static __rte_always_inline void
 write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
 {
@@ -1649,7 +2001,7 @@ write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
 	uint16_t to, from;
 
 	do {
-		from = vq->last_async_desc_idx & (vq->size - 1);
+		from = vq->last_async_desc_idx_split & (vq->size - 1);
 		nr_copy = nr_left + from <= vq->size ? nr_left : vq->size - from;
 		to = vq->last_used_idx & (vq->size - 1);
 
@@ -1665,18 +2017,41 @@ write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
 					(nr_copy - size) * sizeof(struct vring_used_elem));
 		}
 
-		vq->last_async_desc_idx += nr_copy;
+		vq->last_async_desc_idx_split += nr_copy;
 		vq->last_used_idx += nr_copy;
 		nr_left -= nr_copy;
 	} while (nr_left > 0);
 }
 
+static __rte_always_inline void
+write_back_completed_descs_packed(struct vhost_virtqueue *vq,
+				uint16_t n_buffers)
+{
+	uint16_t nr_left = n_buffers;
+	uint16_t from, to;
+
+	do {
+		from = vq->last_async_buffer_idx_packed % vq->size;
+		to = (from + nr_left) % vq->size;
+		if (to > from) {
+			vhost_update_used_packed(vq, vq->async_buffers_packed + from, to - from);
+			vq->last_async_buffer_idx_packed += nr_left;
+			nr_left = 0;
+		} else {
+			vhost_update_used_packed(vq, vq->async_buffers_packed + from,
+				vq->size - from);
+			vq->last_async_buffer_idx_packed += vq->size - from;
+			nr_left -= vq->size - from;
+		}
+	} while (nr_left > 0);
+}
+
 uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 		struct rte_mbuf **pkts, uint16_t count)
 {
 	struct virtio_net *dev = get_device(vid);
 	struct vhost_virtqueue *vq;
-	uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0;
+	uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
 	uint16_t start_idx, pkts_idx, vq_size;
 	struct async_inflight_info *pkts_info;
 	uint16_t from, i;
@@ -1701,7 +2076,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 
 	rte_spinlock_lock(&vq->access_lock);
 
-	pkts_idx = vq->async_pkts_idx & (vq->size - 1);
+	pkts_idx = vq->async_pkts_idx % vq->size;
 	pkts_info = vq->async_pkts_info;
 	vq_size = vq->size;
 	start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
@@ -1718,21 +2093,41 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 		goto done;
 	}
 
-	for (i = 0; i < n_pkts_put; i++) {
-		from = (start_idx + i) & (vq_size - 1);
-		n_descs += pkts_info[from].descs;
-		pkts[i] = pkts_info[from].mbuf;
+	if (vq_is_packed(dev)) {
+		for (i = 0; i < n_pkts_put; i++) {
+			from = (start_idx + i) & (vq_size - 1);
+			n_buffers += pkts_info[from].nr_buffers;
+			pkts[i] = pkts_info[from].mbuf;
+		}
+	} else {
+		for (i = 0; i < n_pkts_put; i++) {
+			from = (start_idx + i) & (vq_size - 1);
+			n_descs += pkts_info[from].descs;
+			pkts[i] = pkts_info[from].mbuf;
+		}
 	}
+
 	vq->async_last_pkts_n = n_pkts_cpl - n_pkts_put;
 	vq->async_pkts_inflight_n -= n_pkts_put;
 
 	if (likely(vq->enabled && vq->access_ok)) {
-		write_back_completed_descs_split(vq, n_descs);
+		if (vq_is_packed(dev)) {
+			write_back_completed_descs_packed(vq, n_buffers);
 
-		__atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
-		vhost_vring_call_split(dev, vq);
-	} else
-		vq->last_async_desc_idx += n_descs;
+			vhost_vring_call_packed(dev, vq);
+		} else {
+			write_back_completed_descs_split(vq, n_descs);
+
+			__atomic_add_fetch(&vq->used->idx, n_descs,
+					__ATOMIC_RELEASE);
+			vhost_vring_call_split(dev, vq);
+		}
+	} else {
+		if (vq_is_packed(dev))
+			vq->last_async_buffer_idx_packed += n_buffers;
+		else
+			vq->last_async_desc_idx_split += n_descs;
+	}
 
 done:
 	rte_spinlock_unlock(&vq->access_lock);
@@ -1773,9 +2168,10 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
 	if (count == 0)
 		goto out;
 
-	/* TODO: packed queue not implemented */
 	if (vq_is_packed(dev))
-		nb_tx = 0;
+		nb_tx = virtio_dev_rx_async_submit_packed(dev,
+				vq, queue_id, pkts, count, comp_pkts,
+				comp_count);
 	else
 		nb_tx = virtio_dev_rx_async_submit_split(dev,
 				vq, queue_id, pkts, count, comp_pkts,
-- 
2.29.2


Thread overview: 60+ messages
2021-03-17  8:54 [dpdk-dev] [PATCH] " Cheng Jiang
2021-03-22  6:15 ` [dpdk-dev] [PATCH v2] " Cheng Jiang
2021-03-24  9:19   ` Liu, Yong
2021-03-29 12:29     ` Jiang, Cheng1
2021-03-31 14:06 ` [dpdk-dev] [PATCH v3] " Cheng Jiang
2021-04-07  6:26   ` Hu, Jiayu
2021-04-08 12:01     ` Jiang, Cheng1
2021-04-10 10:25 ` [dpdk-dev] [PATCH v4 0/4] " Cheng Jiang
2021-04-10 10:25   ` [dpdk-dev] [PATCH v4 1/4] vhost: abstract and reorganize async split ring code Cheng Jiang
2021-04-10 10:25   ` Cheng Jiang
2021-04-10 10:25   ` [dpdk-dev] [PATCH v4 2/4] vhost: add support for packed ring in async vhost Cheng Jiang
2021-04-10 10:25   ` [dpdk-dev] [PATCH v4 3/4] vhost: add batch datapath for async vhost packed ring Cheng Jiang
2021-04-10 10:25   ` [dpdk-dev] [PATCH v4 4/4] doc: add release note for vhost async " Cheng Jiang
2021-04-12 11:34 ` [dpdk-dev] [PATCH v5 0/4] add support for packed ring in async vhost Cheng Jiang
2021-04-12 11:34   ` [dpdk-dev] [PATCH v5 1/4] vhost: abstract and reorganize async split ring code Cheng Jiang
2021-04-13  2:44     ` Hu, Jiayu
2021-04-13  3:26       ` Jiang, Cheng1
2021-04-13  7:11     ` Maxime Coquelin
2021-04-13  9:06       ` Jiang, Cheng1
2021-04-12 11:34   ` [dpdk-dev] [PATCH v5 2/4] vhost: add support for packed ring in async vhost Cheng Jiang
2021-04-13  8:36     ` Maxime Coquelin
2021-04-13 11:48       ` Jiang, Cheng1
2021-04-13 13:08         ` Maxime Coquelin
2021-04-13 13:50           ` Jiang, Cheng1
2021-04-12 11:34   ` [dpdk-dev] [PATCH v5 3/4] vhost: add batch datapath for async vhost packed ring Cheng Jiang
2021-04-12 11:34   ` [dpdk-dev] [PATCH v5 4/4] doc: add release note for vhost async " Cheng Jiang
2021-04-13 14:55 ` [dpdk-dev] [PATCH v6 0/4] add support for packed ring in async vhost Cheng Jiang
2021-04-13 14:55   ` [dpdk-dev] [PATCH v6 1/4] vhost: abstract and reorganize async split ring code Cheng Jiang
2021-04-13 14:55   ` [dpdk-dev] [PATCH v6 2/4] vhost: add support for packed ring in async vhost Cheng Jiang
2021-04-13 14:55   ` [dpdk-dev] [PATCH v6 3/4] vhost: add batch datapath for async vhost packed ring Cheng Jiang
2021-04-13 14:55   ` [dpdk-dev] [PATCH v6 4/4] doc: add release note for vhost async " Cheng Jiang
2021-04-14  6:13 ` [dpdk-dev] [PATCH v7 0/4] add support for packed ring in async vhost Cheng Jiang
2021-04-14  6:13   ` [dpdk-dev] [PATCH v7 1/4] vhost: abstract and reorganize async split ring code Cheng Jiang
2021-04-14 12:24     ` Maxime Coquelin
2021-04-14  6:13   ` [dpdk-dev] [PATCH v7 2/4] vhost: add support for packed ring in async vhost Cheng Jiang
2021-04-14 13:40     ` Maxime Coquelin
2021-04-15  5:42       ` Jiang, Cheng1
2021-04-15  2:02     ` Hu, Jiayu
2021-04-15  5:54       ` Jiang, Cheng1
2021-04-14  6:13   ` [dpdk-dev] [PATCH v7 3/4] vhost: add batch datapath for async vhost packed ring Cheng Jiang
2021-04-14  6:13   ` [dpdk-dev] [PATCH v7 4/4] doc: add release note for vhost async " Cheng Jiang
2021-04-19  8:51 ` [dpdk-dev] [PATCH v8 0/4] add support for packed ring in async vhost Cheng Jiang
2021-04-19  8:51   ` [dpdk-dev] [PATCH v8 1/4] vhost: abstract and reorganize async split ring code Cheng Jiang
2021-04-27  1:19     ` Hu, Jiayu
2021-04-19  8:51   ` [dpdk-dev] [PATCH v8 2/4] vhost: add support for packed ring in async vhost Cheng Jiang
2021-04-27  5:16     ` Hu, Jiayu
2021-04-27  6:07       ` Jiang, Cheng1
2021-04-19  8:51   ` [dpdk-dev] [PATCH v8 3/4] vhost: add batch datapath for async vhost packed ring Cheng Jiang
2021-04-19  8:51   ` [dpdk-dev] [PATCH v8 4/4] doc: add release note for vhost async " Cheng Jiang
2021-04-27  8:03 ` [dpdk-dev] [PATCH v9 0/4] add support for packed ring in async vhost Cheng Jiang
2021-04-27  8:03   ` [dpdk-dev] [PATCH v9 1/4] vhost: abstract and reorganize async split ring code Cheng Jiang
2021-04-27  8:03   ` Cheng Jiang [this message]
2021-04-29  1:48     ` [dpdk-dev] [PATCH v9 2/4] vhost: add support for packed ring in async vhost Hu, Jiayu
2021-04-29  9:50     ` Maxime Coquelin
2021-04-27  8:03   ` [dpdk-dev] [PATCH v9 3/4] vhost: add batch datapath for async vhost packed ring Cheng Jiang
2021-04-29  9:57     ` Maxime Coquelin
2021-04-27  8:03   ` [dpdk-dev] [PATCH v9 4/4] doc: add release note for vhost async " Cheng Jiang
2021-04-29  9:58     ` Maxime Coquelin
2021-05-04 18:38     ` Ferruh Yigit
2021-05-04  8:28   ` [dpdk-dev] [PATCH v9 0/4] add support for packed ring in async vhost Maxime Coquelin
