DPDK patches and discussions
From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, chenbo.xia@intel.com, jiayu.hu@intel.com,
	yuanx.wang@intel.com, wenwux.ma@intel.com,
	bruce.richardson@intel.com, john.mcnamara@intel.com
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [dpdk-dev] [RFC 03/14] vhost: simplify async IO vectors
Date: Fri,  8 Oct 2021 00:00:02 +0200	[thread overview]
Message-ID: <20211007220013.355530-4-maxime.coquelin@redhat.com> (raw)
In-Reply-To: <20211007220013.355530-1-maxime.coquelin@redhat.com>

The IO vectors implementation is unnecessarily complex, mixing
source and destination vectors in the same array.

This patch declares two arrays, one for the source and one
for the destination. It also gets rid of the segs_await variable
in both the packed and split implementations, as it always holds
the same value as iovec_idx.
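
For illustration only (not part of the diff below), a minimal sketch of
how the source and destination vectors are addressed before and after
this change. The OLD_/NEW_ names are hypothetical, and BUF_VECTOR_MAX is
a stand-in for the definition in lib/vhost/vhost.h:

    #include <sys/uio.h>                /* struct iovec */

    #define BUF_VECTOR_MAX 256          /* stand-in for the value in lib/vhost/vhost.h */

    /* Before: a single pool, with destination vectors carved out at the
     * half-way point via pointer arithmetic. */
    #define OLD_VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 4)
    static struct iovec old_vec_pool[OLD_VHOST_MAX_ASYNC_VEC];
    static struct iovec *old_src = old_vec_pool;
    static struct iovec *old_dst = old_vec_pool + (OLD_VHOST_MAX_ASYNC_VEC >> 1);

    /* After: two dedicated arrays; no pointer arithmetic, and the
     * free-vector check can compare iovec_idx against
     * VHOST_MAX_ASYNC_VEC directly. */
    #define NEW_VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 2)
    static struct iovec new_src_iovec[NEW_VHOST_MAX_ASYNC_VEC];
    static struct iovec new_dst_iovec[NEW_VHOST_MAX_ASYNC_VEC];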

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/vhost/vhost.h      |  5 +++--
 lib/vhost/virtio_net.c | 28 +++++++++++-----------------
 2 files changed, 14 insertions(+), 19 deletions(-)

diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index 9de87d20cc..f2d9535174 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -49,7 +49,7 @@
 #define MAX_PKT_BURST 32
 
 #define VHOST_MAX_ASYNC_IT (MAX_PKT_BURST * 2)
-#define VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 4)
+#define VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 2)
 
 #define PACKED_DESC_ENQUEUE_USED_FLAG(w)	\
 	((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED | VRING_DESC_F_WRITE) : \
@@ -133,7 +133,8 @@ struct vhost_async {
 	struct rte_vhost_async_channel_ops ops;
 
 	struct rte_vhost_iov_iter it_pool[VHOST_MAX_ASYNC_IT];
-	struct iovec vec_pool[VHOST_MAX_ASYNC_VEC];
+	struct iovec src_iovec[VHOST_MAX_ASYNC_VEC];
+	struct iovec dst_iovec[VHOST_MAX_ASYNC_VEC];
 
 	/* data transfer status */
 	struct async_inflight_info *pkts_info;
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index a109c2a316..4e0e1584b8 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1512,14 +1512,12 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 
 	struct vhost_async *async = vq->async;
 	struct rte_vhost_iov_iter *it_pool = async->it_pool;
-	struct iovec *vec_pool = async->vec_pool;
 	struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
-	struct iovec *src_iovec = vec_pool;
-	struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
+	struct iovec *src_iovec = async->src_iovec;
+	struct iovec *dst_iovec = async->dst_iovec;
 	struct async_inflight_info *pkts_info = async->pkts_info;
 	uint32_t n_pkts = 0, pkt_err = 0;
 	int32_t n_xfer;
-	uint16_t segs_await = 0;
 	uint16_t iovec_idx = 0, it_idx = 0, slot_idx = 0;
 
 	/*
@@ -1562,7 +1560,6 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 		pkts_info[slot_idx].mbuf = pkts[pkt_idx];
 
 		iovec_idx += it_pool[it_idx].nr_segs;
-		segs_await += it_pool[it_idx].nr_segs;
 		it_idx += 2;
 
 		vq->last_avail_idx += num_buffers;
@@ -1573,8 +1570,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 		 * - unused async iov number is less than max vhost vector
 		 */
 		if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
-			((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
-			BUF_VECTOR_MAX))) {
+			(VHOST_MAX_ASYNC_VEC - iovec_idx < BUF_VECTOR_MAX))) {
 			n_xfer = async->ops.transfer_data(dev->vid,
 					queue_id, tdes, 0, pkt_burst_idx);
 			if (likely(n_xfer >= 0)) {
@@ -1588,7 +1584,6 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 
 			iovec_idx = 0;
 			it_idx = 0;
-			segs_await = 0;
 
 			if (unlikely(n_pkts < pkt_burst_idx)) {
 				/*
@@ -1745,8 +1740,11 @@ vhost_enqueue_async_packed(struct virtio_net *dev,
 		if (unlikely(++tries > max_tries))
 			return -1;
 
-		if (unlikely(fill_vec_buf_packed(dev, vq, avail_idx, &desc_count, buf_vec, &nr_vec,
-						&buf_id, &len, VHOST_ACCESS_RW) < 0))
+		if (unlikely(fill_vec_buf_packed(dev, vq,
+						avail_idx, &desc_count,
+						buf_vec, &nr_vec,
+						&buf_id, &len,
+						VHOST_ACCESS_RW) < 0))
 			return -1;
 
 		len = RTE_MIN(len, size);
@@ -1832,14 +1830,12 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 
 	struct vhost_async *async = vq->async;
 	struct rte_vhost_iov_iter *it_pool = async->it_pool;
-	struct iovec *vec_pool = async->vec_pool;
 	struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
-	struct iovec *src_iovec = vec_pool;
-	struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
+	struct iovec *src_iovec = async->src_iovec;
+	struct iovec *dst_iovec = async->dst_iovec;
 	struct async_inflight_info *pkts_info = async->pkts_info;
 	uint32_t n_pkts = 0, pkt_err = 0;
 	uint16_t slot_idx = 0;
-	uint16_t segs_await = 0;
 	uint16_t iovec_idx = 0, it_idx = 0;
 
 	do {
@@ -1861,7 +1857,6 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 		pkts_info[slot_idx].nr_buffers = num_buffers;
 		pkts_info[slot_idx].mbuf = pkts[pkt_idx];
 		iovec_idx += it_pool[it_idx].nr_segs;
-		segs_await += it_pool[it_idx].nr_segs;
 		it_idx += 2;
 
 		pkt_idx++;
@@ -1874,7 +1869,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 		 * - unused async iov number is less than max vhost vector
 		 */
 		if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
-			((VHOST_MAX_ASYNC_VEC >> 1) - segs_await < BUF_VECTOR_MAX))) {
+			(VHOST_MAX_ASYNC_VEC - iovec_idx < BUF_VECTOR_MAX))) {
 			n_xfer = async->ops.transfer_data(dev->vid,
 					queue_id, tdes, 0, pkt_burst_idx);
 			if (likely(n_xfer >= 0)) {
@@ -1888,7 +1883,6 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
 
 			iovec_idx = 0;
 			it_idx = 0;
-			segs_await = 0;
 
 			if (unlikely(n_pkts < pkt_burst_idx)) {
 				/*
-- 
2.31.1


Thread overview: 21+ messages
2021-10-07 21:59 [dpdk-dev] [RFC 00/14] vhost: clean-up and simplify async implementation Maxime Coquelin
2021-10-07 22:00 ` [dpdk-dev] [RFC 01/14] vhost: move async data in a dedicated structure Maxime Coquelin
2021-10-14  3:24   ` Hu, Jiayu
2021-10-14  8:54     ` Maxime Coquelin
2021-10-07 22:00 ` [dpdk-dev] [RFC 02/14] vhost: hide inflight async structure Maxime Coquelin
2021-10-07 22:00 ` Maxime Coquelin [this message]
2021-10-07 22:00 ` [dpdk-dev] [RFC 04/14] vhost: simplify async IO vectors iterators Maxime Coquelin
2021-10-07 22:00 ` [dpdk-dev] [RFC 05/14] vhost: remove async batch threshold Maxime Coquelin
2021-10-07 22:00 ` [dpdk-dev] [RFC 06/14] vhost: introduce specific iovec structure Maxime Coquelin
2021-10-07 22:00 ` [dpdk-dev] [RFC 07/14] vhost: remove useless fields in async iterator struct Maxime Coquelin
2021-10-07 22:00 ` [dpdk-dev] [RFC 08/14] vhost: improve IO vector logic Maxime Coquelin
2021-10-12  6:05   ` Hu, Jiayu
2021-10-12  8:34     ` Maxime Coquelin
2021-10-07 22:00 ` [dpdk-dev] [RFC 09/14] vhost: remove notion of async descriptor Maxime Coquelin
2021-10-07 22:00 ` [dpdk-dev] [RFC 10/14] vhost: simplify async enqueue completion Maxime Coquelin
2021-10-07 22:00 ` [dpdk-dev] [RFC 11/14] vhost: simplify getting the first in-flight index Maxime Coquelin
2021-10-07 22:00 ` [dpdk-dev] [RFC 12/14] vhost: prepare async for mbuf to desc refactoring Maxime Coquelin
2021-10-07 22:00 ` [dpdk-dev] [RFC 13/14] vhost: prepare sync " Maxime Coquelin
2021-10-07 22:00 ` [dpdk-dev] [RFC 14/14] vhost: merge sync and async mbuf to desc filling Maxime Coquelin
2021-10-08 12:36 ` [dpdk-dev] [RFC 00/14] vhost: clean-up and simplify async implementation David Marchand
2021-10-12  6:24 ` Hu, Jiayu
