DPDK patches and discussions
From: "Hu, Jiayu" <jiayu.hu@intel.com>
To: "Jiang, Cheng1" <cheng1.jiang@intel.com>,
	"maxime.coquelin@redhat.com" <maxime.coquelin@redhat.com>,
	"Xia, Chenbo" <chenbo.xia@intel.com>
Cc: "dev@dpdk.org" <dev@dpdk.org>,
	"Yang, YvonneX" <yvonnex.yang@intel.com>,
	"Wang, Yinan" <yinan.wang@intel.com>,
	"Liu, Yong" <yong.liu@intel.com>
Subject: Re: [dpdk-dev] [PATCH v5 1/4] vhost: abstract and reorganize async split ring code
Date: Tue, 13 Apr 2021 02:44:07 +0000
Message-ID: <c65eb35b2a3048eb9be1320c0d45c198@intel.com>
In-Reply-To: <20210412113430.17587-2-Cheng1.jiang@intel.com>

Hi Cheng,

Some comments inline.

> -----Original Message-----
> From: Jiang, Cheng1 <cheng1.jiang@intel.com>
> Sent: Monday, April 12, 2021 7:34 PM
> To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Yang, YvonneX
> <yvonnex.yang@intel.com>; Wang, Yinan <yinan.wang@intel.com>; Liu,
> Yong <yong.liu@intel.com>; Jiang, Cheng1 <cheng1.jiang@intel.com>
> Subject: [PATCH v5 1/4] vhost: abstract and reorganize async split ring code
> 
> In order to improve code efficiency and readability when async packed
> ring support is enabled. This patch abstract some functions like
> shadow_ring_store and write_back_completed_descs_split. And improve
> the efficiency of some pointer offset calculation.

The commit log grammar needs improvement; it contains a typo and an incomplete
sentence.
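For example, it could read something like: "To improve code efficiency and
readability when async packed ring support is enabled, this patch abstracts
helpers such as shadow_ring_store() and write_back_completed_descs_split()
out of the split ring code and improves the efficiency of some pointer offset
calculations." (only a suggestion, reworded from the current log).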

> 
> Signed-off-by: Cheng Jiang <Cheng1.jiang@intel.com>
> ---
>  lib/librte_vhost/virtio_net.c | 146 +++++++++++++++++++---------------
>  1 file changed, 84 insertions(+), 62 deletions(-)
> 
> diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
> index ff3987860..c43ab0093 100644
> --- a/lib/librte_vhost/virtio_net.c
> +++ b/lib/librte_vhost/virtio_net.c
> @@ -1458,6 +1458,29 @@ virtio_dev_rx_async_get_info_idx(uint16_t pkts_idx,
>  		(vq_size - n_inflight + pkts_idx) & (vq_size - 1);
>  }
> 
> +static __rte_always_inline void
> +shadow_ring_store(struct vhost_virtqueue *vq,  void *shadow_ring, void *d_ring,
> +		uint16_t s_idx, uint16_t d_idx,
> +		uint16_t count, uint16_t elem_size)
> +{
> +	if (d_idx + count <= vq->size) {
> +		rte_memcpy((void *)((uintptr_t)d_ring + d_idx * elem_size),
> +			(void *)((uintptr_t)shadow_ring + s_idx * elem_size),
> +			count * elem_size);
> +	} else {
> +		uint16_t size = vq->size - d_idx;
> +
> +		rte_memcpy((void *)((uintptr_t)d_ring + d_idx * elem_size),
> +			(void *)((uintptr_t)shadow_ring + s_idx * elem_size),
> +			size * elem_size);
> +
> +		rte_memcpy((void *)((uintptr_t)d_ring),
> +			(void *)((uintptr_t)shadow_ring +
> +				(s_idx + size) * elem_size),
> +			(count - size) * elem_size);
> +	}
> +}
> +
>  static __rte_noinline uint32_t
>  virtio_dev_rx_async_submit_split(struct virtio_net *dev,
>  	struct vhost_virtqueue *vq, uint16_t queue_id,
> @@ -1478,6 +1501,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
>  	struct rte_vhost_iov_iter *dst_it = it_pool + 1;
>  	uint16_t slot_idx = 0;
>  	uint16_t segs_await = 0;
> +	uint16_t iovec_idx = 0, it_idx = 0;
>  	struct async_inflight_info *pkts_info = vq->async_pkts_info;
>  	uint32_t n_pkts = 0, pkt_err = 0;
>  	uint32_t num_async_pkts = 0, num_done_pkts = 0;
> @@ -1513,27 +1537,32 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
> 
>  		if (async_mbuf_to_desc(dev, vq, pkts[pkt_idx],
>  				buf_vec, nr_vec, num_buffers,
> -				src_iovec, dst_iovec, src_it, dst_it) < 0) {
> +				&src_iovec[iovec_idx],
> +				&dst_iovec[iovec_idx],
> +				&src_it[it_idx],
> +				&dst_it[it_idx]) < 0) {

When using indexes, it is strange to get the src and dst iov_iter from src_it and dst_it
respectively, as they are not the start addresses of two separate iov_iter arrays but have
overlapping elements. IMO, there is no need for src_it/dst_it at all, as the iterators can
simply be indexed by it_pool[it_idx] and it_pool[it_idx + 1].
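
For example, the call site could then look roughly like this (just a sketch,
reusing the it_pool/iovec_idx/it_idx names this patch already introduces):

	if (async_mbuf_to_desc(dev, vq, pkts[pkt_idx],
			buf_vec, nr_vec, num_buffers,
			&src_iovec[iovec_idx],
			&dst_iovec[iovec_idx],
			&it_pool[it_idx],
			&it_pool[it_idx + 1]) < 0) {
		vq->shadow_used_idx -= num_buffers;
		break;
	}

and the later src_it[it_idx].count / src_it[it_idx].nr_segs references would
become it_pool[it_idx].count / it_pool[it_idx].nr_segs.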

>  			vq->shadow_used_idx -= num_buffers;
>  			break;
>  		}
> 
>  		slot_idx = (vq->async_pkts_idx + num_async_pkts) &
>  			(vq->size - 1);
> -		if (src_it->count) {
> +		if (src_it[it_idx].count) {
>  			uint16_t from, to;
> 
> -			async_fill_desc(&tdes[pkt_burst_idx++], src_it, dst_it);
> +			async_fill_desc(&tdes[pkt_burst_idx++],
> +				&src_it[it_idx],
> +				&dst_it[it_idx]);
>  			pkts_info[slot_idx].descs = num_buffers;
>  			pkts_info[slot_idx].mbuf = pkts[pkt_idx];
>  			async_pkts_log[num_async_pkts].pkt_idx = pkt_idx;
>  			async_pkts_log[num_async_pkts++].last_avail_idx =
>  				vq->last_avail_idx;
> -			src_iovec += src_it->nr_segs;
> -			dst_iovec += dst_it->nr_segs;
> -			src_it += 2;
> -			dst_it += 2;
> -			segs_await += src_it->nr_segs;
> +
> +			iovec_idx += src_it[it_idx].nr_segs;
> +			it_idx += 2;
> +
> +			segs_await += src_it[it_idx].nr_segs;
> 
>  			/**
>  			 * recover shadow used ring and keep DMA-occupied
> @@ -1541,23 +1570,12 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
>  			 */
>  			from = vq->shadow_used_idx - num_buffers;
>  			to = vq->async_desc_idx & (vq->size - 1);
> -			if (num_buffers + to <= vq->size) {
> -				rte_memcpy(&vq->async_descs_split[to],
> -						&vq->shadow_used_split[from],
> -						num_buffers *
> -						sizeof(struct vring_used_elem));
> -			} else {
> -				int size = vq->size - to;
> -
> -				rte_memcpy(&vq->async_descs_split[to],
> -						&vq->shadow_used_split[from],
> -						size *
> -						sizeof(struct vring_used_elem));
> -				rte_memcpy(vq->async_descs_split,
> -						&vq->shadow_used_split[from +
> -						size], (num_buffers - size) *
> -						sizeof(struct vring_used_elem));
> -			}
> +
> +			shadow_ring_store(vq, vq->shadow_used_split,
> +					vq->async_descs_split,
> +					from, to, num_buffers,
> +					sizeof(struct vring_used_elem));

This function stores DMA-occupied descriptors, but "shadow_ring_store" is not a good name
for it. In addition, I think there is no need to pass vq as a parameter; what you really need
is the size of the shadow ring and the async descriptor ring.
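
Something along these lines would be clearer (only a sketch; the name and exact
prototype are just a suggestion, with the vq->size uses replaced by a ring_size
parameter):

static __rte_always_inline void
store_dma_desc_info(void *s_ring, void *d_ring, uint16_t ring_size,
		uint16_t s_idx, uint16_t d_idx,
		uint16_t count, uint16_t elem_size)
{
	if (d_idx + count <= ring_size) {
		/* no wrap-around: a single copy from the shadow ring */
		rte_memcpy((void *)((uintptr_t)d_ring + d_idx * elem_size),
			(void *)((uintptr_t)s_ring + s_idx * elem_size),
			count * elem_size);
	} else {
		uint16_t size = ring_size - d_idx;

		/* destination wraps: copy up to the ring end, then the rest
		 * from the ring start
		 */
		rte_memcpy((void *)((uintptr_t)d_ring + d_idx * elem_size),
			(void *)((uintptr_t)s_ring + s_idx * elem_size),
			size * elem_size);
		rte_memcpy(d_ring,
			(void *)((uintptr_t)s_ring + (s_idx + size) * elem_size),
			(count - size) * elem_size);
	}
}

The caller would then pass vq->size explicitly instead of the whole vq.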

Thanks,
Jiayu
> +
>  			vq->async_desc_idx += num_buffers;
>  			vq->shadow_used_idx -= num_buffers;
>  		} else
> @@ -1575,10 +1593,9 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
>  			BUF_VECTOR_MAX))) {
>  			n_pkts = vq->async_ops.transfer_data(dev->vid,
>  					queue_id, tdes, 0, pkt_burst_idx);
> -			src_iovec = vec_pool;
> -			dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
> -			src_it = it_pool;
> -			dst_it = it_pool + 1;
> +			iovec_idx = 0;
> +			it_idx = 0;
> +
>  			segs_await = 0;
>  			vq->async_pkts_inflight_n += n_pkts;
> 
> @@ -1639,6 +1656,43 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
>  	return pkt_idx;
>  }
> 
> +static __rte_always_inline void
> +write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
> +{
> +	uint16_t nr_left = n_descs;
> +	uint16_t nr_copy;
> +	uint16_t to, from;
> +
> +	do {
> +		from = vq->last_async_desc_idx & (vq->size - 1);
> +		nr_copy = nr_left + from <= vq->size ? nr_left :
> +			vq->size - from;
> +		to = vq->last_used_idx & (vq->size - 1);
> +
> +		if (to + nr_copy <= vq->size) {
> +			rte_memcpy(&vq->used->ring[to],
> +					&vq->async_descs_split[from],
> +					nr_copy *
> +					sizeof(struct vring_used_elem));
> +		} else {
> +			uint16_t size = vq->size - to;
> +
> +			rte_memcpy(&vq->used->ring[to],
> +					&vq->async_descs_split[from],
> +					size *
> +					sizeof(struct vring_used_elem));
> +			rte_memcpy(vq->used->ring,
> +					&vq->async_descs_split[from +
> +					size], (nr_copy - size) *
> +					sizeof(struct vring_used_elem));
> +		}
> +
> +		vq->last_async_desc_idx += nr_copy;
> +		vq->last_used_idx += nr_copy;
> +		nr_left -= nr_copy;
> +	} while (nr_left > 0);
> +}
> +
>  uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
>  		struct rte_mbuf **pkts, uint16_t count)
>  {
> @@ -1695,39 +1749,7 @@ uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
>  	vq->async_pkts_inflight_n -= n_pkts_put;
> 
>  	if (likely(vq->enabled && vq->access_ok)) {
> -		uint16_t nr_left = n_descs;
> -		uint16_t nr_copy;
> -		uint16_t to;
> -
> -		/* write back completed descriptors to used ring */
> -		do {
> -			from = vq->last_async_desc_idx & (vq->size - 1);
> -			nr_copy = nr_left + from <= vq->size ? nr_left :
> -				vq->size - from;
> -			to = vq->last_used_idx & (vq->size - 1);
> -
> -			if (to + nr_copy <= vq->size) {
> -				rte_memcpy(&vq->used->ring[to],
> -						&vq->async_descs_split[from],
> -						nr_copy *
> -						sizeof(struct vring_used_elem));
> -			} else {
> -				uint16_t size = vq->size - to;
> -
> -				rte_memcpy(&vq->used->ring[to],
> -						&vq->async_descs_split[from],
> -						size *
> -						sizeof(struct vring_used_elem));
> -				rte_memcpy(vq->used->ring,
> -						&vq->async_descs_split[from +
> -						size], (nr_copy - size) *
> -						sizeof(struct vring_used_elem));
> -			}
> -
> -			vq->last_async_desc_idx += nr_copy;
> -			vq->last_used_idx += nr_copy;
> -			nr_left -= nr_copy;
> -		} while (nr_left > 0);
> +		write_back_completed_descs_split(vq, n_descs);
> 
>  		__atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
>  		vhost_vring_call_split(dev, vq);
> --
> 2.29.2


Thread overview: 60+ messages
2021-03-17  8:54 [dpdk-dev] [PATCH] vhost: add support for packed ring in async vhost Cheng Jiang
2021-03-22  6:15 ` [dpdk-dev] [PATCH v2] " Cheng Jiang
2021-03-24  9:19   ` Liu, Yong
2021-03-29 12:29     ` Jiang, Cheng1
2021-03-31 14:06 ` [dpdk-dev] [PATCH v3] " Cheng Jiang
2021-04-07  6:26   ` Hu, Jiayu
2021-04-08 12:01     ` Jiang, Cheng1
2021-04-10 10:25 ` [dpdk-dev] [PATCH v4 0/4] " Cheng Jiang
2021-04-10 10:25   ` [dpdk-dev] [PATCH v4 1/4] vhost: abstract and reorganize async split ring code Cheng Jiang
2021-04-10 10:25   ` Cheng Jiang
2021-04-10 10:25   ` [dpdk-dev] [PATCH v4 2/4] vhost: add support for packed ring in async vhost Cheng Jiang
2021-04-10 10:25   ` [dpdk-dev] [PATCH v4 3/4] vhost: add batch datapath for async vhost packed ring Cheng Jiang
2021-04-10 10:25   ` [dpdk-dev] [PATCH v4 4/4] doc: add release note for vhost async " Cheng Jiang
2021-04-12 11:34 ` [dpdk-dev] [PATCH v5 0/4] add support for packed ring in async vhost Cheng Jiang
2021-04-12 11:34   ` [dpdk-dev] [PATCH v5 1/4] vhost: abstract and reorganize async split ring code Cheng Jiang
2021-04-13  2:44     ` Hu, Jiayu [this message]
2021-04-13  3:26       ` Jiang, Cheng1
2021-04-13  7:11     ` Maxime Coquelin
2021-04-13  9:06       ` Jiang, Cheng1
2021-04-12 11:34   ` [dpdk-dev] [PATCH v5 2/4] vhost: add support for packed ring in async vhost Cheng Jiang
2021-04-13  8:36     ` Maxime Coquelin
2021-04-13 11:48       ` Jiang, Cheng1
2021-04-13 13:08         ` Maxime Coquelin
2021-04-13 13:50           ` Jiang, Cheng1
2021-04-12 11:34   ` [dpdk-dev] [PATCH v5 3/4] vhost: add batch datapath for async vhost packed ring Cheng Jiang
2021-04-12 11:34   ` [dpdk-dev] [PATCH v5 4/4] doc: add release note for vhost async " Cheng Jiang
2021-04-13 14:55 ` [dpdk-dev] [PATCH v6 0/4] add support for packed ring in async vhost Cheng Jiang
2021-04-13 14:55   ` [dpdk-dev] [PATCH v6 1/4] vhost: abstract and reorganize async split ring code Cheng Jiang
2021-04-13 14:55   ` [dpdk-dev] [PATCH v6 2/4] vhost: add support for packed ring in async vhost Cheng Jiang
2021-04-13 14:55   ` [dpdk-dev] [PATCH v6 3/4] vhost: add batch datapath for async vhost packed ring Cheng Jiang
2021-04-13 14:55   ` [dpdk-dev] [PATCH v6 4/4] doc: add release note for vhost async " Cheng Jiang
2021-04-14  6:13 ` [dpdk-dev] [PATCH v7 0/4] add support for packed ring in async vhost Cheng Jiang
2021-04-14  6:13   ` [dpdk-dev] [PATCH v7 1/4] vhost: abstract and reorganize async split ring code Cheng Jiang
2021-04-14 12:24     ` Maxime Coquelin
2021-04-14  6:13   ` [dpdk-dev] [PATCH v7 2/4] vhost: add support for packed ring in async vhost Cheng Jiang
2021-04-14 13:40     ` Maxime Coquelin
2021-04-15  5:42       ` Jiang, Cheng1
2021-04-15  2:02     ` Hu, Jiayu
2021-04-15  5:54       ` Jiang, Cheng1
2021-04-14  6:13   ` [dpdk-dev] [PATCH v7 3/4] vhost: add batch datapath for async vhost packed ring Cheng Jiang
2021-04-14  6:13   ` [dpdk-dev] [PATCH v7 4/4] doc: add release note for vhost async " Cheng Jiang
2021-04-19  8:51 ` [dpdk-dev] [PATCH v8 0/4] add support for packed ring in async vhost Cheng Jiang
2021-04-19  8:51   ` [dpdk-dev] [PATCH v8 1/4] vhost: abstract and reorganize async split ring code Cheng Jiang
2021-04-27  1:19     ` Hu, Jiayu
2021-04-19  8:51   ` [dpdk-dev] [PATCH v8 2/4] vhost: add support for packed ring in async vhost Cheng Jiang
2021-04-27  5:16     ` Hu, Jiayu
2021-04-27  6:07       ` Jiang, Cheng1
2021-04-19  8:51   ` [dpdk-dev] [PATCH v8 3/4] vhost: add batch datapath for async vhost packed ring Cheng Jiang
2021-04-19  8:51   ` [dpdk-dev] [PATCH v8 4/4] doc: add release note for vhost async " Cheng Jiang
2021-04-27  8:03 ` [dpdk-dev] [PATCH v9 0/4] add support for packed ring in async vhost Cheng Jiang
2021-04-27  8:03   ` [dpdk-dev] [PATCH v9 1/4] vhost: abstract and reorganize async split ring code Cheng Jiang
2021-04-27  8:03   ` [dpdk-dev] [PATCH v9 2/4] vhost: add support for packed ring in async vhost Cheng Jiang
2021-04-29  1:48     ` Hu, Jiayu
2021-04-29  9:50     ` Maxime Coquelin
2021-04-27  8:03   ` [dpdk-dev] [PATCH v9 3/4] vhost: add batch datapath for async vhost packed ring Cheng Jiang
2021-04-29  9:57     ` Maxime Coquelin
2021-04-27  8:03   ` [dpdk-dev] [PATCH v9 4/4] doc: add release note for vhost async " Cheng Jiang
2021-04-29  9:58     ` Maxime Coquelin
2021-05-04 18:38     ` Ferruh Yigit
2021-05-04  8:28   ` [dpdk-dev] [PATCH v9 0/4] add support for packed ring in async vhost Maxime Coquelin
