DPDK patches and discussions
From: "Xia, Chenbo" <chenbo.xia@intel.com>
To: Maxime Coquelin <maxime.coquelin@redhat.com>,
	"dev@dpdk.org" <dev@dpdk.org>, "Hu, Jiayu" <jiayu.hu@intel.com>,
	"Wang, YuanX" <yuanx.wang@intel.com>,
	"Ma, WenwuX" <wenwux.ma@intel.com>,
	"Richardson, Bruce" <bruce.richardson@intel.com>,
	"Mcnamara, John" <john.mcnamara@intel.com>
Subject: Re: [dpdk-dev] [PATCH v2 06/15] vhost: introduce specific iovec structure
Date: Fri, 29 Oct 2021 06:19:25 +0000
Message-ID: <SN6PR11MB35045EF69F07AB9CD32617419C879@SN6PR11MB3504.namprd11.prod.outlook.com>
In-Reply-To: <20211026162904.482987-7-maxime.coquelin@redhat.com>

> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Wednesday, October 27, 2021 12:29 AM
> To: dev@dpdk.org; Xia, Chenbo <chenbo.xia@intel.com>; Hu, Jiayu
> <jiayu.hu@intel.com>; Wang, YuanX <yuanx.wang@intel.com>; Ma, WenwuX
> <wenwux.ma@intel.com>; Richardson, Bruce <bruce.richardson@intel.com>;
> Mcnamara, John <john.mcnamara@intel.com>
> Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
> Subject: [PATCH v2 06/15] vhost: introduce specific iovec structure
> 
> This patch introduces the rte_vhost_iovec struct, which contains
> both the source and destination addresses, since there is always
> a 1:1 mapping between source and destination. While using the
> standard iovec struct might have seemed preferable, having to
> duplicate the IO vectors and their iterators is memory-inefficient
> and makes the implementation more complex.
> 
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
>  examples/vhost/ioat.c       | 24 ++++++-------
>  lib/vhost/rte_vhost_async.h | 19 +++++++----
>  lib/vhost/vhost.h           |  6 ++--
>  lib/vhost/virtio_net.c      | 68 ++++++++++++++-----------------------
>  4 files changed, 52 insertions(+), 65 deletions(-)
> 
> diff --git a/examples/vhost/ioat.c b/examples/vhost/ioat.c
> index 457f8171f0..dcbcf65e4e 100644
> --- a/examples/vhost/ioat.c
> +++ b/examples/vhost/ioat.c
> @@ -129,33 +129,31 @@ ioat_transfer_data_cb(int vid, uint16_t queue_id,
>  {
>  	uint32_t i_desc;
>  	uint16_t dev_id = dma_bind[vid].dmas[queue_id * 2 + VIRTIO_RXQ].dev_id;
> -	struct rte_vhost_iov_iter *src = NULL;
> -	struct rte_vhost_iov_iter *dst = NULL;
> +	struct rte_vhost_iov_iter *iter = NULL;
>  	unsigned long i_seg;
>  	unsigned short mask = MAX_ENQUEUED_SIZE - 1;
>  	unsigned short write = cb_tracker[dev_id].next_write;
> 
>  	if (!opaque_data) {
>  		for (i_desc = 0; i_desc < count; i_desc++) {
> -			src = descs[i_desc].src;
> -			dst = descs[i_desc].dst;
> +			iter = descs[i_desc].iter;
>  			i_seg = 0;
> -			if (cb_tracker[dev_id].ioat_space < src->nr_segs)
> +			if (cb_tracker[dev_id].ioat_space < iter->nr_segs)
>  				break;
> -			while (i_seg < src->nr_segs) {
> +			while (i_seg < iter->nr_segs) {
>  				rte_ioat_enqueue_copy(dev_id,
> -					(uintptr_t)(src->iov[i_seg].iov_base)
> -						+ src->offset,
> -					(uintptr_t)(dst->iov[i_seg].iov_base)
> -						+ dst->offset,
> -					src->iov[i_seg].iov_len,
> +					(uintptr_t)(iter->iov[i_seg].src_addr)
> +						+ iter->offset,
> +					(uintptr_t)(iter->iov[i_seg].dst_addr)
> +						+ iter->offset,
> +					iter->iov[i_seg].len,
>  					0,
>  					0);
>  				i_seg++;
>  			}
>  			write &= mask;
> -			cb_tracker[dev_id].size_track[write] = src->nr_segs;
> -			cb_tracker[dev_id].ioat_space -= src->nr_segs;
> +			cb_tracker[dev_id].size_track[write] = iter->nr_segs;
> +			cb_tracker[dev_id].ioat_space -= iter->nr_segs;
>  			write++;
>  		}
>  	} else {
> diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
> index 789b80f5a0..d7bb77bf90 100644
> --- a/lib/vhost/rte_vhost_async.h
> +++ b/lib/vhost/rte_vhost_async.h
> @@ -7,6 +7,15 @@
> 
>  #include "rte_vhost.h"
> 
> +/**
> + * iovec
> + */
> +struct rte_vhost_iovec {
> +	void *src_addr;
> +	void *dst_addr;
> +	size_t len;
> +};
> +
>  /**
>   * iovec iterator
>   */
> @@ -16,19 +25,17 @@ struct rte_vhost_iov_iter {
>  	/** total bytes of data in this iterator */
>  	size_t count;
>  	/** pointer to the iovec array */
> -	struct iovec *iov;
> +	struct rte_vhost_iovec *iov;
>  	/** number of iovec in this iterator */
>  	unsigned long nr_segs;
>  };
> 
>  /**
> - * dma transfer descriptor pair
> + * dma transfer descriptor
>   */
>  struct rte_vhost_async_desc {
> -	/** source memory iov_iter */
> -	struct rte_vhost_iov_iter *src;
> -	/** destination memory iov_iter */
> -	struct rte_vhost_iov_iter *dst;
> +	/* memory iov_iter */
> +	struct rte_vhost_iov_iter *iter;
>  };
> 
>  /**
> diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
> index 132c1b070e..e51e496ebe 100644
> --- a/lib/vhost/vhost.h
> +++ b/lib/vhost/vhost.h
> @@ -132,10 +132,8 @@ struct vhost_async {
>  	/* operation callbacks for DMA */
>  	struct rte_vhost_async_channel_ops ops;
> 
> -	struct rte_vhost_iov_iter src_iov_iter[VHOST_MAX_ASYNC_IT];
> -	struct rte_vhost_iov_iter dst_iov_iter[VHOST_MAX_ASYNC_IT];
> -	struct iovec src_iovec[VHOST_MAX_ASYNC_VEC];
> -	struct iovec dst_iovec[VHOST_MAX_ASYNC_VEC];
> +	struct rte_vhost_iov_iter iov_iter[VHOST_MAX_ASYNC_IT];
> +	struct rte_vhost_iovec iovec[VHOST_MAX_ASYNC_VEC];
> 
>  	/* data transfer status */
>  	struct async_inflight_info *pkts_info;
> diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
> index c4a8b5276f..3c8be48ca7 100644
> --- a/lib/vhost/virtio_net.c
> +++ b/lib/vhost/virtio_net.c
> @@ -925,15 +925,16 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
>  }
> 
>  static __rte_always_inline void
> -async_fill_vec(struct iovec *v, void *base, size_t len)
> +async_fill_vec(struct rte_vhost_iovec *v, void *src, void *dst, size_t len)
>  {
> -	v->iov_base = base;
> -	v->iov_len = len;
> +	v->src_addr = src;
> +	v->dst_addr = dst;
> +	v->len = len;
>  }
> 
>  static __rte_always_inline void
>  async_fill_iter(struct rte_vhost_iov_iter *it, size_t count,
> -	struct iovec *vec, unsigned long nr_seg)
> +	struct rte_vhost_iovec *vec, unsigned long nr_seg)
>  {
>  	it->offset = 0;
>  	it->count = count;
> @@ -948,20 +949,16 @@ async_fill_iter(struct rte_vhost_iov_iter *it, size_t count,
>  }
> 
>  static __rte_always_inline void
> -async_fill_desc(struct rte_vhost_async_desc *desc,
> -	struct rte_vhost_iov_iter *src, struct rte_vhost_iov_iter *dst)
> +async_fill_desc(struct rte_vhost_async_desc *desc, struct rte_vhost_iov_iter *iter)
>  {
> -	desc->src = src;
> -	desc->dst = dst;
> +	desc->iter = iter;
>  }
> 
>  static __rte_always_inline int
>  async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
>  			struct rte_mbuf *m, struct buf_vector *buf_vec,
>  			uint16_t nr_vec, uint16_t num_buffers,
> -			struct iovec *src_iovec, struct iovec *dst_iovec,
> -			struct rte_vhost_iov_iter *src_it,
> -			struct rte_vhost_iov_iter *dst_it)
> +			struct rte_vhost_iovec *iovec, struct rte_vhost_iov_iter *iter)
>  {
>  	struct rte_mbuf *hdr_mbuf;
>  	struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
> @@ -1075,11 +1072,9 @@ async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
>  				goto out;
>  			}
> 
> -			async_fill_vec(src_iovec + tvec_idx,
> +			async_fill_vec(iovec + tvec_idx,
>  				(void *)(uintptr_t)rte_pktmbuf_iova_offset(m,
> -				mbuf_offset), (size_t)mapped_len);
> -			async_fill_vec(dst_iovec + tvec_idx,
> -					hpa, (size_t)mapped_len);
> +				mbuf_offset), hpa, (size_t)mapped_len);
> 
>  			tlen += (uint32_t)mapped_len;
>  			cpy_len -= (uint32_t)mapped_len;
> @@ -1091,8 +1086,7 @@ async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
>  		}
>  	}
> 
> -	async_fill_iter(src_it, tlen, src_iovec, tvec_idx);
> -	async_fill_iter(dst_it, tlen, dst_iovec, tvec_idx);
> +	async_fill_iter(iter, tlen, iovec, tvec_idx);
>  out:
>  	return error;
>  }
> @@ -1509,11 +1503,9 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
>  	uint16_t avail_head;
> 
>  	struct vhost_async *async = vq->async;
> -	struct rte_vhost_iov_iter *src_iter = async->src_iov_iter;
> -	struct rte_vhost_iov_iter *dst_iter = async->dst_iov_iter;
> +	struct rte_vhost_iov_iter *iter = async->iov_iter;
>  	struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
> -	struct iovec *src_iovec = async->src_iovec;
> -	struct iovec *dst_iovec = async->dst_iovec;
> +	struct rte_vhost_iovec *iovec = async->iovec;
>  	struct async_inflight_info *pkts_info = async->pkts_info;
>  	uint32_t n_pkts = 0, pkt_err = 0;
>  	int32_t n_xfer;
> @@ -1545,19 +1537,18 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
>  			vq->last_avail_idx + num_buffers);
> 
>  		if (async_mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec, num_buffers,
> -				&src_iovec[iovec_idx], &dst_iovec[iovec_idx],
> -				&src_iter[it_idx], &dst_iter[it_idx]) < 0) {
> +				&iovec[iovec_idx], &iter[it_idx]) < 0) {
>  			vq->shadow_used_idx -= num_buffers;
>  			break;
>  		}
> 
> -		async_fill_desc(&tdes[pkt_burst_idx++], &src_iter[it_idx], &dst_iter[it_idx]);
> +		async_fill_desc(&tdes[pkt_burst_idx++], &iter[it_idx]);
> 
>  		slot_idx = (async->pkts_idx + pkt_idx) & (vq->size - 1);
>  		pkts_info[slot_idx].descs = num_buffers;
>  		pkts_info[slot_idx].mbuf = pkts[pkt_idx];
> 
> -		iovec_idx += src_iter[it_idx].nr_segs;
> +		iovec_idx += iter[it_idx].nr_segs;
>  		it_idx++;
> 
>  		vq->last_avail_idx += num_buffers;
> @@ -1707,9 +1698,8 @@ vhost_enqueue_async_packed(struct virtio_net *dev,
>  			    struct buf_vector *buf_vec,
>  			    uint16_t *nr_descs,
>  			    uint16_t *nr_buffers,
> -			    struct iovec *src_iovec, struct iovec *dst_iovec,
> -			    struct rte_vhost_iov_iter *src_it,
> -			    struct rte_vhost_iov_iter *dst_it)
> +			    struct rte_vhost_iovec *iovec,
> +			    struct rte_vhost_iov_iter *iter)
>  {
>  	uint16_t nr_vec = 0;
>  	uint16_t avail_idx = vq->last_avail_idx;
> @@ -1757,8 +1747,7 @@ vhost_enqueue_async_packed(struct virtio_net *dev,
>  	}
> 
>  	if (unlikely(async_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec,
> -					*nr_buffers, src_iovec, dst_iovec,
> -					src_it, dst_it) < 0))
> +					*nr_buffers, iovec, iter) < 0))
>  		return -1;
> 
>  	vhost_shadow_enqueue_packed(vq, buffer_len, buffer_buf_id, buffer_desc_count, *nr_buffers);
> @@ -1769,14 +1758,12 @@ vhost_enqueue_async_packed(struct virtio_net *dev,
>  static __rte_always_inline int16_t
>  virtio_dev_rx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
>  			    struct rte_mbuf *pkt, uint16_t *nr_descs, uint16_t *nr_buffers,
> -			    struct iovec *src_iovec, struct iovec *dst_iovec,
> -			    struct rte_vhost_iov_iter *src_it, struct rte_vhost_iov_iter *dst_it)
> +			    struct rte_vhost_iovec *iovec, struct rte_vhost_iov_iter *iter)
>  {
>  	struct buf_vector buf_vec[BUF_VECTOR_MAX];
> 
>  	if (unlikely(vhost_enqueue_async_packed(dev, vq, pkt, buf_vec, nr_descs, nr_buffers,
> -						 src_iovec, dst_iovec,
> -						 src_it, dst_it) < 0)) {
> +						 iovec, iter) < 0)) {
>  		VHOST_LOG_DATA(DEBUG, "(%d) failed to get enough desc from vring\n", dev->vid);
>  		return -1;
>  	}
> @@ -1825,11 +1812,9 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
>  	uint16_t num_descs;
> 
>  	struct vhost_async *async = vq->async;
> -	struct rte_vhost_iov_iter *src_iter = async->src_iov_iter;
> -	struct rte_vhost_iov_iter *dst_iter = async->dst_iov_iter;
> +	struct rte_vhost_iov_iter *iter = async->iov_iter;
>  	struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
> -	struct iovec *src_iovec = async->src_iovec;
> -	struct iovec *dst_iovec = async->dst_iovec;
> +	struct rte_vhost_iovec *iovec = async->iovec;
>  	struct async_inflight_info *pkts_info = async->pkts_info;
>  	uint32_t n_pkts = 0, pkt_err = 0;
>  	uint16_t slot_idx = 0;
> @@ -1842,17 +1827,16 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
>  		num_descs = 0;
>  		if (unlikely(virtio_dev_rx_async_packed(dev, vq, pkts[pkt_idx],
>  						&num_descs, &num_buffers,
> -						&src_iovec[iovec_idx], &dst_iovec[iovec_idx],
> -						&src_iter[it_idx], &dst_iter[it_idx]) < 0))
> +						&iovec[iovec_idx], &iter[it_idx]) < 0))
>  			break;
> 
>  		slot_idx = (async->pkts_idx + pkt_idx) % vq->size;
> 
> -		async_fill_desc(&tdes[pkt_burst_idx++], &src_iter[it_idx], &dst_iter[it_idx]);
> +		async_fill_desc(&tdes[pkt_burst_idx++], &iter[it_idx]);
>  		pkts_info[slot_idx].descs = num_descs;
>  		pkts_info[slot_idx].nr_buffers = num_buffers;
>  		pkts_info[slot_idx].mbuf = pkts[pkt_idx];
> -		iovec_idx += src_iter[it_idx].nr_segs;
> +		iovec_idx += iter[it_idx].nr_segs;
>  		it_idx++;
> 
>  		pkt_idx++;
> --
> 2.31.1
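
The consolidation looks good to me. For readers following the series,
here is a minimal sketch (not part of this patch) of the pattern the
new layout enables: a DMA backend walks a single combined iterator per
descriptor instead of the former src/dst pair. Field names follow the
structs introduced above; dma_copy() is a hypothetical placeholder for
the engine-specific call, e.g. rte_ioat_enqueue_copy() in ioat.c:

static void
enqueue_iter_copies(int dev_id, const struct rte_vhost_iov_iter *iter)
{
	unsigned long i_seg;

	/* One loop now covers both ends of each copy; before this
	 * patch, the src and dst iterators had to be walked in
	 * lockstep. dma_copy() stands in for the real enqueue call. */
	for (i_seg = 0; i_seg < iter->nr_segs; i_seg++) {
		const struct rte_vhost_iovec *v = &iter->iov[i_seg];

		dma_copy(dev_id,
			 (uintptr_t)v->src_addr + iter->offset,
			 (uintptr_t)v->dst_addr + iter->offset,
			 v->len);
	}
}

Since the 1:1 src/dst mapping is guaranteed, one rte_vhost_iovec per
segment also halves the array bookkeeping in struct vhost_async
compared to the old src_iovec/dst_iovec pair.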

Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>

Thread overview: 25+ messages
2021-10-26 16:28 [dpdk-dev] [PATCH v2 00/15] vhost: clean-up and simplify async implementation Maxime Coquelin
2021-10-26 16:28 ` [dpdk-dev] [PATCH v2 01/15] vhost: move async data in a dedicated structure Maxime Coquelin
2021-10-28 12:03   ` Xia, Chenbo
2021-10-26 16:28 ` [dpdk-dev] [PATCH v2 02/15] vhost: hide inflight async structure Maxime Coquelin
2021-10-28 12:07   ` Xia, Chenbo
2021-10-26 16:28 ` [dpdk-dev] [PATCH v2 03/15] vhost: simplify async IO vectors Maxime Coquelin
2021-10-29  5:37   ` Xia, Chenbo
2021-10-26 16:28 ` [dpdk-dev] [PATCH v2 04/15] vhost: simplify async IO vectors iterators Maxime Coquelin
2021-10-29  5:44   ` Xia, Chenbo
2021-10-26 16:28 ` [dpdk-dev] [PATCH v2 05/15] vhost: remove async batch threshold Maxime Coquelin
2021-10-29  6:19   ` Xia, Chenbo
2021-10-26 16:28 ` [dpdk-dev] [PATCH v2 06/15] vhost: introduce specific iovec structure Maxime Coquelin
2021-10-29  6:19   ` Xia, Chenbo [this message]
2021-10-26 16:28 ` [dpdk-dev] [PATCH v2 07/15] vhost: remove useless fields in async iterator struct Maxime Coquelin
2021-10-29  6:19   ` Xia, Chenbo
2021-10-26 16:28 ` [dpdk-dev] [PATCH v2 08/15] vhost: improve IO vector logic Maxime Coquelin
2021-10-26 16:28 ` [dpdk-dev] [PATCH v2 09/15] vhost: remove notion of async descriptor Maxime Coquelin
2021-10-26 16:28 ` [dpdk-dev] [PATCH v2 10/15] vhost: simplify async enqueue completion Maxime Coquelin
2021-10-26 16:29 ` [dpdk-dev] [PATCH v2 11/15] vhost: simplify getting the first in-flight index Maxime Coquelin
2021-10-26 16:29 ` [dpdk-dev] [PATCH v2 12/15] vhost: prepare async for mbuf to desc refactoring Maxime Coquelin
2021-10-26 16:29 ` [dpdk-dev] [PATCH v2 13/15] vhost: prepare sync " Maxime Coquelin
2021-10-26 16:29 ` [dpdk-dev] [PATCH v2 14/15] vhost: merge sync and async mbuf to desc filling Maxime Coquelin
2021-10-26 16:29 ` [dpdk-dev] [PATCH v2 15/15] vhost: increase number of async IO vectors Maxime Coquelin
2021-10-28 12:40 ` [dpdk-dev] [PATCH v2 00/15] vhost: clean-up and simplify async implementation Hu, Jiayu
2021-10-29 10:34 ` Maxime Coquelin
