From: "Jiang, Cheng1" <cheng1.jiang@intel.com>
To: "Ma, WenwuX" <wenwux.ma@intel.com>, "dev@dpdk.org" <dev@dpdk.org>
Cc: "maxime.coquelin@redhat.com" <maxime.coquelin@redhat.com>,
	"Xia, Chenbo" <chenbo.xia@intel.com>,
	"Hu, Jiayu" <jiayu.hu@intel.com>,
	"Pai G, Sunil" <sunil.pai.g@intel.com>,
	"Yang, YvonneX" <yvonnex.yang@intel.com>
Subject: Re: [dpdk-dev] [PATCH v2 2/4] examples/vhost: refactor vhost enqueue and dequeue datapaths
Date: Mon, 27 Sep 2021 06:56:05 +0000
Message-ID: <SJ0PR11MB5006EF25AC8573888330ABACDCA79@SJ0PR11MB5006.namprd11.prod.outlook.com>
In-Reply-To: <20210917192703.385510-3-wenwux.ma@intel.com>

Hi,

> -----Original Message-----
> From: Ma, WenwuX <wenwux.ma@intel.com>
> Sent: Saturday, September 18, 2021 3:27 AM
> To: dev@dpdk.org
> Cc: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>;
> Jiang, Cheng1 <cheng1.jiang@intel.com>; Hu, Jiayu <jiayu.hu@intel.com>;
> Pai G, Sunil <sunil.pai.g@intel.com>; Yang, YvonneX
> <yvonnex.yang@intel.com>; Ma, WenwuX <wenwux.ma@intel.com>
> Subject: [PATCH v2 2/4] examples/vhost: refactor vhost enqueue and
> dequeue datapaths
> 
> Previously, we checked a flag in the data path to decide which
> enqueue/dequeue functions to call.
> 
> Now we use an ops struct that is initialized when the vhost device
> is created, so the data path can call the ops directly without any
> further flag checks.
> 
> Signed-off-by: Wenwu Ma <wenwux.ma@intel.com>
> Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> Tested-by: Yvonne Yang <yvonnex.yang@intel.com>
> ---
>  examples/vhost/main.c       | 100 +++++++++++++++++++++---------------
>  examples/vhost/main.h       |  28 ++++++++--
>  examples/vhost/virtio_net.c |  16 +++++-
>  3 files changed, 98 insertions(+), 46 deletions(-)
> 
> diff --git a/examples/vhost/main.c b/examples/vhost/main.c
> index d0bf1f31e3..254f7097bc 100644
> --- a/examples/vhost/main.c
> +++ b/examples/vhost/main.c
> @@ -106,6 +106,8 @@ static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
>  static char *socket_files;
>  static int nb_sockets;
> 
> +static struct vhost_queue_ops vdev_queue_ops[MAX_VHOST_DEVICE];
> +
>  /* empty vmdq configuration structure. Filled in programatically */
>  static struct rte_eth_conf vmdq_conf_default = {
>  	.rxmode = {
> @@ -879,22 +881,8 @@ drain_vhost(struct vhost_dev *vdev)
>  	uint16_t nr_xmit = vhost_txbuff[buff_idx]->len;
>  	struct rte_mbuf **m = vhost_txbuff[buff_idx]->m_table;
> 
> -	if (builtin_net_driver) {
> -		ret = vs_enqueue_pkts(vdev, VIRTIO_RXQ, m, nr_xmit);
> -	} else if (async_vhost_driver) {
> -		uint16_t enqueue_fail = 0;
> -
> -		complete_async_pkts(vdev);
> -		ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ, m, nr_xmit);
> -		__atomic_add_fetch(&vdev->pkts_inflight, ret, __ATOMIC_SEQ_CST);
> -
> -		enqueue_fail = nr_xmit - ret;
> -		if (enqueue_fail)
> -			free_pkts(&m[ret], nr_xmit - ret);
> -	} else {
> -		ret = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
> -						m, nr_xmit);
> -	}
> +	ret = vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev,
> +					VIRTIO_RXQ, m, nr_xmit);
> 

Now the line-length limit is 100 characters, so you don't have to split this into two lines.
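For example, something like this (just a sketch, not tested):

	ret = vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev, VIRTIO_RXQ, m, nr_xmit);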

>  	if (enable_stats) {
>  		__atomic_add_fetch(&vdev->stats.rx_total_atomic, nr_xmit,
> @@ -1173,6 +1161,33 @@ drain_mbuf_table(struct mbuf_table *tx_q)
>  	}
>  }
> 
> +uint16_t
> +async_enqueue_pkts(struct vhost_dev *vdev, uint16_t queue_id,
> +		struct rte_mbuf **pkts, uint32_t rx_count)
> +{
> +	uint16_t enqueue_count;
> +	uint16_t enqueue_fail = 0;
> +
> +	complete_async_pkts(vdev);
> +	enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
> +				queue_id, pkts, rx_count);

Same here.

> +	__atomic_add_fetch(&vdev->pkts_inflight, enqueue_count,
> +					__ATOMIC_SEQ_CST);

Same here.

> +
> +	enqueue_fail = rx_count - enqueue_count;
> +	if (enqueue_fail)
> +		free_pkts(&pkts[enqueue_count], enqueue_fail);
> +
> +	return enqueue_count;
> +}
> +
> +uint16_t
> +sync_enqueue_pkts(struct vhost_dev *vdev, uint16_t queue_id,
> +		struct rte_mbuf **pkts, uint32_t rx_count)
> +{
> +	return rte_vhost_enqueue_burst(vdev->vid, queue_id, pkts, rx_count);
> +}
> +
>  static __rte_always_inline void
>  drain_eth_rx(struct vhost_dev *vdev)
>  {
> @@ -1203,25 +1218,8 @@ drain_eth_rx(struct vhost_dev *vdev)
>  		}
>  	}
> 
> -	if (builtin_net_driver) {
> -		enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
> -						pkts, rx_count);
> -	} else if (async_vhost_driver) {
> -		uint16_t enqueue_fail = 0;
> -
> -		complete_async_pkts(vdev);
> -		enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
> -					VIRTIO_RXQ, pkts, rx_count);
> -		__atomic_add_fetch(&vdev->pkts_inflight, enqueue_count, __ATOMIC_SEQ_CST);
> -
> -		enqueue_fail = rx_count - enqueue_count;
> -		if (enqueue_fail)
> -			free_pkts(&pkts[enqueue_count], enqueue_fail);
> -
> -	} else {
> -		enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
> -						pkts, rx_count);
> -	}
> +	enqueue_count = vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev,
> +						VIRTIO_RXQ, pkts, rx_count);
> 
>  	if (enable_stats) {
>  		__atomic_add_fetch(&vdev->stats.rx_total_atomic, rx_count,
> @@ -1234,6 +1232,14 @@ drain_eth_rx(struct vhost_dev *vdev)
>  		free_pkts(pkts, rx_count);
>  }
> 
> +uint16_t sync_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
> +			struct rte_mempool *mbuf_pool,
> +			struct rte_mbuf **pkts, uint16_t count)
> +{
> +	return rte_vhost_dequeue_burst(dev->vid, queue_id,
> +					mbuf_pool, pkts, count);

Same here.

> +}
> +
>  static __rte_always_inline void
>  drain_virtio_tx(struct vhost_dev *vdev)
>  {
> @@ -1241,13 +1247,8 @@ drain_virtio_tx(struct vhost_dev *vdev)
>  	uint16_t count;
>  	uint16_t i;
> 
> -	if (builtin_net_driver) {
> -		count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
> -					pkts, MAX_PKT_BURST);
> -	} else {
> -		count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,
> -					mbuf_pool, pkts, MAX_PKT_BURST);
> -	}
> +	count = vdev_queue_ops[vdev->vid].dequeue_pkt_burst(vdev,
> +				VIRTIO_TXQ, mbuf_pool, pkts, MAX_PKT_BURST);
> 
>  	/* setup VMDq for the first packet */
>  	if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
> @@ -1432,6 +1433,21 @@ new_device(int vid)
>  		}
>  	}
> 
> +	if (builtin_net_driver) {
> +		vdev_queue_ops[vid].enqueue_pkt_burst = builtin_enqueue_pkts;
> +		vdev_queue_ops[vid].dequeue_pkt_burst = builtin_dequeue_pkts;
> +	} else {
> +		if (async_vhost_driver) {
> +			vdev_queue_ops[vid].enqueue_pkt_burst =
> +							async_enqueue_pkts;

Same here.

> +		} else {
> +			vdev_queue_ops[vid].enqueue_pkt_burst =
> +							sync_enqueue_pkts;
> +		}

Same here. And it seems the '{ }' braces aren't needed here, since each branch is a single statement.
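Something along these lines might be a bit cleaner (untested, just to illustrate):

	if (builtin_net_driver) {
		vdev_queue_ops[vid].enqueue_pkt_burst = builtin_enqueue_pkts;
		vdev_queue_ops[vid].dequeue_pkt_burst = builtin_dequeue_pkts;
	} else {
		if (async_vhost_driver)
			vdev_queue_ops[vid].enqueue_pkt_burst = async_enqueue_pkts;
		else
			vdev_queue_ops[vid].enqueue_pkt_burst = sync_enqueue_pkts;

		vdev_queue_ops[vid].dequeue_pkt_burst = sync_dequeue_pkts;
	}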

Thanks,
Cheng

> +
> +		vdev_queue_ops[vid].dequeue_pkt_burst = sync_dequeue_pkts;
> +	}
> +
>  	if (builtin_net_driver)
>  		vs_vhost_net_setup(vdev);
> 
> diff --git a/examples/vhost/main.h b/examples/vhost/main.h
> index e7b1ac60a6..2c5a558f12 100644
> --- a/examples/vhost/main.h
> +++ b/examples/vhost/main.h
> @@ -61,6 +61,19 @@ struct vhost_dev {
>  	struct vhost_queue queues[MAX_QUEUE_PAIRS * 2];
>  } __rte_cache_aligned;
> 
> +typedef uint16_t (*vhost_enqueue_burst_t)(struct vhost_dev *dev,
> +			uint16_t queue_id, struct rte_mbuf **pkts,
> +			uint32_t count);
> +
> +typedef uint16_t (*vhost_dequeue_burst_t)(struct vhost_dev *dev,
> +			uint16_t queue_id, struct rte_mempool *mbuf_pool,
> +			struct rte_mbuf **pkts, uint16_t count);
> +
> +struct vhost_queue_ops {
> +	vhost_enqueue_burst_t enqueue_pkt_burst;
> +	vhost_dequeue_burst_t dequeue_pkt_burst;
> +};
> +
>  TAILQ_HEAD(vhost_dev_tailq_list, vhost_dev);
> 
> 
> @@ -87,7 +100,16 @@ void vs_vhost_net_remove(struct vhost_dev *dev);
>  uint16_t vs_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
>  			 struct rte_mbuf **pkts, uint32_t count);
> 
> -uint16_t vs_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
> -			 struct rte_mempool *mbuf_pool,
> -			 struct rte_mbuf **pkts, uint16_t count);
> +uint16_t builtin_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
> +			struct rte_mbuf **pkts, uint32_t count);
> +uint16_t builtin_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
> +			struct rte_mempool *mbuf_pool,
> +			struct rte_mbuf **pkts, uint16_t count);
> +uint16_t sync_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
> +			 struct rte_mbuf **pkts, uint32_t count);
> +uint16_t sync_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
> +			struct rte_mempool *mbuf_pool,
> +			struct rte_mbuf **pkts, uint16_t count);
> +uint16_t async_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
> +			 struct rte_mbuf **pkts, uint32_t count);
>  #endif /* _MAIN_H_ */
> diff --git a/examples/vhost/virtio_net.c b/examples/vhost/virtio_net.c
> index 9064fc3a82..2432a96566 100644
> --- a/examples/vhost/virtio_net.c
> +++ b/examples/vhost/virtio_net.c
> @@ -238,6 +238,13 @@ vs_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
>  	return count;
>  }
> 
> +uint16_t
> +builtin_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
> +		struct rte_mbuf **pkts, uint32_t count)
> +{
> +	return vs_enqueue_pkts(dev, queue_id, pkts, count);
> +}
> +
>  static __rte_always_inline int
>  dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
>  	    struct rte_mbuf *m, uint16_t desc_idx,
> @@ -363,7 +370,7 @@ dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
>  	return 0;
>  }
> 
> -uint16_t
> +static uint16_t
>  vs_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
>  	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
>  {
> @@ -440,3 +447,10 @@ vs_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
> 
>  	return i;
>  }
> +
> +uint16_t
> +builtin_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
> +	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
> +{
> +	return vs_dequeue_pkts(dev, queue_id, mbuf_pool, pkts, count);
> +}
> --
> 2.25.1

