DPDK patches and discussions
From: "Xia, Chenbo" <chenbo.xia@intel.com>
To: "Coquelin, Maxime" <maxime.coquelin@redhat.com>,
	"dev@dpdk.org" <dev@dpdk.org>,
	"david.marchand@redhat.com" <david.marchand@redhat.com>,
	"eperezma@redhat.com" <eperezma@redhat.com>
Subject: RE: [PATCH v1 10/21] net/virtio: alloc Rx SW ring only if vectorized path
Date: Mon, 30 Jan 2023 07:49:08 +0000	[thread overview]
Message-ID: <SN6PR11MB35048AEE5A2952796082C4619CD39@SN6PR11MB3504.namprd11.prod.outlook.com> (raw)
In-Reply-To: <20221130155639.150553-11-maxime.coquelin@redhat.com>

Hi Maxime,

> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Wednesday, November 30, 2022 11:56 PM
> To: dev@dpdk.org; Xia, Chenbo <chenbo.xia@intel.com>;
> david.marchand@redhat.com; eperezma@redhat.com
> Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
> Subject: [PATCH v1 10/21] net/virtio: alloc Rx SW ring only if vectorized
> path
> 
> This patch only allocates the SW ring when vectorized
> datapath is used. It also moves the SW ring and fake mbuf
> in the virtnet_rx struct since this is Rx-only.
> 
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
>  drivers/net/virtio/virtio_ethdev.c            | 88 ++++++++++++-------
>  drivers/net/virtio/virtio_rxtx.c              |  8 +-
>  drivers/net/virtio/virtio_rxtx.h              |  4 +-
>  drivers/net/virtio/virtio_rxtx_simple.h       |  2 +-
>  .../net/virtio/virtio_rxtx_simple_altivec.c   |  4 +-
>  drivers/net/virtio/virtio_rxtx_simple_neon.c  |  4 +-
>  drivers/net/virtio/virtio_rxtx_simple_sse.c   |  4 +-
>  drivers/net/virtio/virtqueue.c                |  6 +-
>  drivers/net/virtio/virtqueue.h                |  1 -
>  9 files changed, 72 insertions(+), 49 deletions(-)
> 
> diff --git a/drivers/net/virtio/virtio_ethdev.c
> b/drivers/net/virtio/virtio_ethdev.c
> index 8b17b450ec..46dd5606f6 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -339,6 +339,47 @@ virtio_free_queue_headers(struct virtqueue *vq)
>  	*hdr_mem = 0;
>  }
> 
> +static int
> +virtio_rxq_sw_ring_alloc(struct virtqueue *vq, int numa_node)
> +{
> +	void *sw_ring;
> +	struct rte_mbuf *mbuf;
> +	size_t size;
> +
> +	/* SW ring is only used with vectorized datapath */
> +	if (!vq->hw->use_vec_rx)
> +		return 0;
> +
> +	size = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq->vq_nentries) * sizeof(vq->rxq.sw_ring[0]);
> +
> +	sw_ring = rte_zmalloc_socket("sw_ring", size, RTE_CACHE_LINE_SIZE, numa_node);
> +	if (!sw_ring) {
> +		PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
> +		return -ENOMEM;
> +	}
> +
> +	mbuf = rte_zmalloc_socket("sw_ring", sizeof(*mbuf), RTE_CACHE_LINE_SIZE, numa_node);
> +	if (!mbuf) {
> +		PMD_INIT_LOG(ERR, "can not allocate fake mbuf");
> +		rte_free(sw_ring);
> +		return -ENOMEM;
> +	}
> +
> +	vq->rxq.sw_ring = sw_ring;
> +	vq->rxq.fake_mbuf = mbuf;
> +
> +	return 0;
> +}
> +
> +static void
> +virtio_rxq_sw_ring_free(struct virtqueue *vq)
> +{
> +	rte_free(vq->rxq.fake_mbuf);
> +	vq->rxq.fake_mbuf = NULL;
> +	rte_free(vq->rxq.sw_ring);
> +	vq->rxq.sw_ring = NULL;
> +}
> +
>  static int
>  virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
>  {
> @@ -346,14 +387,11 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t
> queue_idx)
>  	const struct rte_memzone *mz = NULL;
>  	unsigned int vq_size, size;
>  	struct virtio_hw *hw = dev->data->dev_private;
> -	struct virtnet_rx *rxvq = NULL;
>  	struct virtnet_ctl *cvq = NULL;
>  	struct virtqueue *vq;
> -	void *sw_ring = NULL;
>  	int queue_type = virtio_get_queue_type(hw, queue_idx);
>  	int ret;
>  	int numa_node = dev->device->numa_node;
> -	struct rte_mbuf *fake_mbuf = NULL;
> 
>  	PMD_INIT_LOG(INFO, "setting up queue: %u on NUMA node %d",
>  			queue_idx, numa_node);
> @@ -441,28 +479,9 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t
> queue_idx)
>  	}
> 
>  	if (queue_type == VTNET_RQ) {
> -		size_t sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *
> -			       sizeof(vq->sw_ring[0]);
> -
> -		sw_ring = rte_zmalloc_socket("sw_ring", sz_sw,
> -				RTE_CACHE_LINE_SIZE, numa_node);
> -		if (!sw_ring) {
> -			PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
> -			ret = -ENOMEM;
> +		ret = virtio_rxq_sw_ring_alloc(vq, numa_node);
> +		if (ret)
>  			goto free_hdr_mz;
> -		}
> -
> -		fake_mbuf = rte_zmalloc_socket("sw_ring", sizeof(*fake_mbuf),
> -				RTE_CACHE_LINE_SIZE, numa_node);
> -		if (!fake_mbuf) {
> -			PMD_INIT_LOG(ERR, "can not allocate fake mbuf");
> -			ret = -ENOMEM;
> -			goto free_sw_ring;
> -		}
> -
> -		vq->sw_ring = sw_ring;
> -		rxvq = &vq->rxq;
> -		rxvq->fake_mbuf = fake_mbuf;
>  	} else if (queue_type == VTNET_TQ) {
>  		virtqueue_txq_indirect_headers_init(vq);
>  	} else if (queue_type == VTNET_CQ) {
> @@ -486,9 +505,8 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t
> queue_idx)
> 
>  clean_vq:
>  	hw->cvq = NULL;
> -	rte_free(fake_mbuf);
> -free_sw_ring:
> -	rte_free(sw_ring);
> +	if (queue_type == VTNET_RQ)
> +		virtio_rxq_sw_ring_free(vq);
>  free_hdr_mz:
>  	virtio_free_queue_headers(vq);
>  free_mz:
> @@ -519,7 +537,7 @@ virtio_free_queues(struct virtio_hw *hw)
>  		queue_type = virtio_get_queue_type(hw, i);
>  		if (queue_type == VTNET_RQ) {
>  			rte_free(vq->rxq.fake_mbuf);
> -			rte_free(vq->sw_ring);
> +			rte_free(vq->rxq.sw_ring);
>  		}
> 
>  		virtio_free_queue_headers(vq);
> @@ -2195,6 +2213,11 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
> 
>  	rte_spinlock_init(&hw->state_lock);
> 
> +	if (vectorized) {
> +		hw->use_vec_rx = 1;
> +		hw->use_vec_tx = 1;
> +	}
> +
>  	/* reset device and negotiate default features */
>  	ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
>  	if (ret < 0)
> @@ -2202,12 +2225,11 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
> 
>  	if (vectorized) {
>  		if (!virtio_with_packed_queue(hw)) {
> -			hw->use_vec_rx = 1;
> +			hw->use_vec_tx = 0;
>  		} else {
> -#if defined(CC_AVX512_SUPPORT) || defined(RTE_ARCH_ARM)
> -			hw->use_vec_rx = 1;
> -			hw->use_vec_tx = 1;
> -#else
> +#if !defined(CC_AVX512_SUPPORT) && !defined(RTE_ARCH_ARM)
> +			hw->use_vec_rx = 0;
> +			hw->use_vec_tx = 0;
>  			PMD_DRV_LOG(INFO,
> 				"building environment do not support packed ring vectorized");
>  #endif
> diff --git a/drivers/net/virtio/virtio_rxtx.c
> b/drivers/net/virtio/virtio_rxtx.c
> index 4f69b97f41..2d0afd3302 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -737,9 +737,11 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev
> *dev, uint16_t queue_idx)
>  		virtio_rxq_vec_setup(rxvq);
>  	}
> 
> -	memset(rxvq->fake_mbuf, 0, sizeof(*rxvq->fake_mbuf));
> -	for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST; desc_idx++)
> -		vq->sw_ring[vq->vq_nentries + desc_idx] = rxvq->fake_mbuf;
> +	if (hw->use_vec_rx) {
> +		memset(rxvq->fake_mbuf, 0, sizeof(*rxvq->fake_mbuf));
> +		for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST; desc_idx++)
> +			vq->rxq.sw_ring[vq->vq_nentries + desc_idx] = rxvq->fake_mbuf;
> +	}
> 
>  	if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
>  		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
> diff --git a/drivers/net/virtio/virtio_rxtx.h
> b/drivers/net/virtio/virtio_rxtx.h
> index 57af630110..afc4b74534 100644
> --- a/drivers/net/virtio/virtio_rxtx.h
> +++ b/drivers/net/virtio/virtio_rxtx.h
> @@ -18,8 +18,8 @@ struct virtnet_stats {
>  };
> 
>  struct virtnet_rx {
> -	/* dummy mbuf, for wraparound when processing RX ring. */
> -	struct rte_mbuf *fake_mbuf;
> +	struct rte_mbuf **sw_ring;  /**< RX software ring. */
> +	struct rte_mbuf *fake_mbuf; /**< dummy mbuf, for wraparound when processing RX ring. */
>  	uint64_t mbuf_initializer; /**< value to init mbufs. */
>  	struct rte_mempool *mpool; /**< mempool for mbuf allocation */
> 
> diff --git a/drivers/net/virtio/virtio_rxtx_simple.h
> b/drivers/net/virtio/virtio_rxtx_simple.h
> index 8e235f4dbc..79196ed86e 100644
> --- a/drivers/net/virtio/virtio_rxtx_simple.h
> +++ b/drivers/net/virtio/virtio_rxtx_simple.h
> @@ -26,7 +26,7 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
>  	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
> 
>  	desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
> -	sw_ring = &vq->sw_ring[desc_idx];
> +	sw_ring = &vq->rxq.sw_ring[desc_idx];
>  	start_dp = &vq->vq_split.ring.desc[desc_idx];
> 
>  	ret = rte_mempool_get_bulk(rxvq->mpool, (void **)sw_ring,
> diff --git a/drivers/net/virtio/virtio_rxtx_simple_altivec.c
> b/drivers/net/virtio/virtio_rxtx_simple_altivec.c
> index e7f0ed6068..7910efc153 100644
> --- a/drivers/net/virtio/virtio_rxtx_simple_altivec.c
> +++ b/drivers/net/virtio/virtio_rxtx_simple_altivec.c
> @@ -103,8 +103,8 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf
> **rx_pkts,
> 
>  	desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
>  	rused = &vq->vq_split.ring.used->ring[desc_idx];
> -	sw_ring  = &vq->sw_ring[desc_idx];
> -	sw_ring_end = &vq->sw_ring[vq->vq_nentries];
> +	sw_ring  = &vq->rxq.sw_ring[desc_idx];

There are two spaces after 'sw_ring'; there should be only one.
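E.g. (just illustrating the expected spacing, not a tested change):

	sw_ring = &vq->rxq.sw_ring[desc_idx];
	sw_ring_end = &vq->rxq.sw_ring[vq->vq_nentries];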

> +	sw_ring_end = &vq->rxq.sw_ring[vq->vq_nentries];
> 
>  	rte_prefetch0(rused);
> 
> diff --git a/drivers/net/virtio/virtio_rxtx_simple_neon.c
> b/drivers/net/virtio/virtio_rxtx_simple_neon.c
> index 7fd92d1b0c..ffaa139bd6 100644
> --- a/drivers/net/virtio/virtio_rxtx_simple_neon.c
> +++ b/drivers/net/virtio/virtio_rxtx_simple_neon.c
> @@ -101,8 +101,8 @@ virtio_recv_pkts_vec(void *rx_queue,
> 
>  	desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
>  	rused = &vq->vq_split.ring.used->ring[desc_idx];
> -	sw_ring  = &vq->sw_ring[desc_idx];
> -	sw_ring_end = &vq->sw_ring[vq->vq_nentries];
> +	sw_ring  = &vq->rxq.sw_ring[desc_idx];

Ditto

> +	sw_ring_end = &vq->rxq.sw_ring[vq->vq_nentries];
> 
>  	rte_prefetch_non_temporal(rused);
> 
> diff --git a/drivers/net/virtio/virtio_rxtx_simple_sse.c
> b/drivers/net/virtio/virtio_rxtx_simple_sse.c
> index 7577f5e86d..ed608fbf2e 100644
> --- a/drivers/net/virtio/virtio_rxtx_simple_sse.c
> +++ b/drivers/net/virtio/virtio_rxtx_simple_sse.c
> @@ -101,8 +101,8 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf
> **rx_pkts,
> 
>  	desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
>  	rused = &vq->vq_split.ring.used->ring[desc_idx];
> -	sw_ring  = &vq->sw_ring[desc_idx];
> -	sw_ring_end = &vq->sw_ring[vq->vq_nentries];
> +	sw_ring  = &vq->rxq.sw_ring[desc_idx];

Ditto

Thanks,
Chenbo

> +	sw_ring_end = &vq->rxq.sw_ring[vq->vq_nentries];
> 
>  	rte_prefetch0(rused);
> 
> diff --git a/drivers/net/virtio/virtqueue.c
> b/drivers/net/virtio/virtqueue.c
> index fb651a4ca3..7a84796513 100644
> --- a/drivers/net/virtio/virtqueue.c
> +++ b/drivers/net/virtio/virtqueue.c
> @@ -38,9 +38,9 @@ virtqueue_detach_unused(struct virtqueue *vq)
>  				continue;
>  			if (start > end && (idx >= start || idx < end))
>  				continue;
> -			cookie = vq->sw_ring[idx];
> +			cookie = vq->rxq.sw_ring[idx];
>  			if (cookie != NULL) {
> -				vq->sw_ring[idx] = NULL;
> +				vq->rxq.sw_ring[idx] = NULL;
>  				return cookie;
>  			}
>  		} else {
> @@ -100,7 +100,7 @@ virtqueue_rxvq_flush_split(struct virtqueue *vq)
>  		uep = &vq->vq_split.ring.used->ring[used_idx];
>  		if (hw->use_vec_rx) {
>  			desc_idx = used_idx;
> -			rte_pktmbuf_free(vq->sw_ring[desc_idx]);
> +			rte_pktmbuf_free(vq->rxq.sw_ring[desc_idx]);
>  			vq->vq_free_cnt++;
>  		} else if (hw->use_inorder_rx) {
>  			desc_idx = (uint16_t)uep->id;
> diff --git a/drivers/net/virtio/virtqueue.h
> b/drivers/net/virtio/virtqueue.h
> index d453c3ec26..d7f8ee79bb 100644
> --- a/drivers/net/virtio/virtqueue.h
> +++ b/drivers/net/virtio/virtqueue.h
> @@ -206,7 +206,6 @@ struct virtqueue {
>  	                         * or virtual address for virtio_user. */
> 
>  	uint16_t  *notify_addr;
> -	struct rte_mbuf **sw_ring;  /**< RX software ring. */
>  	struct vq_desc_extra vq_descx[];
>  };
> 
> --
> 2.38.1


Thread overview: 48+ messages
2022-11-30 15:56 [PATCH v1 00/21] Add control queue & MQ support to Virtio-user vDPA Maxime Coquelin
2022-11-30 15:56 ` [PATCH v1 01/21] net/virtio: move CVQ code into a dedicated file Maxime Coquelin
2023-01-30  7:50   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 02/21] net/virtio: introduce notify callback for control queue Maxime Coquelin
2023-01-30  7:51   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 03/21] net/virtio: virtqueue headers alloc refactoring Maxime Coquelin
2023-01-30  7:51   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 04/21] net/virtio: remove port ID info from Rx queue Maxime Coquelin
2023-01-30  7:51   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 05/21] net/virtio: remove unused fields in Tx queue struct Maxime Coquelin
2023-01-30  7:51   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 06/21] net/virtio: remove unused queue ID field in Rx queue Maxime Coquelin
2023-01-30  7:52   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 07/21] net/virtio: remove unused Port ID in control queue Maxime Coquelin
2023-01-30  7:52   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 08/21] net/virtio: move vring memzone to virtqueue struct Maxime Coquelin
2023-01-30  7:52   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 09/21] net/virtio: refactor indirect desc headers init Maxime Coquelin
2023-01-30  7:52   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 10/21] net/virtio: alloc Rx SW ring only if vectorized path Maxime Coquelin
2023-01-30  7:49   ` Xia, Chenbo [this message]
2023-02-07 10:12     ` Maxime Coquelin
2022-11-30 15:56 ` [PATCH v1 11/21] net/virtio: extract virtqueue init from virtio queue init Maxime Coquelin
2023-01-30  7:53   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 12/21] net/virtio-user: fix device starting failure handling Maxime Coquelin
2023-01-31  5:20   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 13/21] net/virtio-user: simplify queues setup Maxime Coquelin
2023-01-31  5:21   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 14/21] net/virtio-user: use proper type for number of queue pairs Maxime Coquelin
2023-01-31  5:21   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 15/21] net/virtio-user: get max number of queue pairs from device Maxime Coquelin
2023-01-31  5:21   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 16/21] net/virtio-user: allocate shadow control queue Maxime Coquelin
2023-01-31  5:21   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 17/21] net/virtio-user: send shadow virtqueue info to the backend Maxime Coquelin
2023-01-31  5:22   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 18/21] net/virtio-user: add new callback to enable control queue Maxime Coquelin
2023-01-31  5:22   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 19/21] net/virtio-user: forward control messages to shadow queue Maxime Coquelin
2022-11-30 16:54   ` Stephen Hemminger
2022-12-06 12:58     ` Maxime Coquelin
2022-11-30 15:56 ` [PATCH v1 20/21] net/virtio-user: advertize control VQ support with vDPA Maxime Coquelin
2023-01-31  5:24   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 21/21] net/virtio-user: remove max queues limitation Maxime Coquelin
2023-01-31  5:19   ` Xia, Chenbo
2023-02-07 14:14     ` Maxime Coquelin
2023-01-30  5:57 ` [PATCH v1 00/21] Add control queue & MQ support to Virtio-user vDPA Xia, Chenbo
2023-02-07 10:08   ` Maxime Coquelin
