DPDK patches and discussions
 help / color / mirror / Atom feed
From: "Xia, Chenbo" <chenbo.xia@intel.com>
To: "Coquelin, Maxime" <maxime.coquelin@redhat.com>,
	"dev@dpdk.org" <dev@dpdk.org>,
	"david.marchand@redhat.com" <david.marchand@redhat.com>,
	"eperezma@redhat.com" <eperezma@redhat.com>
Subject: RE: [PATCH v1 08/21] net/virtio: move vring memzone to virtqueue struct
Date: Mon, 30 Jan 2023 07:52:31 +0000	[thread overview]
Message-ID: <SN6PR11MB3504011C0342CD79B27AE8A39CD39@SN6PR11MB3504.namprd11.prod.outlook.com> (raw)
In-Reply-To: <20221130155639.150553-9-maxime.coquelin@redhat.com>

> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Wednesday, November 30, 2022 11:56 PM
> To: dev@dpdk.org; Xia, Chenbo <chenbo.xia@intel.com>;
> david.marchand@redhat.com; eperezma@redhat.com
> Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
> Subject: [PATCH v1 08/21] net/virtio: move vring memzone to virtqueue
> struct
> 
> Whatever its type (Rx, Tx or Ctl), every virtqueue
> requires a memzone for the vrings. This patch moves the
> memzone pointer into the virtqueue struct, simplifying the code.
> 
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
>  drivers/net/virtio/virtio_cvq.h    |  1 -
>  drivers/net/virtio/virtio_ethdev.c | 11 ++---------
>  drivers/net/virtio/virtio_rxtx.h   |  4 ----
>  drivers/net/virtio/virtqueue.c     |  6 ++----
>  drivers/net/virtio/virtqueue.h     |  1 +
>  5 files changed, 5 insertions(+), 18 deletions(-)
> 
> diff --git a/drivers/net/virtio/virtio_cvq.h
> b/drivers/net/virtio/virtio_cvq.h
> index 0ff326b063..70739ae04b 100644
> --- a/drivers/net/virtio/virtio_cvq.h
> +++ b/drivers/net/virtio/virtio_cvq.h
> @@ -108,7 +108,6 @@ typedef uint8_t virtio_net_ctrl_ack;
>  struct virtnet_ctl {
>  	const struct rte_memzone *hdr_mz; /**< memzone to populate hdr. */
>  	rte_iova_t hdr_mem;               /**< hdr for each xmit packet */
> -	const struct rte_memzone *mz;     /**< mem zone to populate CTL ring.
> */
>  	rte_spinlock_t lock;              /**< spinlock for control queue.
> */
>  	void (*notify_queue)(struct virtqueue *vq, void *cookie); /**<
> notify ops. */
>  	void *notify_cookie;              /**< cookie for notify ops */
> diff --git a/drivers/net/virtio/virtio_ethdev.c
> b/drivers/net/virtio/virtio_ethdev.c
> index a581fae408..b546916a9f 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -423,6 +423,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t
> queue_idx)
> 
>  	memset(mz->addr, 0, mz->len);
> 
> +	vq->mz = mz;
>  	if (hw->use_va)
>  		vq->vq_ring_mem = (uintptr_t)mz->addr;
>  	else
> @@ -462,14 +463,11 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t
> queue_idx)
> 
>  		vq->sw_ring = sw_ring;
>  		rxvq = &vq->rxq;
> -		rxvq->mz = mz;
>  		rxvq->fake_mbuf = fake_mbuf;
>  	} else if (queue_type == VTNET_TQ) {
>  		txvq = &vq->txq;
> -		txvq->mz = mz;
>  	} else if (queue_type == VTNET_CQ) {
>  		cvq = &vq->cq;
> -		cvq->mz = mz;
>  		hw->cvq = cvq;
>  		vq->cq.notify_queue = &virtio_control_queue_notify;
>  	}
> @@ -550,15 +548,10 @@ virtio_free_queues(struct virtio_hw *hw)
>  		if (queue_type == VTNET_RQ) {
>  			rte_free(vq->rxq.fake_mbuf);
>  			rte_free(vq->sw_ring);
> -			rte_memzone_free(vq->rxq.mz);
> -		} else if (queue_type == VTNET_TQ) {
> -			rte_memzone_free(vq->txq.mz);
> -		} else {
> -			rte_memzone_free(vq->cq.mz);
>  		}
> 
>  		virtio_free_queue_headers(vq);
> -
> +		rte_memzone_free(vq->mz);
>  		rte_free(vq);
>  		hw->vqs[i] = NULL;
>  	}
> diff --git a/drivers/net/virtio/virtio_rxtx.h
> b/drivers/net/virtio/virtio_rxtx.h
> index a5fe3ea95c..57af630110 100644
> --- a/drivers/net/virtio/virtio_rxtx.h
> +++ b/drivers/net/virtio/virtio_rxtx.h
> @@ -25,8 +25,6 @@ struct virtnet_rx {
> 
>  	/* Statistics */
>  	struct virtnet_stats stats;
> -
> -	const struct rte_memzone *mz; /**< mem zone to populate RX ring. */
>  };
> 
>  struct virtnet_tx {
> @@ -34,8 +32,6 @@ struct virtnet_tx {
>  	rte_iova_t hdr_mem;               /**< hdr for each xmit packet */
> 
>  	struct virtnet_stats stats;       /* Statistics */
> -
> -	const struct rte_memzone *mz;    /**< mem zone to populate TX ring.
> */
>  };
> 
>  int virtio_rxq_vec_setup(struct virtnet_rx *rxvq);
> diff --git a/drivers/net/virtio/virtqueue.c
> b/drivers/net/virtio/virtqueue.c
> index 3b174a5923..41e3529546 100644
> --- a/drivers/net/virtio/virtqueue.c
> +++ b/drivers/net/virtio/virtqueue.c
> @@ -148,7 +148,6 @@ virtqueue_rxvq_reset_packed(struct virtqueue *vq)
>  {
>  	int size = vq->vq_nentries;
>  	struct vq_desc_extra *dxp;
> -	struct virtnet_rx *rxvq;
>  	uint16_t desc_idx;
> 
>  	vq->vq_used_cons_idx = 0;
> @@ -162,8 +161,7 @@ virtqueue_rxvq_reset_packed(struct virtqueue *vq)
>  	vq->vq_packed.event_flags_shadow = 0;
>  	vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
> 
> -	rxvq = &vq->rxq;
> -	memset(rxvq->mz->addr, 0, rxvq->mz->len);
> +	memset(vq->mz->addr, 0, vq->mz->len);
> 
>  	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
>  		dxp = &vq->vq_descx[desc_idx];
> @@ -201,7 +199,7 @@ virtqueue_txvq_reset_packed(struct virtqueue *vq)
> 
>  	txvq = &vq->txq;
>  	txr = txvq->hdr_mz->addr;
> -	memset(txvq->mz->addr, 0, txvq->mz->len);
> +	memset(vq->mz->addr, 0, vq->mz->len);
>  	memset(txvq->hdr_mz->addr, 0, txvq->hdr_mz->len);
> 
>  	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
> diff --git a/drivers/net/virtio/virtqueue.h
> b/drivers/net/virtio/virtqueue.h
> index f5058f362c..8b7bfae643 100644
> --- a/drivers/net/virtio/virtqueue.h
> +++ b/drivers/net/virtio/virtqueue.h
> @@ -201,6 +201,7 @@ struct virtqueue {
>  		struct virtnet_ctl cq;
>  	};
> 
> +	const struct rte_memzone *mz; /**< mem zone to populate ring. */
>  	rte_iova_t vq_ring_mem; /**< physical address of vring,
>  	                         * or virtual address for virtio_user. */
> 
> --
> 2.38.1

Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>

  reply	other threads:[~2023-01-30  7:52 UTC|newest]

Thread overview: 48+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-11-30 15:56 [PATCH v1 00/21] Add control queue & MQ support to Virtio-user vDPA Maxime Coquelin
2022-11-30 15:56 ` [PATCH v1 01/21] net/virtio: move CVQ code into a dedicated file Maxime Coquelin
2023-01-30  7:50   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 02/21] net/virtio: introduce notify callback for control queue Maxime Coquelin
2023-01-30  7:51   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 03/21] net/virtio: virtqueue headers alloc refactoring Maxime Coquelin
2023-01-30  7:51   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 04/21] net/virtio: remove port ID info from Rx queue Maxime Coquelin
2023-01-30  7:51   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 05/21] net/virtio: remove unused fields in Tx queue struct Maxime Coquelin
2023-01-30  7:51   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 06/21] net/virtio: remove unused queue ID field in Rx queue Maxime Coquelin
2023-01-30  7:52   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 07/21] net/virtio: remove unused Port ID in control queue Maxime Coquelin
2023-01-30  7:52   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 08/21] net/virtio: move vring memzone to virtqueue struct Maxime Coquelin
2023-01-30  7:52   ` Xia, Chenbo [this message]
2022-11-30 15:56 ` [PATCH v1 09/21] net/virtio: refactor indirect desc headers init Maxime Coquelin
2023-01-30  7:52   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 10/21] net/virtio: alloc Rx SW ring only if vectorized path Maxime Coquelin
2023-01-30  7:49   ` Xia, Chenbo
2023-02-07 10:12     ` Maxime Coquelin
2022-11-30 15:56 ` [PATCH v1 11/21] net/virtio: extract virtqueue init from virtio queue init Maxime Coquelin
2023-01-30  7:53   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 12/21] net/virtio-user: fix device starting failure handling Maxime Coquelin
2023-01-31  5:20   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 13/21] net/virtio-user: simplify queues setup Maxime Coquelin
2023-01-31  5:21   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 14/21] net/virtio-user: use proper type for number of queue pairs Maxime Coquelin
2023-01-31  5:21   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 15/21] net/virtio-user: get max number of queue pairs from device Maxime Coquelin
2023-01-31  5:21   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 16/21] net/virtio-user: allocate shadow control queue Maxime Coquelin
2023-01-31  5:21   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 17/21] net/virtio-user: send shadow virtqueue info to the backend Maxime Coquelin
2023-01-31  5:22   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 18/21] net/virtio-user: add new callback to enable control queue Maxime Coquelin
2023-01-31  5:22   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 19/21] net/virtio-user: forward control messages to shadow queue Maxime Coquelin
2022-11-30 16:54   ` Stephen Hemminger
2022-12-06 12:58     ` Maxime Coquelin
2022-11-30 15:56 ` [PATCH v1 20/21] net/virtio-user: advertize control VQ support with vDPA Maxime Coquelin
2023-01-31  5:24   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 21/21] net/virtio-user: remove max queues limitation Maxime Coquelin
2023-01-31  5:19   ` Xia, Chenbo
2023-02-07 14:14     ` Maxime Coquelin
2023-01-30  5:57 ` [PATCH v1 00/21] Add control queue & MQ support to Virtio-user vDPA Xia, Chenbo
2023-02-07 10:08   ` Maxime Coquelin

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=SN6PR11MB3504011C0342CD79B27AE8A39CD39@SN6PR11MB3504.namprd11.prod.outlook.com \
    --to=chenbo.xia@intel.com \
    --cc=david.marchand@redhat.com \
    --cc=dev@dpdk.org \
    --cc=eperezma@redhat.com \
    --cc=maxime.coquelin@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).