DPDK patches and discussions
From: "Xia, Chenbo" <chenbo.xia@intel.com>
To: "Coquelin, Maxime" <maxime.coquelin@redhat.com>,
	"dev@dpdk.org" <dev@dpdk.org>,
	"david.marchand@redhat.com" <david.marchand@redhat.com>,
	"eperezma@redhat.com" <eperezma@redhat.com>
Subject: RE: [PATCH v1 09/21] net/virtio: refactor indirect desc headers init
Date: Mon, 30 Jan 2023 07:52:40 +0000	[thread overview]
Message-ID: <SN6PR11MB3504C25116F04BC47F3088669CD39@SN6PR11MB3504.namprd11.prod.outlook.com> (raw)
In-Reply-To: <20221130155639.150553-10-maxime.coquelin@redhat.com>

> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Wednesday, November 30, 2022 11:56 PM
> To: dev@dpdk.org; Xia, Chenbo <chenbo.xia@intel.com>;
> david.marchand@redhat.com; eperezma@redhat.com
> Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
> Subject: [PATCH v1 09/21] net/virtio: refactor indirect desc headers init
> 
> This patch refactors the indirect descriptor headers
> initialization into a dedicated function, which is used
> by both the queue init and reset functions.
> 
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
>  drivers/net/virtio/virtio_ethdev.c | 30 +------------
>  drivers/net/virtio/virtqueue.c     | 68 ++++++++++++++++++++++--------
>  drivers/net/virtio/virtqueue.h     |  2 +
>  3 files changed, 54 insertions(+), 46 deletions(-)
> 
> diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
> index b546916a9f..8b17b450ec 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -347,7 +347,6 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
>  	unsigned int vq_size, size;
>  	struct virtio_hw *hw = dev->data->dev_private;
>  	struct virtnet_rx *rxvq = NULL;
> -	struct virtnet_tx *txvq = NULL;
>  	struct virtnet_ctl *cvq = NULL;
>  	struct virtqueue *vq;
>  	void *sw_ring = NULL;
> @@ -465,7 +464,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
>  		rxvq = &vq->rxq;
>  		rxvq->fake_mbuf = fake_mbuf;
>  	} else if (queue_type == VTNET_TQ) {
> -		txvq = &vq->txq;
> +		virtqueue_txq_indirect_headers_init(vq);
>  	} else if (queue_type == VTNET_CQ) {
>  		cvq = &vq->cq;
>  		hw->cvq = cvq;
> @@ -477,33 +476,6 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
>  	else
>  		vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_iova);
> 
> -	if (queue_type == VTNET_TQ) {
> -		struct virtio_tx_region *txr;
> -		unsigned int i;
> -
> -		txr = txvq->hdr_mz->addr;
> -		for (i = 0; i < vq_size; i++) {
> -			/* first indirect descriptor is always the tx header */
> -			if (!virtio_with_packed_queue(hw)) {
> -				struct vring_desc *start_dp = txr[i].tx_indir;
> -				vring_desc_init_split(start_dp,
> -						      RTE_DIM(txr[i].tx_indir));
> -				start_dp->addr = txvq->hdr_mem + i * sizeof(*txr)
> -					+ offsetof(struct virtio_tx_region, tx_hdr);
> -				start_dp->len = hw->vtnet_hdr_size;
> -				start_dp->flags = VRING_DESC_F_NEXT;
> -			} else {
> -				struct vring_packed_desc *start_dp =
> -					txr[i].tx_packed_indir;
> -				vring_desc_init_indirect_packed(start_dp,
> -				      RTE_DIM(txr[i].tx_packed_indir));
> -				start_dp->addr = txvq->hdr_mem + i * sizeof(*txr)
> -					+ offsetof(struct virtio_tx_region, tx_hdr);
> -				start_dp->len = hw->vtnet_hdr_size;
> -			}
> -		}
> -	}
> -
>  	if (VIRTIO_OPS(hw)->setup_queue(hw, vq) < 0) {
>  		PMD_INIT_LOG(ERR, "setup_queue failed");
>  		ret = -EINVAL;
> diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
> index 41e3529546..fb651a4ca3 100644
> --- a/drivers/net/virtio/virtqueue.c
> +++ b/drivers/net/virtio/virtqueue.c
> @@ -143,6 +143,54 @@ virtqueue_rxvq_flush(struct virtqueue *vq)
>  		virtqueue_rxvq_flush_split(vq);
>  }
> 
> +static void
> +virtqueue_txq_indirect_header_init_packed(struct virtqueue *vq, uint32_t idx)
> +{
> +	struct virtio_tx_region *txr;
> +	struct vring_packed_desc *desc;
> +	rte_iova_t hdr_mem;
> +
> +	txr = vq->txq.hdr_mz->addr;
> +	hdr_mem = vq->txq.hdr_mem;
> +	desc = txr[idx].tx_packed_indir;
> +
> +	vring_desc_init_indirect_packed(desc, RTE_DIM(txr[idx].tx_packed_indir));
> +	desc->addr = hdr_mem + idx * sizeof(*txr) + offsetof(struct virtio_tx_region, tx_hdr);
> +	desc->len = vq->hw->vtnet_hdr_size;
> +}
> +
> +static void
> +virtqueue_txq_indirect_header_init_split(struct virtqueue *vq, uint32_t idx)
> +{
> +	struct virtio_tx_region *txr;
> +	struct vring_desc *desc;
> +	rte_iova_t hdr_mem;
> +
> +	txr = vq->txq.hdr_mz->addr;
> +	hdr_mem = vq->txq.hdr_mem;
> +	desc = txr[idx].tx_indir;
> +
> +	vring_desc_init_split(desc, RTE_DIM(txr[idx].tx_indir));
> +	desc->addr = hdr_mem + idx * sizeof(*txr) + offsetof(struct virtio_tx_region, tx_hdr);
> +	desc->len = vq->hw->vtnet_hdr_size;
> +	desc->flags = VRING_DESC_F_NEXT;
> +}
> +
> +void
> +virtqueue_txq_indirect_headers_init(struct virtqueue *vq)
> +{
> +	uint32_t i;
> +
> +	if (!virtio_with_feature(vq->hw, VIRTIO_RING_F_INDIRECT_DESC))
> +		return;
> +
> +	for (i = 0; i < vq->vq_nentries; i++)
> +		if (virtio_with_packed_queue(vq->hw))
> +			virtqueue_txq_indirect_header_init_packed(vq, i);
> +		else
> +			virtqueue_txq_indirect_header_init_split(vq, i);
> +}
> +
>  int
>  virtqueue_rxvq_reset_packed(struct virtqueue *vq)
>  {
> @@ -182,10 +230,7 @@ virtqueue_txvq_reset_packed(struct virtqueue *vq)
>  {
>  	int size = vq->vq_nentries;
>  	struct vq_desc_extra *dxp;
> -	struct virtnet_tx *txvq;
>  	uint16_t desc_idx;
> -	struct virtio_tx_region *txr;
> -	struct vring_packed_desc *start_dp;
> 
>  	vq->vq_used_cons_idx = 0;
>  	vq->vq_desc_head_idx = 0;
> @@ -197,10 +242,8 @@ virtqueue_txvq_reset_packed(struct virtqueue *vq)
>  	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
>  	vq->vq_packed.event_flags_shadow = 0;
> 
> -	txvq = &vq->txq;
> -	txr = txvq->hdr_mz->addr;
>  	memset(vq->mz->addr, 0, vq->mz->len);
> -	memset(txvq->hdr_mz->addr, 0, txvq->hdr_mz->len);
> +	memset(vq->txq.hdr_mz->addr, 0, vq->txq.hdr_mz->len);
> 
>  	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
>  		dxp = &vq->vq_descx[desc_idx];
> @@ -208,20 +251,11 @@ virtqueue_txvq_reset_packed(struct virtqueue *vq)
>  			rte_pktmbuf_free(dxp->cookie);
>  			dxp->cookie = NULL;
>  		}
> -
> -		if (virtio_with_feature(vq->hw, VIRTIO_RING_F_INDIRECT_DESC)) {
> -			/* first indirect descriptor is always the tx header */
> -			start_dp = txr[desc_idx].tx_packed_indir;
> -			vring_desc_init_indirect_packed(start_dp,
> -					RTE_DIM(txr[desc_idx].tx_packed_indir));
> -			start_dp->addr = txvq->hdr_mem + desc_idx * sizeof(*txr)
> -					 + offsetof(struct virtio_tx_region, tx_hdr);
> -			start_dp->len = vq->hw->vtnet_hdr_size;
> -		}
>  	}
> 
> +	virtqueue_txq_indirect_headers_init(vq);
>  	vring_desc_init_packed(vq, size);
> -
>  	virtqueue_disable_intr(vq);
> +
>  	return 0;
>  }
> diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
> index 8b7bfae643..d453c3ec26 100644
> --- a/drivers/net/virtio/virtqueue.h
> +++ b/drivers/net/virtio/virtqueue.h
> @@ -384,6 +384,8 @@ int virtqueue_rxvq_reset_packed(struct virtqueue *vq);
> 
>  int virtqueue_txvq_reset_packed(struct virtqueue *vq);
> 
> +void virtqueue_txq_indirect_headers_init(struct virtqueue *vq);
> +
>  static inline int
>  virtqueue_full(const struct virtqueue *vq)
>  {
> --
> 2.38.1

Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
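
For reference, a minimal standalone sketch of the pattern this patch
introduces: one helper that walks every Tx descriptor and dispatches
between the packed and split indirect-header initializers, called from
both queue init and the packed Tx reset path. This uses mock types only;
none of it is actual driver code, the names merely mirror the diff above.

#include <stdint.h>
#include <stdio.h>

/* Mock virtqueue: stands in for struct virtqueue and its feature checks. */
struct mock_vq {
	uint32_t vq_nentries;
	int packed;          /* stands in for virtio_with_packed_queue() */
	int indirect_desc;   /* stands in for VIRTIO_RING_F_INDIRECT_DESC */
};

static void header_init_packed(struct mock_vq *vq, uint32_t idx)
{
	(void)vq;
	printf("packed indirect header init, desc %u\n", (unsigned)idx);
}

static void header_init_split(struct mock_vq *vq, uint32_t idx)
{
	(void)vq;
	printf("split indirect header init, desc %u\n", (unsigned)idx);
}

/* Mirrors the shape of virtqueue_txq_indirect_headers_init(). */
static void txq_indirect_headers_init(struct mock_vq *vq)
{
	uint32_t i;

	if (!vq->indirect_desc)
		return;	/* nothing to do without indirect descriptors */

	for (i = 0; i < vq->vq_nentries; i++) {
		if (vq->packed)
			header_init_packed(vq, i);
		else
			header_init_split(vq, i);
	}
}

int main(void)
{
	struct mock_vq vq = { .vq_nentries = 4, .packed = 1, .indirect_desc = 1 };

	/* Queue init and the packed Tx queue reset both end up here. */
	txq_indirect_headers_init(&vq);
	return 0;
}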


Thread overview: 48+ messages
2022-11-30 15:56 [PATCH v1 00/21] Add control queue & MQ support to Virtio-user vDPA Maxime Coquelin
2022-11-30 15:56 ` [PATCH v1 01/21] net/virtio: move CVQ code into a dedicated file Maxime Coquelin
2023-01-30  7:50   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 02/21] net/virtio: introduce notify callback for control queue Maxime Coquelin
2023-01-30  7:51   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 03/21] net/virtio: virtqueue headers alloc refactoring Maxime Coquelin
2023-01-30  7:51   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 04/21] net/virtio: remove port ID info from Rx queue Maxime Coquelin
2023-01-30  7:51   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 05/21] net/virtio: remove unused fields in Tx queue struct Maxime Coquelin
2023-01-30  7:51   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 06/21] net/virtio: remove unused queue ID field in Rx queue Maxime Coquelin
2023-01-30  7:52   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 07/21] net/virtio: remove unused Port ID in control queue Maxime Coquelin
2023-01-30  7:52   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 08/21] net/virtio: move vring memzone to virtqueue struct Maxime Coquelin
2023-01-30  7:52   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 09/21] net/virtio: refactor indirect desc headers init Maxime Coquelin
2023-01-30  7:52   ` Xia, Chenbo [this message]
2022-11-30 15:56 ` [PATCH v1 10/21] net/virtio: alloc Rx SW ring only if vectorized path Maxime Coquelin
2023-01-30  7:49   ` Xia, Chenbo
2023-02-07 10:12     ` Maxime Coquelin
2022-11-30 15:56 ` [PATCH v1 11/21] net/virtio: extract virtqueue init from virtio queue init Maxime Coquelin
2023-01-30  7:53   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 12/21] net/virtio-user: fix device starting failure handling Maxime Coquelin
2023-01-31  5:20   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 13/21] net/virtio-user: simplify queues setup Maxime Coquelin
2023-01-31  5:21   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 14/21] net/virtio-user: use proper type for number of queue pairs Maxime Coquelin
2023-01-31  5:21   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 15/21] net/virtio-user: get max number of queue pairs from device Maxime Coquelin
2023-01-31  5:21   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 16/21] net/virtio-user: allocate shadow control queue Maxime Coquelin
2023-01-31  5:21   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 17/21] net/virtio-user: send shadow virtqueue info to the backend Maxime Coquelin
2023-01-31  5:22   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 18/21] net/virtio-user: add new callback to enable control queue Maxime Coquelin
2023-01-31  5:22   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 19/21] net/virtio-user: forward control messages to shadow queue Maxime Coquelin
2022-11-30 16:54   ` Stephen Hemminger
2022-12-06 12:58     ` Maxime Coquelin
2022-11-30 15:56 ` [PATCH v1 20/21] net/virtio-user: advertize control VQ support with vDPA Maxime Coquelin
2023-01-31  5:24   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 21/21] net/virtio-user: remove max queues limitation Maxime Coquelin
2023-01-31  5:19   ` Xia, Chenbo
2023-02-07 14:14     ` Maxime Coquelin
2023-01-30  5:57 ` [PATCH v1 00/21] Add control queue & MQ support to Virtio-user vDPA Xia, Chenbo
2023-02-07 10:08   ` Maxime Coquelin
