DPDK patches and discussions
From: "Xia, Chenbo" <chenbo.xia@intel.com>
To: "Coquelin, Maxime" <maxime.coquelin@redhat.com>,
	"dev@dpdk.org" <dev@dpdk.org>,
	"david.marchand@redhat.com" <david.marchand@redhat.com>,
	"eperezma@redhat.com" <eperezma@redhat.com>
Subject: RE: [PATCH v1 04/21] net/virtio: remove port ID info from Rx queue
Date: Mon, 30 Jan 2023 07:51:47 +0000
Message-ID: <SN6PR11MB3504D97793172E797C0B9C099CD39@SN6PR11MB3504.namprd11.prod.outlook.com>
In-Reply-To: <20221130155639.150553-5-maxime.coquelin@redhat.com>

> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Wednesday, November 30, 2022 11:56 PM
> To: dev@dpdk.org; Xia, Chenbo <chenbo.xia@intel.com>;
> david.marchand@redhat.com; eperezma@redhat.com
> Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
> Subject: [PATCH v1 04/21] net/virtio: remove port ID info from Rx queue
> 
> The port ID information is duplicated in several places.
> This patch removes it from the virtnet_rx struct, as it can
> already be found in the virtio_hw struct.
> 
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
>  drivers/net/virtio/virtio_ethdev.c      |  1 -
>  drivers/net/virtio/virtio_rxtx.c        | 25 ++++++++++---------------
>  drivers/net/virtio/virtio_rxtx.h        |  1 -
>  drivers/net/virtio/virtio_rxtx_packed.c |  3 +--
>  drivers/net/virtio/virtio_rxtx_simple.c |  3 ++-
>  drivers/net/virtio/virtio_rxtx_simple.h |  5 +++--
>  6 files changed, 16 insertions(+), 22 deletions(-)
> 
> diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
> index cead5f0884..1c68e5a283 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -462,7 +462,6 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
> 
>  		vq->sw_ring = sw_ring;
>  		rxvq = &vq->rxq;
> -		rxvq->port_id = dev->data->port_id;
>  		rxvq->mz = mz;
>  		rxvq->fake_mbuf = fake_mbuf;
>  	} else if (queue_type == VTNET_TQ) {
> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
> index bd95e8ceb5..45c04aa3f8 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -1024,7 +1024,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
>  			continue;
>  		}
> 
> -		rxm->port = rxvq->port_id;
> +		rxm->port = hw->port_id;
>  		rxm->data_off = RTE_PKTMBUF_HEADROOM;
>  		rxm->ol_flags = 0;
>  		rxm->vlan_tci = 0;
> @@ -1066,8 +1066,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
>  			}
>  			nb_enqueued += free_cnt;
>  		} else {
> -			struct rte_eth_dev *dev =
> -				&rte_eth_devices[rxvq->port_id];
> +			struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
>  			dev->data->rx_mbuf_alloc_failed += free_cnt;
>  		}
>  	}
> @@ -1127,7 +1126,7 @@ virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
>  			continue;
>  		}
> 
> -		rxm->port = rxvq->port_id;
> +		rxm->port = hw->port_id;
>  		rxm->data_off = RTE_PKTMBUF_HEADROOM;
>  		rxm->ol_flags = 0;
>  		rxm->vlan_tci = 0;
> @@ -1169,8 +1168,7 @@ virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
>  			}
>  			nb_enqueued += free_cnt;
>  		} else {
> -			struct rte_eth_dev *dev =
> -				&rte_eth_devices[rxvq->port_id];
> +			struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
>  			dev->data->rx_mbuf_alloc_failed += free_cnt;
>  		}
>  	}
> @@ -1258,7 +1256,7 @@ virtio_recv_pkts_inorder(void *rx_queue,
>  		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
>  		rxm->data_len = (uint16_t)(len[i] - hdr_size);
> 
> -		rxm->port = rxvq->port_id;
> +		rxm->port = hw->port_id;
> 
>  		rx_pkts[nb_rx] = rxm;
>  		prev = rxm;
> @@ -1352,8 +1350,7 @@ virtio_recv_pkts_inorder(void *rx_queue,
>  			}
>  			nb_enqueued += free_cnt;
>  		} else {
> -			struct rte_eth_dev *dev =
> -				&rte_eth_devices[rxvq->port_id];
> +			struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
>  			dev->data->rx_mbuf_alloc_failed += free_cnt;
>  		}
>  	}
> @@ -1437,7 +1434,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
>  		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
>  		rxm->data_len = (uint16_t)(len[i] - hdr_size);
> 
> -		rxm->port = rxvq->port_id;
> +		rxm->port = hw->port_id;
> 
>  		rx_pkts[nb_rx] = rxm;
>  		prev = rxm;
> @@ -1530,8 +1527,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
>  			}
>  			nb_enqueued += free_cnt;
>  		} else {
> -			struct rte_eth_dev *dev =
> -				&rte_eth_devices[rxvq->port_id];
> +			struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
>  			dev->data->rx_mbuf_alloc_failed += free_cnt;
>  		}
>  	}
> @@ -1610,7 +1606,7 @@ virtio_recv_mergeable_pkts_packed(void *rx_queue,
>  		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
>  		rxm->data_len = (uint16_t)(len[i] - hdr_size);
> 
> -		rxm->port = rxvq->port_id;
> +		rxm->port = hw->port_id;
>  		rx_pkts[nb_rx] = rxm;
>  		prev = rxm;
> 
> @@ -1699,8 +1695,7 @@ virtio_recv_mergeable_pkts_packed(void *rx_queue,
>  			}
>  			nb_enqueued += free_cnt;
>  		} else {
> -			struct rte_eth_dev *dev =
> -				&rte_eth_devices[rxvq->port_id];
> +			struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
>  			dev->data->rx_mbuf_alloc_failed += free_cnt;
>  		}
>  	}
> diff --git a/drivers/net/virtio/virtio_rxtx.h b/drivers/net/virtio/virtio_rxtx.h
> index 226c722d64..97de9eb0a3 100644
> --- a/drivers/net/virtio/virtio_rxtx.h
> +++ b/drivers/net/virtio/virtio_rxtx.h
> @@ -24,7 +24,6 @@ struct virtnet_rx {
>  	struct rte_mempool *mpool; /**< mempool for mbuf allocation */
> 
>  	uint16_t queue_id;   /**< DPDK queue index. */
> -	uint16_t port_id;     /**< Device port identifier. */
> 
>  	/* Statistics */
>  	struct virtnet_stats stats;
> diff --git a/drivers/net/virtio/virtio_rxtx_packed.c b/drivers/net/virtio/virtio_rxtx_packed.c
> index 45cf39df22..5f7d4903bc 100644
> --- a/drivers/net/virtio/virtio_rxtx_packed.c
> +++ b/drivers/net/virtio/virtio_rxtx_packed.c
> @@ -124,8 +124,7 @@ virtio_recv_pkts_packed_vec(void *rx_queue,
>  					free_cnt);
>  			nb_enqueued += free_cnt;
>  		} else {
> -			struct rte_eth_dev *dev =
> -				&rte_eth_devices[rxvq->port_id];
> +			struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
>  			dev->data->rx_mbuf_alloc_failed += free_cnt;
>  		}
>  	}
> diff --git a/drivers/net/virtio/virtio_rxtx_simple.c b/drivers/net/virtio/virtio_rxtx_simple.c
> index f248869a8f..438256970d 100644
> --- a/drivers/net/virtio/virtio_rxtx_simple.c
> +++ b/drivers/net/virtio/virtio_rxtx_simple.c
> @@ -30,12 +30,13 @@
>  int __rte_cold
>  virtio_rxq_vec_setup(struct virtnet_rx *rxq)
>  {
> +	struct virtqueue *vq = virtnet_rxq_to_vq(rxq);
>  	uintptr_t p;
>  	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
> 
>  	mb_def.nb_segs = 1;
>  	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
> -	mb_def.port = rxq->port_id;
> +	mb_def.port = vq->hw->port_id;
>  	rte_mbuf_refcnt_set(&mb_def, 1);
> 
>  	/* prevent compiler reordering: rearm_data covers previous fields */
> diff --git a/drivers/net/virtio/virtio_rxtx_simple.h b/drivers/net/virtio/virtio_rxtx_simple.h
> index d8f96e0434..8e235f4dbc 100644
> --- a/drivers/net/virtio/virtio_rxtx_simple.h
> +++ b/drivers/net/virtio/virtio_rxtx_simple.h
> @@ -32,8 +32,9 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
>  	ret = rte_mempool_get_bulk(rxvq->mpool, (void **)sw_ring,
>  		RTE_VIRTIO_VPMD_RX_REARM_THRESH);
>  	if (unlikely(ret)) {
> -		rte_eth_devices[rxvq->port_id].data->rx_mbuf_alloc_failed +=
> -			RTE_VIRTIO_VPMD_RX_REARM_THRESH;
> +		struct rte_eth_dev *dev = &rte_eth_devices[vq->hw->port_id];
> +
> +		dev->data->rx_mbuf_alloc_failed += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
>  		return;
>  	}
> 
> --
> 2.38.1

Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
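
For context, the access pattern this patch converges on can be illustrated
with a minimal standalone sketch. The types below are simplified stand-ins
rather than the driver's real definitions, and the helper only mirrors the
container_of()-based virtnet_rxq_to_vq() from virtqueue.h that the diff
relies on: the Rx queue no longer stores its own copy of the port ID, and
the datapath recovers it through the enclosing virtqueue's back-pointer to
the shared virtio_hw state.

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    struct virtio_hw  { uint16_t port_id; };  /* one copy per device */
    struct virtnet_rx { uint16_t queue_id; }; /* port_id field removed */

    struct virtqueue {
            struct virtio_hw *hw;   /* back-pointer to shared device state */
            struct virtnet_rx rxq;  /* embedded Rx queue */
    };

    /* simplified stand-in for the container_of() helper in virtqueue.h */
    static inline struct virtqueue *
    virtnet_rxq_to_vq(struct virtnet_rx *rxq)
    {
            return (struct virtqueue *)
                    ((char *)rxq - offsetof(struct virtqueue, rxq));
    }

    int main(void)
    {
            struct virtio_hw hw = { .port_id = 3 };
            struct virtqueue vq = { .hw = &hw };

            /* what the Rx paths now do instead of reading rxq->port_id */
            printf("port %u\n",
                   (unsigned)virtnet_rxq_to_vq(&vq.rxq)->hw->port_id);
            return 0;
    }

Built as plain C99 this prints "port 3"; in the real driver the same chain
appears as vq->hw->port_id after virtnet_rxq_to_vq(rxq), e.g. in
virtio_rxq_vec_setup() above.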

Thread overview: 48+ messages
2022-11-30 15:56 [PATCH v1 00/21] Add control queue & MQ support to Virtio-user vDPA Maxime Coquelin
2022-11-30 15:56 ` [PATCH v1 01/21] net/virtio: move CVQ code into a dedicated file Maxime Coquelin
2023-01-30  7:50   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 02/21] net/virtio: introduce notify callback for control queue Maxime Coquelin
2023-01-30  7:51   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 03/21] net/virtio: virtqueue headers alloc refactoring Maxime Coquelin
2023-01-30  7:51   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 04/21] net/virtio: remove port ID info from Rx queue Maxime Coquelin
2023-01-30  7:51   ` Xia, Chenbo [this message]
2022-11-30 15:56 ` [PATCH v1 05/21] net/virtio: remove unused fields in Tx queue struct Maxime Coquelin
2023-01-30  7:51   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 06/21] net/virtio: remove unused queue ID field in Rx queue Maxime Coquelin
2023-01-30  7:52   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 07/21] net/virtio: remove unused Port ID in control queue Maxime Coquelin
2023-01-30  7:52   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 08/21] net/virtio: move vring memzone to virtqueue struct Maxime Coquelin
2023-01-30  7:52   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 09/21] net/virtio: refactor indirect desc headers init Maxime Coquelin
2023-01-30  7:52   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 10/21] net/virtio: alloc Rx SW ring only if vectorized path Maxime Coquelin
2023-01-30  7:49   ` Xia, Chenbo
2023-02-07 10:12     ` Maxime Coquelin
2022-11-30 15:56 ` [PATCH v1 11/21] net/virtio: extract virtqueue init from virtio queue init Maxime Coquelin
2023-01-30  7:53   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 12/21] net/virtio-user: fix device starting failure handling Maxime Coquelin
2023-01-31  5:20   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 13/21] net/virtio-user: simplify queues setup Maxime Coquelin
2023-01-31  5:21   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 14/21] net/virtio-user: use proper type for number of queue pairs Maxime Coquelin
2023-01-31  5:21   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 15/21] net/virtio-user: get max number of queue pairs from device Maxime Coquelin
2023-01-31  5:21   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 16/21] net/virtio-user: allocate shadow control queue Maxime Coquelin
2023-01-31  5:21   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 17/21] net/virtio-user: send shadow virtqueue info to the backend Maxime Coquelin
2023-01-31  5:22   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 18/21] net/virtio-user: add new callback to enable control queue Maxime Coquelin
2023-01-31  5:22   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 19/21] net/virtio-user: forward control messages to shadow queue Maxime Coquelin
2022-11-30 16:54   ` Stephen Hemminger
2022-12-06 12:58     ` Maxime Coquelin
2022-11-30 15:56 ` [PATCH v1 20/21] net/virtio-user: advertize control VQ support with vDPA Maxime Coquelin
2023-01-31  5:24   ` Xia, Chenbo
2022-11-30 15:56 ` [PATCH v1 21/21] net/virtio-user: remove max queues limitation Maxime Coquelin
2023-01-31  5:19   ` Xia, Chenbo
2023-02-07 14:14     ` Maxime Coquelin
2023-01-30  5:57 ` [PATCH v1 00/21] Add control queue & MQ support to Virtio-user vDPA Xia, Chenbo
2023-02-07 10:08   ` Maxime Coquelin
