DPDK patches and discussions
 help / color / mirror / Atom feed
From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, chenbo.xia@intel.com, david.marchand@redhat.com,
	eperezma@redhat.com, stephen@networkplumber.org
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [PATCH 09/21] net/virtio: refactor indirect desc headers init
Date: Thu,  9 Feb 2023 10:16:58 +0100	[thread overview]
Message-ID: <20230209091710.485512-10-maxime.coquelin@redhat.com> (raw)
In-Reply-To: <20230209091710.485512-1-maxime.coquelin@redhat.com>

This patch refactors the initialization of the indirect
descriptor headers into a dedicated function, which is then
used by both the queue init and reset functions.

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
---
 drivers/net/virtio/virtio_ethdev.c | 30 +------------
 drivers/net/virtio/virtqueue.c     | 68 ++++++++++++++++++++++--------
 drivers/net/virtio/virtqueue.h     |  2 +
 3 files changed, 54 insertions(+), 46 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 4f6d777951..f839a24d12 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -347,7 +347,6 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
 	unsigned int vq_size, size;
 	struct virtio_hw *hw = dev->data->dev_private;
 	struct virtnet_rx *rxvq = NULL;
-	struct virtnet_tx *txvq = NULL;
 	struct virtnet_ctl *cvq = NULL;
 	struct virtqueue *vq;
 	void *sw_ring = NULL;
@@ -465,7 +464,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
 		rxvq = &vq->rxq;
 		rxvq->fake_mbuf = fake_mbuf;
 	} else if (queue_type == VTNET_TQ) {
-		txvq = &vq->txq;
+		virtqueue_txq_indirect_headers_init(vq);
 	} else if (queue_type == VTNET_CQ) {
 		cvq = &vq->cq;
 		hw->cvq = cvq;
@@ -477,33 +476,6 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
 	else
 		vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_iova);
 
-	if (queue_type == VTNET_TQ) {
-		struct virtio_tx_region *txr;
-		unsigned int i;
-
-		txr = txvq->hdr_mz->addr;
-		for (i = 0; i < vq_size; i++) {
-			/* first indirect descriptor is always the tx header */
-			if (!virtio_with_packed_queue(hw)) {
-				struct vring_desc *start_dp = txr[i].tx_indir;
-				vring_desc_init_split(start_dp,
-						      RTE_DIM(txr[i].tx_indir));
-				start_dp->addr = txvq->hdr_mem + i * sizeof(*txr)
-					+ offsetof(struct virtio_tx_region, tx_hdr);
-				start_dp->len = hw->vtnet_hdr_size;
-				start_dp->flags = VRING_DESC_F_NEXT;
-			} else {
-				struct vring_packed_desc *start_dp =
-					txr[i].tx_packed_indir;
-				vring_desc_init_indirect_packed(start_dp,
-				      RTE_DIM(txr[i].tx_packed_indir));
-				start_dp->addr = txvq->hdr_mem + i * sizeof(*txr)
-					+ offsetof(struct virtio_tx_region, tx_hdr);
-				start_dp->len = hw->vtnet_hdr_size;
-			}
-		}
-	}
-
 	if (VIRTIO_OPS(hw)->setup_queue(hw, vq) < 0) {
 		PMD_INIT_LOG(ERR, "setup_queue failed");
 		ret = -EINVAL;
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index 41e3529546..fb651a4ca3 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -143,6 +143,54 @@ virtqueue_rxvq_flush(struct virtqueue *vq)
 		virtqueue_rxvq_flush_split(vq);
 }
 
+static void
+virtqueue_txq_indirect_header_init_packed(struct virtqueue *vq, uint32_t idx)
+{
+	struct virtio_tx_region *txr;
+	struct vring_packed_desc *desc;
+	rte_iova_t hdr_mem;
+
+	txr = vq->txq.hdr_mz->addr;
+	hdr_mem = vq->txq.hdr_mem;
+	desc = txr[idx].tx_packed_indir;
+
+	vring_desc_init_indirect_packed(desc, RTE_DIM(txr[idx].tx_packed_indir));
+	desc->addr = hdr_mem + idx * sizeof(*txr) + offsetof(struct virtio_tx_region, tx_hdr);
+	desc->len = vq->hw->vtnet_hdr_size;
+}
+
+static void
+virtqueue_txq_indirect_header_init_split(struct virtqueue *vq, uint32_t idx)
+{
+	struct virtio_tx_region *txr;
+	struct vring_desc *desc;
+	rte_iova_t hdr_mem;
+
+	txr = vq->txq.hdr_mz->addr;
+	hdr_mem = vq->txq.hdr_mem;
+	desc = txr[idx].tx_indir;
+
+	vring_desc_init_split(desc, RTE_DIM(txr[idx].tx_indir));
+	desc->addr = hdr_mem + idx * sizeof(*txr) + offsetof(struct virtio_tx_region, tx_hdr);
+	desc->len = vq->hw->vtnet_hdr_size;
+	desc->flags = VRING_DESC_F_NEXT;
+}
+
+void
+virtqueue_txq_indirect_headers_init(struct virtqueue *vq)
+{
+	uint32_t i;
+
+	if (!virtio_with_feature(vq->hw, VIRTIO_RING_F_INDIRECT_DESC))
+		return;
+
+	for (i = 0; i < vq->vq_nentries; i++)
+		if (virtio_with_packed_queue(vq->hw))
+			virtqueue_txq_indirect_header_init_packed(vq, i);
+		else
+			virtqueue_txq_indirect_header_init_split(vq, i);
+}
+
 int
 virtqueue_rxvq_reset_packed(struct virtqueue *vq)
 {
@@ -182,10 +230,7 @@ virtqueue_txvq_reset_packed(struct virtqueue *vq)
 {
 	int size = vq->vq_nentries;
 	struct vq_desc_extra *dxp;
-	struct virtnet_tx *txvq;
 	uint16_t desc_idx;
-	struct virtio_tx_region *txr;
-	struct vring_packed_desc *start_dp;
 
 	vq->vq_used_cons_idx = 0;
 	vq->vq_desc_head_idx = 0;
@@ -197,10 +242,8 @@ virtqueue_txvq_reset_packed(struct virtqueue *vq)
 	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
 	vq->vq_packed.event_flags_shadow = 0;
 
-	txvq = &vq->txq;
-	txr = txvq->hdr_mz->addr;
 	memset(vq->mz->addr, 0, vq->mz->len);
-	memset(txvq->hdr_mz->addr, 0, txvq->hdr_mz->len);
+	memset(vq->txq.hdr_mz->addr, 0, vq->txq.hdr_mz->len);
 
 	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
 		dxp = &vq->vq_descx[desc_idx];
@@ -208,20 +251,11 @@ virtqueue_txvq_reset_packed(struct virtqueue *vq)
 			rte_pktmbuf_free(dxp->cookie);
 			dxp->cookie = NULL;
 		}
-
-		if (virtio_with_feature(vq->hw, VIRTIO_RING_F_INDIRECT_DESC)) {
-			/* first indirect descriptor is always the tx header */
-			start_dp = txr[desc_idx].tx_packed_indir;
-			vring_desc_init_indirect_packed(start_dp,
-							RTE_DIM(txr[desc_idx].tx_packed_indir));
-			start_dp->addr = txvq->hdr_mem + desc_idx * sizeof(*txr)
-					 + offsetof(struct virtio_tx_region, tx_hdr);
-			start_dp->len = vq->hw->vtnet_hdr_size;
-		}
 	}
 
+	virtqueue_txq_indirect_headers_init(vq);
 	vring_desc_init_packed(vq, size);
-
 	virtqueue_disable_intr(vq);
+
 	return 0;
 }
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 8b7bfae643..d453c3ec26 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -384,6 +384,8 @@ int virtqueue_rxvq_reset_packed(struct virtqueue *vq);
 
 int virtqueue_txvq_reset_packed(struct virtqueue *vq);
 
+void virtqueue_txq_indirect_headers_init(struct virtqueue *vq);
+
 static inline int
 virtqueue_full(const struct virtqueue *vq)
 {
-- 
2.39.1


  parent reply	other threads:[~2023-02-09  9:18 UTC|newest]

Thread overview: 24+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-02-09  9:16 [PATCH 00/21] Add control queue & MQ support to Virtio-user vDPA Maxime Coquelin
2023-02-09  9:16 ` [PATCH 01/21] net/virtio: move CVQ code into a dedicated file Maxime Coquelin
2023-02-09  9:16 ` [PATCH 02/21] net/virtio: introduce notify callback for control queue Maxime Coquelin
2023-02-09  9:16 ` [PATCH 03/21] net/virtio: virtqueue headers alloc refactoring Maxime Coquelin
2023-02-09  9:16 ` [PATCH 04/21] net/virtio: remove port ID info from Rx queue Maxime Coquelin
2023-02-09  9:16 ` [PATCH 05/21] net/virtio: remove unused fields in Tx queue struct Maxime Coquelin
2023-02-09  9:16 ` [PATCH 06/21] net/virtio: remove unused queue ID field in Rx queue Maxime Coquelin
2023-02-09  9:16 ` [PATCH 07/21] net/virtio: remove unused Port ID in control queue Maxime Coquelin
2023-02-09  9:16 ` [PATCH 08/21] net/virtio: move vring memzone to virtqueue struct Maxime Coquelin
2023-02-09  9:16 ` Maxime Coquelin [this message]
2023-02-09  9:16 ` [PATCH 10/21] net/virtio: alloc Rx SW ring only if vectorized path Maxime Coquelin
2023-02-09  9:17 ` [PATCH 11/21] net/virtio: extract virtqueue init from virtio queue init Maxime Coquelin
2023-02-09  9:17 ` [PATCH 12/21] net/virtio-user: fix device starting failure handling Maxime Coquelin
2023-02-09  9:17 ` [PATCH 13/21] net/virtio-user: simplify queues setup Maxime Coquelin
2023-02-09  9:17 ` [PATCH 14/21] net/virtio-user: use proper type for number of queue pairs Maxime Coquelin
2023-02-09  9:17 ` [PATCH 15/21] net/virtio-user: get max number of queue pairs from device Maxime Coquelin
2023-02-09  9:17 ` [PATCH 16/21] net/virtio-user: allocate shadow control queue Maxime Coquelin
2023-02-09  9:17 ` [PATCH 17/21] net/virtio-user: send shadow virtqueue info to the backend Maxime Coquelin
2023-02-09  9:17 ` [PATCH 18/21] net/virtio-user: add new callback to enable control queue Maxime Coquelin
2023-02-09  9:17 ` [PATCH 19/21] net/virtio-user: forward control messages to shadow queue Maxime Coquelin
2023-02-09  9:17 ` [PATCH 20/21] net/virtio-user: advertize control VQ support with vDPA Maxime Coquelin
2023-02-09  9:17 ` [PATCH 21/21] net/virtio-user: remove max queues limitation Maxime Coquelin
2023-02-09  9:21 ` [PATCH 00/21] Add control queue & MQ support to Virtio-user vDPA Maxime Coquelin
2023-02-09 12:12 ` Maxime Coquelin

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20230209091710.485512-10-maxime.coquelin@redhat.com \
    --to=maxime.coquelin@redhat.com \
    --cc=chenbo.xia@intel.com \
    --cc=david.marchand@redhat.com \
    --cc=dev@dpdk.org \
    --cc=eperezma@redhat.com \
    --cc=stephen@networkplumber.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).