DPDK patches and discussions
* [PATCH] net/gve: change QPLs to be queue resources
@ 2024-06-13 22:59 Joshua Washington
  2024-06-13 23:49 ` [PATCH v2] " Joshua Washington
  2024-06-13 23:53 ` Joshua Washington
  0 siblings, 2 replies; 3+ messages in thread
From: Joshua Washington @ 2024-06-13 22:59 UTC (permalink / raw)
  To: Thomas Monjalon, Jeroen de Borst, Rushil Gupta, Joshua Washington
  Cc: dev, Ferruh Yigit, Harshitha Ramamurthy

Prior to this change, queue page lists (QPLs) were kept as device
resources stored in the gve_priv struct. This does not make sense,
because each QPL inherently belongs to a single queue.

This change moves all QPL resources into the queues themselves and
couples QPL allocation/registration and deregistration/deallocation
with queue creation and destruction, respectively. Before this change,
the QPL structs in gve_priv were also allocated during driver
initialization, which similarly does not make sense.
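
For reference, a minimal sketch of the per-queue QPL lifecycle after
this patch (example_rx_qpl_lifecycle is a hypothetical helper; the
real setup paths in gve_rx.c and gve_tx.c below carry the full error
handling):

static int
example_rx_qpl_lifecycle(struct gve_priv *hw, struct gve_rx_queue *rxq,
			 uint16_t queue_id)
{
	/* Queue setup: allocate and register this queue's QPL. */
	rxq->qpl = gve_setup_queue_page_list(hw, queue_id, true,
					     hw->rx_data_slot_cnt);
	if (rxq->qpl == NULL)
		return -ENOMEM;

	/* Queue release: unregister and free the QPL with the queue. */
	gve_teardown_queue_page_list(hw, rxq->qpl);
	rxq->qpl = NULL;
	return 0;
}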

Change-Id: I3e2de1facef7449cd581d7bfa0dc2a9ca80bd86a
Signed-off-by: Joshua Washington <joshwash@google.com>
Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
Reviewed-by: Rushil Gupta <rushilg@google.com>
---
 .mailmap                     |   2 +-
 drivers/net/gve/gve_ethdev.c | 159 ++++++++++++++++++++---------------
 drivers/net/gve/gve_ethdev.h |   9 +-
 drivers/net/gve/gve_rx.c     |  22 +++--
 drivers/net/gve/gve_tx.c     |  21 +++--
 5 files changed, 127 insertions(+), 86 deletions(-)

diff --git a/.mailmap b/.mailmap
index 6b396107d0..48af16edf5 100644
--- a/.mailmap
+++ b/.mailmap
@@ -503,7 +503,7 @@ Harold Huang <baymaxhuang@gmail.com>
 Harrison McCullough <harrison_mccullough@labs.att.com>
 Harry van Haaren <harry.van.haaren@intel.com>
 Harshad Narayane <harshad.suresh.narayane@intel.com>
-Harshitha Ramamurthy <harshitha.ramamurthy@intel.com>
+Harshitha Ramamurthy <hramamurthy@google.com>
 Hasan Alayli <halayli@gmail.com>
 Hayato Momma <h-momma@ce.jp.nec.com>
 Heinrich Kuhn <heinrich.kuhn@corigine.com> <heinrich.kuhn@netronome.com>
diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index 475745b9c0..ca92277a68 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -22,67 +22,121 @@ gve_write_version(uint8_t *driver_version_register)
 	writeb('\n', driver_version_register);
 }
 
-static int
-gve_alloc_queue_page_list(struct gve_priv *priv, uint32_t id, uint32_t pages)
+static struct gve_queue_page_list *
+gve_alloc_queue_page_list(const char *name, uint32_t num_pages)
 {
-	char z_name[RTE_MEMZONE_NAMESIZE];
 	struct gve_queue_page_list *qpl;
 	const struct rte_memzone *mz;
 	dma_addr_t page_bus;
 	uint32_t i;
 
-	if (priv->num_registered_pages + pages >
-	    priv->max_registered_pages) {
-		PMD_DRV_LOG(ERR, "Pages %" PRIu64 " > max registered pages %" PRIu64,
-			    priv->num_registered_pages + pages,
-			    priv->max_registered_pages);
-		return -EINVAL;
-	}
-	qpl = &priv->qpl[id];
-	snprintf(z_name, sizeof(z_name), "gve_%s_qpl%d", priv->pci_dev->device.name, id);
-	mz = rte_memzone_reserve_aligned(z_name, pages * PAGE_SIZE,
+	qpl = rte_zmalloc("qpl struct", sizeof(struct gve_queue_page_list), 0);
+	if (!qpl)
+		return NULL;
+
+	mz = rte_memzone_reserve_aligned(name, num_pages * PAGE_SIZE,
 					 rte_socket_id(),
 					 RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
 	if (mz == NULL) {
-		PMD_DRV_LOG(ERR, "Failed to alloc %s.", z_name);
-		return -ENOMEM;
+		PMD_DRV_LOG(ERR, "Failed to alloc %s.", name);
+		goto free_qpl_struct;
 	}
-	qpl->page_buses = rte_zmalloc("qpl page buses", pages * sizeof(dma_addr_t), 0);
+	qpl->page_buses = rte_zmalloc("qpl page buses",
+		num_pages * sizeof(dma_addr_t), 0);
 	if (qpl->page_buses == NULL) {
-		PMD_DRV_LOG(ERR, "Failed to alloc qpl %u page buses", id);
-		return -ENOMEM;
+		PMD_DRV_LOG(ERR, "Failed to alloc qpl page buses");
+		goto free_qpl_memzone;
 	}
 	page_bus = mz->iova;
-	for (i = 0; i < pages; i++) {
+	for (i = 0; i < num_pages; i++) {
 		qpl->page_buses[i] = page_bus;
 		page_bus += PAGE_SIZE;
 	}
-	qpl->id = id;
 	qpl->mz = mz;
-	qpl->num_entries = pages;
-
-	priv->num_registered_pages += pages;
-
-	return 0;
+	qpl->num_entries = num_pages;
+	return qpl;
+
+free_qpl_memzone:
+	rte_memzone_free(mz);
+free_qpl_struct:
+	rte_free(qpl);
+	return NULL;
 }
 
 static void
-gve_free_qpls(struct gve_priv *priv)
+gve_free_queue_page_list(struct gve_queue_page_list *qpl)
 {
-	uint16_t nb_txqs = priv->max_nb_txq;
-	uint16_t nb_rxqs = priv->max_nb_rxq;
-	uint32_t i;
+	if (qpl->mz) {
+		rte_memzone_free(qpl->mz);
+		qpl->mz = NULL;
+	}
+	if (qpl->page_buses) {
+		rte_free(qpl->page_buses);
+		qpl->page_buses = NULL;
+	}
+	rte_free(qpl);
+}
 
-	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
-		return;
+struct gve_queue_page_list *
+gve_setup_queue_page_list(struct gve_priv *priv, uint16_t queue_id, bool is_rx,
+	uint32_t num_pages)
+{
+	const char *queue_type_string = is_rx ? "rx" : "tx";
+	char qpl_name[RTE_MEMZONE_NAMESIZE];
+	struct gve_queue_page_list *qpl;
+	int err;
+
+	/* Allocate a new QPL. */
+	snprintf(qpl_name, sizeof(qpl_name), "gve_%s_%s_qpl%d",
+		priv->pci_dev->device.name, queue_type_string, queue_id);
+	qpl = gve_alloc_queue_page_list(qpl_name, num_pages);
+	if (!qpl) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to alloc %s qpl for queue %hu.",
+			    queue_type_string, queue_id);
+		return NULL;
+	}
+
+	/* Assign the QPL an ID. */
+	qpl->id = queue_id;
+	if (is_rx)
+		qpl->id += priv->max_nb_txq;
 
-	for (i = 0; i < nb_txqs + nb_rxqs; i++) {
-		if (priv->qpl[i].mz != NULL)
-			rte_memzone_free(priv->qpl[i].mz);
-		rte_free(priv->qpl[i].page_buses);
+	/* Validate the page registration limit and register the QPL. */
+	if (priv->num_registered_pages + qpl->num_entries >
+	    priv->max_registered_pages) {
+		PMD_DRV_LOG(ERR, "Pages %" PRIu64 " > max registered pages %" PRIu64,
+			    priv->num_registered_pages + qpl->num_entries,
+			    priv->max_registered_pages);
+		goto cleanup_qpl;
+	}
+	err = gve_adminq_register_page_list(priv, qpl);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to register %s qpl for queue %hu.",
+			    queue_type_string, queue_id);
+		goto cleanup_qpl;
 	}
+	priv->num_registered_pages += qpl->num_entries;
+	return qpl;
 
-	rte_free(priv->qpl);
+cleanup_qpl:
+	gve_free_queue_page_list(qpl);
+	return NULL;
+}
+
+int
+gve_teardown_queue_page_list(struct gve_priv *priv,
+	struct gve_queue_page_list *qpl)
+{
+	int err = gve_adminq_unregister_page_list(priv, qpl->id);
+	if (err) {
+		PMD_DRV_LOG(CRIT, "Unable to unregister qpl %d!", qpl->id);
+		return err;
+	}
+	priv->num_registered_pages -= qpl->num_entries;
+	gve_free_queue_page_list(qpl);
+	return 0;
 }
 
 static int
@@ -348,7 +402,6 @@ gve_dev_close(struct rte_eth_dev *dev)
 			gve_rx_queue_release_dqo(dev, i);
 	}
 
-	gve_free_qpls(priv);
 	rte_free(priv->adminq);
 
 	dev->data->mac_addrs = NULL;
@@ -1038,9 +1091,7 @@ gve_setup_device_resources(struct gve_priv *priv)
 static int
 gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
 {
-	uint16_t pages;
 	int num_ntfy;
-	uint32_t i;
 	int err;
 
 	/* Set up the adminq */
@@ -1096,40 +1147,10 @@ gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
 	PMD_DRV_LOG(INFO, "Max TX queues %d, Max RX queues %d",
 		    priv->max_nb_txq, priv->max_nb_rxq);
 
-	/* In GQI_QPL queue format:
-	 * Allocate queue page lists according to max queue number
-	 * tx qpl id should start from 0 while rx qpl id should start
-	 * from priv->max_nb_txq
-	 */
-	if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
-		priv->qpl = rte_zmalloc("gve_qpl",
-					(priv->max_nb_txq + priv->max_nb_rxq) *
-					sizeof(struct gve_queue_page_list), 0);
-		if (priv->qpl == NULL) {
-			PMD_DRV_LOG(ERR, "Failed to alloc qpl.");
-			err = -ENOMEM;
-			goto free_adminq;
-		}
-
-		for (i = 0; i < priv->max_nb_txq + priv->max_nb_rxq; i++) {
-			if (i < priv->max_nb_txq)
-				pages = priv->tx_pages_per_qpl;
-			else
-				pages = priv->rx_data_slot_cnt;
-			err = gve_alloc_queue_page_list(priv, i, pages);
-			if (err != 0) {
-				PMD_DRV_LOG(ERR, "Failed to alloc qpl %u.", i);
-				goto err_qpl;
-			}
-		}
-	}
-
 setup_device:
 	err = gve_setup_device_resources(priv);
 	if (!err)
 		return 0;
-err_qpl:
-	gve_free_qpls(priv);
 free_adminq:
 	gve_adminq_free(priv);
 	return err;
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index 9b19fc55e3..ca8c6404fd 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -288,8 +288,6 @@ struct gve_priv {
 	uint16_t max_mtu;
 	struct rte_ether_addr dev_addr; /* mac address */
 
-	struct gve_queue_page_list *qpl;
-
 	struct gve_tx_queue **txqs;
 	struct gve_rx_queue **rxqs;
 
@@ -416,6 +414,13 @@ gve_set_rx_function(struct rte_eth_dev *dev);
 void
 gve_set_tx_function(struct rte_eth_dev *dev);
 
+struct gve_queue_page_list *
+gve_setup_queue_page_list(struct gve_priv *priv, uint16_t queue_id, bool is_rx,
+	uint32_t num_pages);
+int
+gve_teardown_queue_page_list(struct gve_priv *priv,
+	struct gve_queue_page_list *qpl);
+
 /* Below functions are used for DQO */
 
 int
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index 36a1b73c65..41987ec870 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -279,7 +279,7 @@ gve_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 		return;
 
 	if (q->is_gqi_qpl) {
-		gve_adminq_unregister_page_list(q->hw, q->qpl->id);
+		gve_teardown_queue_page_list(q->hw, q->qpl);
 		q->qpl = NULL;
 	}
 
@@ -382,11 +382,15 @@ gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	}
 	rxq->rx_data_ring = (union gve_rx_data_slot *)mz->addr;
 	rxq->data_mz = mz;
+
+	/* Allocate and register QPL for the queue. */
 	if (rxq->is_gqi_qpl) {
-		rxq->qpl = &hw->qpl[rxq->ntfy_id];
-		err = gve_adminq_register_page_list(hw, rxq->qpl);
-		if (err != 0) {
-			PMD_DRV_LOG(ERR, "Failed to register qpl %u", queue_id);
+		rxq->qpl = gve_setup_queue_page_list(hw, queue_id, true,
+						     hw->rx_data_slot_cnt);
+		if (!rxq->qpl) {
+			PMD_DRV_LOG(ERR,
+				    "Failed to alloc rx qpl for queue %hu.",
+				    queue_id);
 			goto err_data_ring;
 		}
 	}
@@ -397,7 +401,7 @@ gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	if (mz == NULL) {
 		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX resource");
 		err = -ENOMEM;
-		goto err_data_ring;
+		goto err_qpl;
 	}
 	rxq->qres = (struct gve_queue_resources *)mz->addr;
 	rxq->qres_mz = mz;
@@ -407,7 +411,11 @@ gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	dev->data->rx_queues[queue_id] = rxq;
 
 	return 0;
-
+err_qpl:
+	if (rxq->is_gqi_qpl) {
+		gve_teardown_queue_page_list(hw, rxq->qpl);
+		rxq->qpl = NULL;
+	}
 err_data_ring:
 	rte_memzone_free(rxq->data_mz);
 err_rx_ring:
diff --git a/drivers/net/gve/gve_tx.c b/drivers/net/gve/gve_tx.c
index 2e0d001109..70d3ef060c 100644
--- a/drivers/net/gve/gve_tx.c
+++ b/drivers/net/gve/gve_tx.c
@@ -536,7 +536,7 @@ gve_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 		return;
 
 	if (q->is_gqi_qpl) {
-		gve_adminq_unregister_page_list(q->hw, q->qpl->id);
+		gve_teardown_queue_page_list(q->hw, q->qpl);
 		rte_free(q->iov_ring);
 		q->qpl = NULL;
 	}
@@ -619,6 +619,7 @@ gve_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,
 	txq->tx_ring_phys_addr = mz->iova;
 	txq->mz = mz;
 
+	/* QPL-specific allocations. */
 	if (txq->is_gqi_qpl) {
 		txq->iov_ring = rte_zmalloc_socket("gve tx iov ring",
 						   sizeof(struct gve_tx_iovec) * nb_desc,
@@ -628,10 +629,12 @@ gve_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,
 			err = -ENOMEM;
 			goto err_tx_ring;
 		}
-		txq->qpl = &hw->qpl[queue_id];
-		err = gve_adminq_register_page_list(hw, txq->qpl);
-		if (err != 0) {
-			PMD_DRV_LOG(ERR, "Failed to register qpl %u", queue_id);
+
+		txq->qpl = gve_setup_queue_page_list(hw, queue_id, false,
+						     hw->tx_pages_per_qpl);
+		if (!txq->qpl) {
+			PMD_DRV_LOG(ERR, "Failed to alloc tx qpl for queue %hu.",
+				    queue_id);
 			goto err_iov_ring;
 		}
 	}
@@ -641,7 +644,7 @@ gve_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,
 	if (mz == NULL) {
 		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX resource");
 		err = -ENOMEM;
-		goto err_iov_ring;
+		goto err_qpl;
 	}
 	txq->qres = (struct gve_queue_resources *)mz->addr;
 	txq->qres_mz = mz;
@@ -651,7 +654,11 @@ gve_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,
 	dev->data->tx_queues[queue_id] = txq;
 
 	return 0;
-
+err_qpl:
+	if (txq->is_gqi_qpl) {
+		gve_teardown_queue_page_list(hw, txq->qpl);
+		txq->qpl = NULL;
+	}
 err_iov_ring:
 	if (txq->is_gqi_qpl)
 		rte_free(txq->iov_ring);
-- 
2.45.2.627.g7a2c4fd464-goog



* [PATCH v2] net/gve: change QPLs to be queue resources
  2024-06-13 22:59 [PATCH] net/gve: change QPLs to be queue resources Joshua Washington
@ 2024-06-13 23:49 ` Joshua Washington
  2024-06-13 23:53 ` Joshua Washington
  1 sibling, 0 replies; 3+ messages in thread
From: Joshua Washington @ 2024-06-13 23:49 UTC (permalink / raw)
  To: Thomas Monjalon, Jeroen de Borst, Rushil Gupta, Joshua Washington
  Cc: dev, Ferruh Yigit, Harshitha Ramamurthy

Prior to this change, queue page lists (QPLs) were kept as device
resources stored in the gve_priv struct. This does not make sense,
because each QPL inherently belongs to a single queue.

This change moves all QPL resources into the queues themselves and
couples QPL allocation/registration and deregistration/deallocation
with queue creation and destruction, respectively. Before this change,
the QPL structs in gve_priv were also allocated during driver
initialization, which similarly does not make sense.
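
The QPL ID scheme of the old priv->qpl array is preserved: tx QPL ids
occupy [0, max_nb_txq) and rx QPL ids start at max_nb_txq. A sketch
(example_qpl_id is a hypothetical helper, not part of the patch):

static uint32_t
example_qpl_id(uint16_t queue_id, bool is_rx, uint16_t max_nb_txq)
{
	/* Mirrors the ID assignment in gve_setup_queue_page_list(). */
	return is_rx ? (uint32_t)queue_id + max_nb_txq : queue_id;
}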

Change-Id: I7098eae9d2469463c4f8dc76e770795df9b0ca91
Signed-off-by: Joshua Washington <joshwash@google.com>
Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
Reviewed-by: Rushil Gupta <rushilg@google.com>
---
 .mailmap                     |   2 +-
 drivers/net/gve/gve_ethdev.c | 159 ++++++++++++++++++++---------------
 drivers/net/gve/gve_ethdev.h |   9 +-
 drivers/net/gve/gve_rx.c     |  22 +++--
 drivers/net/gve/gve_tx.c     |  21 +++--
 5 files changed, 127 insertions(+), 86 deletions(-)

diff --git a/.mailmap b/.mailmap
index 6b396107d0..48af16edf5 100644
--- a/.mailmap
+++ b/.mailmap
@@ -503,7 +503,7 @@ Harold Huang <baymaxhuang@gmail.com>
 Harrison McCullough <harrison_mccullough@labs.att.com>
 Harry van Haaren <harry.van.haaren@intel.com>
 Harshad Narayane <harshad.suresh.narayane@intel.com>
-Harshitha Ramamurthy <harshitha.ramamurthy@intel.com>
+Harshitha Ramamurthy <hramamurthy@google.com>
 Hasan Alayli <halayli@gmail.com>
 Hayato Momma <h-momma@ce.jp.nec.com>
 Heinrich Kuhn <heinrich.kuhn@corigine.com> <heinrich.kuhn@netronome.com>
diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index 475745b9c0..ca92277a68 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -22,67 +22,121 @@ gve_write_version(uint8_t *driver_version_register)
 	writeb('\n', driver_version_register);
 }
 
-static int
-gve_alloc_queue_page_list(struct gve_priv *priv, uint32_t id, uint32_t pages)
+static struct gve_queue_page_list *
+gve_alloc_queue_page_list(const char *name, uint32_t num_pages)
 {
-	char z_name[RTE_MEMZONE_NAMESIZE];
 	struct gve_queue_page_list *qpl;
 	const struct rte_memzone *mz;
 	dma_addr_t page_bus;
 	uint32_t i;
 
-	if (priv->num_registered_pages + pages >
-	    priv->max_registered_pages) {
-		PMD_DRV_LOG(ERR, "Pages %" PRIu64 " > max registered pages %" PRIu64,
-			    priv->num_registered_pages + pages,
-			    priv->max_registered_pages);
-		return -EINVAL;
-	}
-	qpl = &priv->qpl[id];
-	snprintf(z_name, sizeof(z_name), "gve_%s_qpl%d", priv->pci_dev->device.name, id);
-	mz = rte_memzone_reserve_aligned(z_name, pages * PAGE_SIZE,
+	qpl = rte_zmalloc("qpl struct", sizeof(struct gve_queue_page_list), 0);
+	if (!qpl)
+		return NULL;
+
+	mz = rte_memzone_reserve_aligned(name, num_pages * PAGE_SIZE,
 					 rte_socket_id(),
 					 RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
 	if (mz == NULL) {
-		PMD_DRV_LOG(ERR, "Failed to alloc %s.", z_name);
-		return -ENOMEM;
+		PMD_DRV_LOG(ERR, "Failed to alloc %s.", name);
+		goto free_qpl_struct;
 	}
-	qpl->page_buses = rte_zmalloc("qpl page buses", pages * sizeof(dma_addr_t), 0);
+	qpl->page_buses = rte_zmalloc("qpl page buses",
+		num_pages * sizeof(dma_addr_t), 0);
 	if (qpl->page_buses == NULL) {
-		PMD_DRV_LOG(ERR, "Failed to alloc qpl %u page buses", id);
-		return -ENOMEM;
+		PMD_DRV_LOG(ERR, "Failed to alloc qpl page buses");
+		goto free_qpl_memzone;
 	}
 	page_bus = mz->iova;
-	for (i = 0; i < pages; i++) {
+	for (i = 0; i < num_pages; i++) {
 		qpl->page_buses[i] = page_bus;
 		page_bus += PAGE_SIZE;
 	}
-	qpl->id = id;
 	qpl->mz = mz;
-	qpl->num_entries = pages;
-
-	priv->num_registered_pages += pages;
-
-	return 0;
+	qpl->num_entries = num_pages;
+	return qpl;
+
+free_qpl_memzone:
+	rte_memzone_free(mz);
+free_qpl_struct:
+	rte_free(qpl);
+	return NULL;
 }
 
 static void
-gve_free_qpls(struct gve_priv *priv)
+gve_free_queue_page_list(struct gve_queue_page_list *qpl)
 {
-	uint16_t nb_txqs = priv->max_nb_txq;
-	uint16_t nb_rxqs = priv->max_nb_rxq;
-	uint32_t i;
+	if (qpl->mz) {
+		rte_memzone_free(qpl->mz);
+		qpl->mz = NULL;
+	}
+	if (qpl->page_buses) {
+		rte_free(qpl->page_buses);
+		qpl->page_buses = NULL;
+	}
+	rte_free(qpl);
+}
 
-	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
-		return;
+struct gve_queue_page_list *
+gve_setup_queue_page_list(struct gve_priv *priv, uint16_t queue_id, bool is_rx,
+	uint32_t num_pages)
+{
+	const char *queue_type_string = is_rx ? "rx" : "tx";
+	char qpl_name[RTE_MEMZONE_NAMESIZE];
+	struct gve_queue_page_list *qpl;
+	int err;
+
+	/* Allocate a new QPL. */
+	snprintf(qpl_name, sizeof(qpl_name), "gve_%s_%s_qpl%d",
+		priv->pci_dev->device.name, queue_type_string, queue_id);
+	qpl = gve_alloc_queue_page_list(qpl_name, num_pages);
+	if (!qpl) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to alloc %s qpl for queue %hu.",
+			    queue_type_string, queue_id);
+		return NULL;
+	}
+
+	/* Assign the QPL an ID. */
+	qpl->id = queue_id;
+	if (is_rx)
+		qpl->id += priv->max_nb_txq;
 
-	for (i = 0; i < nb_txqs + nb_rxqs; i++) {
-		if (priv->qpl[i].mz != NULL)
-			rte_memzone_free(priv->qpl[i].mz);
-		rte_free(priv->qpl[i].page_buses);
+	/* Validate the page registration limit and register the QPL. */
+	if (priv->num_registered_pages + qpl->num_entries >
+	    priv->max_registered_pages) {
+		PMD_DRV_LOG(ERR, "Pages %" PRIu64 " > max registered pages %" PRIu64,
+			    priv->num_registered_pages + qpl->num_entries,
+			    priv->max_registered_pages);
+		goto cleanup_qpl;
+	}
+	err = gve_adminq_register_page_list(priv, qpl);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to register %s qpl for queue %hu.",
+			    queue_type_string, queue_id);
+		goto cleanup_qpl;
 	}
+	priv->num_registered_pages += qpl->num_entries;
+	return qpl;
 
-	rte_free(priv->qpl);
+cleanup_qpl:
+	gve_free_queue_page_list(qpl);
+	return NULL;
+}
+
+int
+gve_teardown_queue_page_list(struct gve_priv *priv,
+	struct gve_queue_page_list *qpl)
+{
+	int err = gve_adminq_unregister_page_list(priv, qpl->id);
+	if (err) {
+		PMD_DRV_LOG(CRIT, "Unable to unregister qpl %d!", qpl->id);
+		return err;
+	}
+	priv->num_registered_pages -= qpl->num_entries;
+	gve_free_queue_page_list(qpl);
+	return 0;
 }
 
 static int
@@ -348,7 +402,6 @@ gve_dev_close(struct rte_eth_dev *dev)
 			gve_rx_queue_release_dqo(dev, i);
 	}
 
-	gve_free_qpls(priv);
 	rte_free(priv->adminq);
 
 	dev->data->mac_addrs = NULL;
@@ -1038,9 +1091,7 @@ gve_setup_device_resources(struct gve_priv *priv)
 static int
 gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
 {
-	uint16_t pages;
 	int num_ntfy;
-	uint32_t i;
 	int err;
 
 	/* Set up the adminq */
@@ -1096,40 +1147,10 @@ gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
 	PMD_DRV_LOG(INFO, "Max TX queues %d, Max RX queues %d",
 		    priv->max_nb_txq, priv->max_nb_rxq);
 
-	/* In GQI_QPL queue format:
-	 * Allocate queue page lists according to max queue number
-	 * tx qpl id should start from 0 while rx qpl id should start
-	 * from priv->max_nb_txq
-	 */
-	if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
-		priv->qpl = rte_zmalloc("gve_qpl",
-					(priv->max_nb_txq + priv->max_nb_rxq) *
-					sizeof(struct gve_queue_page_list), 0);
-		if (priv->qpl == NULL) {
-			PMD_DRV_LOG(ERR, "Failed to alloc qpl.");
-			err = -ENOMEM;
-			goto free_adminq;
-		}
-
-		for (i = 0; i < priv->max_nb_txq + priv->max_nb_rxq; i++) {
-			if (i < priv->max_nb_txq)
-				pages = priv->tx_pages_per_qpl;
-			else
-				pages = priv->rx_data_slot_cnt;
-			err = gve_alloc_queue_page_list(priv, i, pages);
-			if (err != 0) {
-				PMD_DRV_LOG(ERR, "Failed to alloc qpl %u.", i);
-				goto err_qpl;
-			}
-		}
-	}
-
 setup_device:
 	err = gve_setup_device_resources(priv);
 	if (!err)
 		return 0;
-err_qpl:
-	gve_free_qpls(priv);
 free_adminq:
 	gve_adminq_free(priv);
 	return err;
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index 9b19fc55e3..ca8c6404fd 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -288,8 +288,6 @@ struct gve_priv {
 	uint16_t max_mtu;
 	struct rte_ether_addr dev_addr; /* mac address */
 
-	struct gve_queue_page_list *qpl;
-
 	struct gve_tx_queue **txqs;
 	struct gve_rx_queue **rxqs;
 
@@ -416,6 +414,13 @@ gve_set_rx_function(struct rte_eth_dev *dev);
 void
 gve_set_tx_function(struct rte_eth_dev *dev);
 
+struct gve_queue_page_list *
+gve_setup_queue_page_list(struct gve_priv *priv, uint16_t queue_id, bool is_rx,
+	uint32_t num_pages);
+int
+gve_teardown_queue_page_list(struct gve_priv *priv,
+	struct gve_queue_page_list *qpl);
+
 /* Below functions are used for DQO */
 
 int
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index 36a1b73c65..41987ec870 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -279,7 +279,7 @@ gve_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 		return;
 
 	if (q->is_gqi_qpl) {
-		gve_adminq_unregister_page_list(q->hw, q->qpl->id);
+		gve_teardown_queue_page_list(q->hw, q->qpl);
 		q->qpl = NULL;
 	}
 
@@ -382,11 +382,15 @@ gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	}
 	rxq->rx_data_ring = (union gve_rx_data_slot *)mz->addr;
 	rxq->data_mz = mz;
+
+	/* Allocate and register QPL for the queue. */
 	if (rxq->is_gqi_qpl) {
-		rxq->qpl = &hw->qpl[rxq->ntfy_id];
-		err = gve_adminq_register_page_list(hw, rxq->qpl);
-		if (err != 0) {
-			PMD_DRV_LOG(ERR, "Failed to register qpl %u", queue_id);
+		rxq->qpl = gve_setup_queue_page_list(hw, queue_id, true,
+						     hw->rx_data_slot_cnt);
+		if (!rxq->qpl) {
+			PMD_DRV_LOG(ERR,
+				    "Failed to alloc rx qpl for queue %hu.",
+				    queue_id);
 			goto err_data_ring;
 		}
 	}
@@ -397,7 +401,7 @@ gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	if (mz == NULL) {
 		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX resource");
 		err = -ENOMEM;
-		goto err_data_ring;
+		goto err_qpl;
 	}
 	rxq->qres = (struct gve_queue_resources *)mz->addr;
 	rxq->qres_mz = mz;
@@ -407,7 +411,11 @@ gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	dev->data->rx_queues[queue_id] = rxq;
 
 	return 0;
-
+err_qpl:
+	if (rxq->is_gqi_qpl) {
+		gve_teardown_queue_page_list(hw, rxq->qpl);
+		rxq->qpl = NULL;
+	}
 err_data_ring:
 	rte_memzone_free(rxq->data_mz);
 err_rx_ring:
diff --git a/drivers/net/gve/gve_tx.c b/drivers/net/gve/gve_tx.c
index 2e0d001109..70d3ef060c 100644
--- a/drivers/net/gve/gve_tx.c
+++ b/drivers/net/gve/gve_tx.c
@@ -536,7 +536,7 @@ gve_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 		return;
 
 	if (q->is_gqi_qpl) {
-		gve_adminq_unregister_page_list(q->hw, q->qpl->id);
+		gve_teardown_queue_page_list(q->hw, q->qpl);
 		rte_free(q->iov_ring);
 		q->qpl = NULL;
 	}
@@ -619,6 +619,7 @@ gve_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,
 	txq->tx_ring_phys_addr = mz->iova;
 	txq->mz = mz;
 
+	/* QPL-specific allocations. */
 	if (txq->is_gqi_qpl) {
 		txq->iov_ring = rte_zmalloc_socket("gve tx iov ring",
 						   sizeof(struct gve_tx_iovec) * nb_desc,
@@ -628,10 +629,12 @@ gve_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,
 			err = -ENOMEM;
 			goto err_tx_ring;
 		}
-		txq->qpl = &hw->qpl[queue_id];
-		err = gve_adminq_register_page_list(hw, txq->qpl);
-		if (err != 0) {
-			PMD_DRV_LOG(ERR, "Failed to register qpl %u", queue_id);
+
+		txq->qpl = gve_setup_queue_page_list(hw, queue_id, false,
+						     hw->tx_pages_per_qpl);
+		if (!txq->qpl) {
+			PMD_DRV_LOG(ERR, "Failed to alloc tx qpl for queue %hu.",
+				    queue_id);
 			goto err_iov_ring;
 		}
 	}
@@ -641,7 +644,7 @@ gve_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,
 	if (mz == NULL) {
 		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX resource");
 		err = -ENOMEM;
-		goto err_iov_ring;
+		goto err_qpl;
 	}
 	txq->qres = (struct gve_queue_resources *)mz->addr;
 	txq->qres_mz = mz;
@@ -651,7 +654,11 @@ gve_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,
 	dev->data->tx_queues[queue_id] = txq;
 
 	return 0;
-
+err_qpl:
+	if (txq->is_gqi_qpl) {
+		gve_teardown_queue_page_list(hw, txq->qpl);
+		txq->qpl = NULL;
+	}
 err_iov_ring:
 	if (txq->is_gqi_qpl)
 		rte_free(txq->iov_ring);
-- 
2.45.2.627.g7a2c4fd464-goog



* [PATCH v2] net/gve: change QPLs to be queue resources
  2024-06-13 22:59 [PATCH] net/gve: change QPLs to be queue resources Joshua Washington
  2024-06-13 23:49 ` [PATCH v2] " Joshua Washington
@ 2024-06-13 23:53 ` Joshua Washington
  1 sibling, 0 replies; 3+ messages in thread
From: Joshua Washington @ 2024-06-13 23:53 UTC (permalink / raw)
  To: Thomas Monjalon, Jeroen de Borst, Rushil Gupta, Joshua Washington
  Cc: dev, Ferruh Yigit, Harshitha Ramamurthy

Prior to this change, queue page lists (QPLs) were kept as device
resources stored in the gve_priv struct. This does not make sense,
because each QPL inherently belongs to a single queue.

This change moves all QPL resources into the queues themselves and
couples QPL allocation/registration and deregistration/deallocation
with queue creation and destruction, respectively. Before this change,
the QPL structs in gve_priv were also allocated during driver
initialization, which similarly does not make sense.
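
Each QPL remains backed by a single IOVA-contiguous memzone carved
into PAGE_SIZE pages, so the bus address of page i is simply
base + i * PAGE_SIZE. A sketch (example_fill_page_buses is a
hypothetical helper mirroring gve_alloc_queue_page_list() below):

static void
example_fill_page_buses(dma_addr_t base_iova, dma_addr_t *page_buses,
			uint32_t num_pages)
{
	uint32_t i;

	/* Pages are contiguous in IOVA space within one memzone. */
	for (i = 0; i < num_pages; i++)
		page_buses[i] = base_iova + (dma_addr_t)i * PAGE_SIZE;
}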

Signed-off-by: Joshua Washington <joshwash@google.com>
Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
Reviewed-by: Rushil Gupta <rushilg@google.com>
---
 .mailmap                     |   2 +-
 drivers/net/gve/gve_ethdev.c | 159 ++++++++++++++++++++---------------
 drivers/net/gve/gve_ethdev.h |   9 +-
 drivers/net/gve/gve_rx.c     |  22 +++--
 drivers/net/gve/gve_tx.c     |  21 +++--
 5 files changed, 127 insertions(+), 86 deletions(-)

diff --git a/.mailmap b/.mailmap
index 6b396107d0..48af16edf5 100644
--- a/.mailmap
+++ b/.mailmap
@@ -503,7 +503,7 @@ Harold Huang <baymaxhuang@gmail.com>
 Harrison McCullough <harrison_mccullough@labs.att.com>
 Harry van Haaren <harry.van.haaren@intel.com>
 Harshad Narayane <harshad.suresh.narayane@intel.com>
-Harshitha Ramamurthy <harshitha.ramamurthy@intel.com>
+Harshitha Ramamurthy <hramamurthy@google.com>
 Hasan Alayli <halayli@gmail.com>
 Hayato Momma <h-momma@ce.jp.nec.com>
 Heinrich Kuhn <heinrich.kuhn@corigine.com> <heinrich.kuhn@netronome.com>
diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index 475745b9c0..ca92277a68 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -22,67 +22,121 @@ gve_write_version(uint8_t *driver_version_register)
 	writeb('\n', driver_version_register);
 }
 
-static int
-gve_alloc_queue_page_list(struct gve_priv *priv, uint32_t id, uint32_t pages)
+static struct gve_queue_page_list *
+gve_alloc_queue_page_list(const char *name, uint32_t num_pages)
 {
-	char z_name[RTE_MEMZONE_NAMESIZE];
 	struct gve_queue_page_list *qpl;
 	const struct rte_memzone *mz;
 	dma_addr_t page_bus;
 	uint32_t i;
 
-	if (priv->num_registered_pages + pages >
-	    priv->max_registered_pages) {
-		PMD_DRV_LOG(ERR, "Pages %" PRIu64 " > max registered pages %" PRIu64,
-			    priv->num_registered_pages + pages,
-			    priv->max_registered_pages);
-		return -EINVAL;
-	}
-	qpl = &priv->qpl[id];
-	snprintf(z_name, sizeof(z_name), "gve_%s_qpl%d", priv->pci_dev->device.name, id);
-	mz = rte_memzone_reserve_aligned(z_name, pages * PAGE_SIZE,
+	qpl = rte_zmalloc("qpl struct", sizeof(struct gve_queue_page_list), 0);
+	if (!qpl)
+		return NULL;
+
+	mz = rte_memzone_reserve_aligned(name, num_pages * PAGE_SIZE,
 					 rte_socket_id(),
 					 RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
 	if (mz == NULL) {
-		PMD_DRV_LOG(ERR, "Failed to alloc %s.", z_name);
-		return -ENOMEM;
+		PMD_DRV_LOG(ERR, "Failed to alloc %s.", name);
+		goto free_qpl_struct;
 	}
-	qpl->page_buses = rte_zmalloc("qpl page buses", pages * sizeof(dma_addr_t), 0);
+	qpl->page_buses = rte_zmalloc("qpl page buses",
+		num_pages * sizeof(dma_addr_t), 0);
 	if (qpl->page_buses == NULL) {
-		PMD_DRV_LOG(ERR, "Failed to alloc qpl %u page buses", id);
-		return -ENOMEM;
+		PMD_DRV_LOG(ERR, "Failed to alloc qpl page buses");
+		goto free_qpl_memzone;
 	}
 	page_bus = mz->iova;
-	for (i = 0; i < pages; i++) {
+	for (i = 0; i < num_pages; i++) {
 		qpl->page_buses[i] = page_bus;
 		page_bus += PAGE_SIZE;
 	}
-	qpl->id = id;
 	qpl->mz = mz;
-	qpl->num_entries = pages;
-
-	priv->num_registered_pages += pages;
-
-	return 0;
+	qpl->num_entries = num_pages;
+	return qpl;
+
+free_qpl_memzone:
+	rte_memzone_free(mz);
+free_qpl_struct:
+	rte_free(qpl);
+	return NULL;
 }
 
 static void
-gve_free_qpls(struct gve_priv *priv)
+gve_free_queue_page_list(struct gve_queue_page_list *qpl)
 {
-	uint16_t nb_txqs = priv->max_nb_txq;
-	uint16_t nb_rxqs = priv->max_nb_rxq;
-	uint32_t i;
+	if (qpl->mz) {
+		rte_memzone_free(qpl->mz);
+		qpl->mz = NULL;
+	}
+	if (qpl->page_buses) {
+		rte_free(qpl->page_buses);
+		qpl->page_buses = NULL;
+	}
+	rte_free(qpl);
+}
 
-	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
-		return;
+struct gve_queue_page_list *
+gve_setup_queue_page_list(struct gve_priv *priv, uint16_t queue_id, bool is_rx,
+	uint32_t num_pages)
+{
+	const char *queue_type_string = is_rx ? "rx" : "tx";
+	char qpl_name[RTE_MEMZONE_NAMESIZE];
+	struct gve_queue_page_list *qpl;
+	int err;
+
+	/* Allocate a new QPL. */
+	snprintf(qpl_name, sizeof(qpl_name), "gve_%s_%s_qpl%d",
+		priv->pci_dev->device.name, queue_type_string, queue_id);
+	qpl = gve_alloc_queue_page_list(qpl_name, num_pages);
+	if (!qpl) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to alloc %s qpl for queue %hu.",
+			    queue_type_string, queue_id);
+		return NULL;
+	}
+
+	/* Assign the QPL an ID. */
+	qpl->id = queue_id;
+	if (is_rx)
+		qpl->id += priv->max_nb_txq;
 
-	for (i = 0; i < nb_txqs + nb_rxqs; i++) {
-		if (priv->qpl[i].mz != NULL)
-			rte_memzone_free(priv->qpl[i].mz);
-		rte_free(priv->qpl[i].page_buses);
+	/* Validate the page registration limit and register the QPL. */
+	if (priv->num_registered_pages + qpl->num_entries >
+	    priv->max_registered_pages) {
+		PMD_DRV_LOG(ERR, "Pages %" PRIu64 " > max registered pages %" PRIu64,
+			    priv->num_registered_pages + qpl->num_entries,
+			    priv->max_registered_pages);
+		goto cleanup_qpl;
+	}
+	err = gve_adminq_register_page_list(priv, qpl);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to register %s qpl for queue %hu.",
+			    queue_type_string, queue_id);
+		goto cleanup_qpl;
 	}
+	priv->num_registered_pages += qpl->num_entries;
+	return qpl;
 
-	rte_free(priv->qpl);
+cleanup_qpl:
+	gve_free_queue_page_list(qpl);
+	return NULL;
+}
+
+int
+gve_teardown_queue_page_list(struct gve_priv *priv,
+	struct gve_queue_page_list *qpl)
+{
+	int err = gve_adminq_unregister_page_list(priv, qpl->id);
+	if (err) {
+		PMD_DRV_LOG(CRIT, "Unable to unregister qpl %d!", qpl->id);
+		return err;
+	}
+	priv->num_registered_pages -= qpl->num_entries;
+	gve_free_queue_page_list(qpl);
+	return 0;
 }
 
 static int
@@ -348,7 +402,6 @@ gve_dev_close(struct rte_eth_dev *dev)
 			gve_rx_queue_release_dqo(dev, i);
 	}
 
-	gve_free_qpls(priv);
 	rte_free(priv->adminq);
 
 	dev->data->mac_addrs = NULL;
@@ -1038,9 +1091,7 @@ gve_setup_device_resources(struct gve_priv *priv)
 static int
 gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
 {
-	uint16_t pages;
 	int num_ntfy;
-	uint32_t i;
 	int err;
 
 	/* Set up the adminq */
@@ -1096,40 +1147,10 @@ gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
 	PMD_DRV_LOG(INFO, "Max TX queues %d, Max RX queues %d",
 		    priv->max_nb_txq, priv->max_nb_rxq);
 
-	/* In GQI_QPL queue format:
-	 * Allocate queue page lists according to max queue number
-	 * tx qpl id should start from 0 while rx qpl id should start
-	 * from priv->max_nb_txq
-	 */
-	if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
-		priv->qpl = rte_zmalloc("gve_qpl",
-					(priv->max_nb_txq + priv->max_nb_rxq) *
-					sizeof(struct gve_queue_page_list), 0);
-		if (priv->qpl == NULL) {
-			PMD_DRV_LOG(ERR, "Failed to alloc qpl.");
-			err = -ENOMEM;
-			goto free_adminq;
-		}
-
-		for (i = 0; i < priv->max_nb_txq + priv->max_nb_rxq; i++) {
-			if (i < priv->max_nb_txq)
-				pages = priv->tx_pages_per_qpl;
-			else
-				pages = priv->rx_data_slot_cnt;
-			err = gve_alloc_queue_page_list(priv, i, pages);
-			if (err != 0) {
-				PMD_DRV_LOG(ERR, "Failed to alloc qpl %u.", i);
-				goto err_qpl;
-			}
-		}
-	}
-
 setup_device:
 	err = gve_setup_device_resources(priv);
 	if (!err)
 		return 0;
-err_qpl:
-	gve_free_qpls(priv);
 free_adminq:
 	gve_adminq_free(priv);
 	return err;
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index 9b19fc55e3..ca8c6404fd 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -288,8 +288,6 @@ struct gve_priv {
 	uint16_t max_mtu;
 	struct rte_ether_addr dev_addr; /* mac address */
 
-	struct gve_queue_page_list *qpl;
-
 	struct gve_tx_queue **txqs;
 	struct gve_rx_queue **rxqs;
 
@@ -416,6 +414,13 @@ gve_set_rx_function(struct rte_eth_dev *dev);
 void
 gve_set_tx_function(struct rte_eth_dev *dev);
 
+struct gve_queue_page_list *
+gve_setup_queue_page_list(struct gve_priv *priv, uint16_t queue_id, bool is_rx,
+	uint32_t num_pages);
+int
+gve_teardown_queue_page_list(struct gve_priv *priv,
+	struct gve_queue_page_list *qpl);
+
 /* Below functions are used for DQO */
 
 int
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index 36a1b73c65..41987ec870 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -279,7 +279,7 @@ gve_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 		return;
 
 	if (q->is_gqi_qpl) {
-		gve_adminq_unregister_page_list(q->hw, q->qpl->id);
+		gve_teardown_queue_page_list(q->hw, q->qpl);
 		q->qpl = NULL;
 	}
 
@@ -382,11 +382,15 @@ gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	}
 	rxq->rx_data_ring = (union gve_rx_data_slot *)mz->addr;
 	rxq->data_mz = mz;
+
+	/* Allocate and register QPL for the queue. */
 	if (rxq->is_gqi_qpl) {
-		rxq->qpl = &hw->qpl[rxq->ntfy_id];
-		err = gve_adminq_register_page_list(hw, rxq->qpl);
-		if (err != 0) {
-			PMD_DRV_LOG(ERR, "Failed to register qpl %u", queue_id);
+		rxq->qpl = gve_setup_queue_page_list(hw, queue_id, true,
+						     hw->rx_data_slot_cnt);
+		if (!rxq->qpl) {
+			PMD_DRV_LOG(ERR,
+				    "Failed to alloc rx qpl for queue %hu.",
+				    queue_id);
 			goto err_data_ring;
 		}
 	}
@@ -397,7 +401,7 @@ gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	if (mz == NULL) {
 		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX resource");
 		err = -ENOMEM;
-		goto err_data_ring;
+		goto err_qpl;
 	}
 	rxq->qres = (struct gve_queue_resources *)mz->addr;
 	rxq->qres_mz = mz;
@@ -407,7 +411,11 @@ gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	dev->data->rx_queues[queue_id] = rxq;
 
 	return 0;
-
+err_qpl:
+	if (rxq->is_gqi_qpl) {
+		gve_teardown_queue_page_list(hw, rxq->qpl);
+		rxq->qpl = NULL;
+	}
 err_data_ring:
 	rte_memzone_free(rxq->data_mz);
 err_rx_ring:
diff --git a/drivers/net/gve/gve_tx.c b/drivers/net/gve/gve_tx.c
index 2e0d001109..70d3ef060c 100644
--- a/drivers/net/gve/gve_tx.c
+++ b/drivers/net/gve/gve_tx.c
@@ -536,7 +536,7 @@ gve_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 		return;
 
 	if (q->is_gqi_qpl) {
-		gve_adminq_unregister_page_list(q->hw, q->qpl->id);
+		gve_teardown_queue_page_list(q->hw, q->qpl);
 		rte_free(q->iov_ring);
 		q->qpl = NULL;
 	}
@@ -619,6 +619,7 @@ gve_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,
 	txq->tx_ring_phys_addr = mz->iova;
 	txq->mz = mz;
 
+	/* QPL-specific allocations. */
 	if (txq->is_gqi_qpl) {
 		txq->iov_ring = rte_zmalloc_socket("gve tx iov ring",
 						   sizeof(struct gve_tx_iovec) * nb_desc,
@@ -628,10 +629,12 @@ gve_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,
 			err = -ENOMEM;
 			goto err_tx_ring;
 		}
-		txq->qpl = &hw->qpl[queue_id];
-		err = gve_adminq_register_page_list(hw, txq->qpl);
-		if (err != 0) {
-			PMD_DRV_LOG(ERR, "Failed to register qpl %u", queue_id);
+
+		txq->qpl = gve_setup_queue_page_list(hw, queue_id, false,
+						     hw->tx_pages_per_qpl);
+		if (!txq->qpl) {
+			PMD_DRV_LOG(ERR, "Failed to alloc tx qpl for queue %hu.",
+				    queue_id);
 			goto err_iov_ring;
 		}
 	}
@@ -641,7 +644,7 @@ gve_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,
 	if (mz == NULL) {
 		PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX resource");
 		err = -ENOMEM;
-		goto err_iov_ring;
+		goto err_qpl;
 	}
 	txq->qres = (struct gve_queue_resources *)mz->addr;
 	txq->qres_mz = mz;
@@ -651,7 +654,11 @@ gve_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,
 	dev->data->tx_queues[queue_id] = txq;
 
 	return 0;
-
+err_qpl:
+	if (txq->is_gqi_qpl) {
+		gve_teardown_queue_page_list(hw, txq->qpl);
+		txq->qpl = NULL;
+	}
 err_iov_ring:
 	if (txq->is_gqi_qpl)
 		rte_free(txq->iov_ring);
-- 
2.45.2.627.g7a2c4fd464-goog


