From: Joshua Washington <joshwash@google.com>
To: Jeroen de Borst <jeroendb@google.com>,
	Rushil Gupta <rushilg@google.com>,
	 Joshua Washington <joshwash@google.com>
Cc: dev@dpdk.org, Ferruh Yigit <ferruh.yigit@amd.com>,
	 Praveen Kaligineedi <pkaligineedi@google.com>
Subject: [PATCH] net/gve: allocate QPL pages using malloc if memzone allocation fails
Date: Mon, 23 Dec 2024 12:11:33 -0800
Message-ID: <20241223201133.4185508-1-joshwash@google.com>

From: Praveen Kaligineedi <pkaligineedi@google.com>

Allocating a QPL for an RX queue may fail if enough contiguous IOVA
memory cannot be reserved. However, the only QPL requirement for RX
is that each individual 4K buffer be IOVA-contiguous, not the entire
QPL. Therefore, fall back to allocating the 4K buffers with rte_malloc
when the memzone allocation fails.

Signed-off-by: Praveen Kaligineedi <pkaligineedi@google.com>
Reviewed-by: Joshua Washington <joshwash@google.com>
---
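Note for reviewers: below is a condensed, self-contained sketch of the
fallback pattern this patch applies. The function alloc_pages_sketch and
its parameters are illustrative only, not part of the driver; it assumes
the per-page alignment argument keeps each buffer inside a single IOVA
page, so each page is IOVA-contiguous on its own.

#include <errno.h>
#include <rte_common.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_memzone.h>

/* Try one IOVA-contiguous memzone first; on failure, fall back to
 * per-page allocations. A page_size-aligned buffer of page_size bytes
 * cannot straddle a page boundary, so each fallback page is
 * IOVA-contiguous by itself.
 */
static int
alloc_pages_sketch(const char *name, void **bufs, rte_iova_t *iovas,
		   uint32_t num_pages, size_t page_size)
{
	const struct rte_memzone *mz;
	uint32_t i;

	mz = rte_memzone_reserve_aligned(name, num_pages * page_size,
					 rte_socket_id(),
					 RTE_MEMZONE_IOVA_CONTIG, page_size);
	if (mz != NULL) {
		/* One region: derive per-page addresses by offset. */
		for (i = 0; i < num_pages; i++) {
			bufs[i] = RTE_PTR_ADD(mz->addr, i * page_size);
			iovas[i] = mz->iova + i * page_size;
		}
		return 0;
	}

	/* Fallback: allocate each page separately and look up its IOVA. */
	for (i = 0; i < num_pages; i++) {
		bufs[i] = rte_malloc_socket(NULL, page_size, page_size,
					    rte_socket_id());
		if (bufs[i] == NULL)
			goto free_bufs;
		iovas[i] = rte_malloc_virt2iova(bufs[i]);
	}
	return 0;

free_bufs:
	while (i > 0)
		rte_free(bufs[--i]);
	return -ENOMEM;
}

The actual changes below split this logic between gve_alloc_using_mz()
and gve_alloc_using_malloc(), with the malloc fallback restricted to RX.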
 drivers/net/gve/gve_ethdev.c | 105 ++++++++++++++++++++++++++++++++++++++--------
 drivers/net/gve/gve_ethdev.h |   1 +
 drivers/net/gve/gve_rx.c     |   2 +-
 3 files changed, 89 insertions(+), 19 deletions(-)

diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index db4ebe7..09304ef 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -22,42 +22,100 @@ gve_write_version(uint8_t *driver_version_register)
 	writeb('\n', driver_version_register);
 }

+static const struct rte_memzone *
+gve_alloc_using_mz(const char *name, uint32_t num_pages)
+{
+	const struct rte_memzone *mz;
+	mz = rte_memzone_reserve_aligned(name, num_pages * PAGE_SIZE,
+					 rte_socket_id(),
+					 RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
+	if (mz == NULL)
+		PMD_DRV_LOG(ERR, "Failed to alloc memzone %s.", name);
+	return mz;
+}
+
+static int
+gve_alloc_using_malloc(void **bufs, uint32_t num_entries)
+{
+	uint32_t i;
+
+	for (i = 0; i < num_entries; i++) {
+		bufs[i] = rte_malloc_socket(NULL, PAGE_SIZE, PAGE_SIZE, rte_socket_id());
+		if (bufs[i] == NULL) {
+			PMD_DRV_LOG(ERR, "Failed to malloc");
+			goto free_bufs;
+		}
+	}
+	return 0;
+
+free_bufs:
+	while (i > 0)
+		rte_free(bufs[--i]);
+
+	return -ENOMEM;
+}
+
 static struct gve_queue_page_list *
-gve_alloc_queue_page_list(const char *name, uint32_t num_pages)
+gve_alloc_queue_page_list(const char *name, uint32_t num_pages, bool is_rx)
 {
 	struct gve_queue_page_list *qpl;
 	const struct rte_memzone *mz;
-	dma_addr_t page_bus;
 	uint32_t i;

 	qpl = rte_zmalloc("qpl struct",	sizeof(struct gve_queue_page_list), 0);
 	if (!qpl)
 		return NULL;

-	mz = rte_memzone_reserve_aligned(name, num_pages * PAGE_SIZE,
-					 rte_socket_id(),
-					 RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
-	if (mz == NULL) {
-		PMD_DRV_LOG(ERR, "Failed to alloc %s.", name);
-		goto free_qpl_struct;
-	}
 	qpl->page_buses = rte_zmalloc("qpl page buses",
 		num_pages * sizeof(dma_addr_t), 0);
 	if (qpl->page_buses == NULL) {
 		PMD_DRV_LOG(ERR, "Failed to alloc qpl page buses");
-		goto free_qpl_memzone;
+		goto free_qpl_struct;
 	}
-	page_bus = mz->iova;
-	for (i = 0; i < num_pages; i++) {
-		qpl->page_buses[i] = page_bus;
-		page_bus += PAGE_SIZE;
+
+	qpl->qpl_bufs = rte_zmalloc("qpl bufs",
+		num_pages * sizeof(void *), 0);
+	if (qpl->qpl_bufs == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to alloc qpl bufs");
+		goto free_qpl_page_buses;
 	}
-	qpl->mz = mz;
+
+	mz = gve_alloc_using_mz(name, num_pages);
+	if (mz) {
+		qpl->mz = mz;
+
+		/* Populate the buffer addresses */
+		for (i = 0; i < num_pages; i++)
+			qpl->qpl_bufs[i] =
+				RTE_PTR_ADD(mz->addr, i * PAGE_SIZE);
+
+		/* Populate the IOVA addresses */
+		for (i = 0; i < num_pages; i++)
+			qpl->page_buses[i] = mz->iova + i * PAGE_SIZE;
+	} else {
+		/* Fall back to individually allocated 4K buffers;
+		 * the TX FIFO requires a contiguous QPL, so RX only.
+		 */
+		if (!is_rx)
+			goto free_qpl_page_bufs;
+
+		PMD_DRV_LOG(ERR, "Allocating bufs using malloc for %s ", name);
+		if (gve_alloc_using_malloc(qpl->qpl_bufs, num_pages))
+			goto free_qpl_page_bufs;
+
+		/* Populate the IOVA addresses */
+		for (i = 0; i < num_pages; i++)
+			qpl->page_buses[i] =
+				rte_malloc_virt2iova(qpl->qpl_bufs[i]);
+	}
+
 	qpl->num_entries = num_pages;
 	return qpl;

-free_qpl_memzone:
-	rte_memzone_free(qpl->mz);
+free_qpl_page_bufs:
+	rte_free(qpl->qpl_bufs);
+free_qpl_page_buses:
+	rte_free(qpl->page_buses);
 free_qpl_struct:
 	rte_free(qpl);
 	return NULL;
@@ -69,7 +127,18 @@ gve_free_queue_page_list(struct gve_queue_page_list *qpl)
 	if (qpl->mz) {
 		rte_memzone_free(qpl->mz);
 		qpl->mz = NULL;
+	} else if (qpl->qpl_bufs) {
+		uint32_t i;
+
+		for (i = 0; i < qpl->num_entries; i++)
+			rte_free(qpl->qpl_bufs[i]);
+	}
+
+	if (qpl->qpl_bufs) {
+		rte_free(qpl->qpl_bufs);
+		qpl->qpl_bufs = NULL;
 	}
+
 	if (qpl->page_buses) {
 		rte_free(qpl->page_buses);
 		qpl->page_buses = NULL;
@@ -89,7 +158,7 @@ gve_setup_queue_page_list(struct gve_priv *priv, uint16_t queue_id, bool is_rx,
 	/* Allocate a new QPL. */
 	snprintf(qpl_name, sizeof(qpl_name), "gve_%s_%s_qpl%d",
 		priv->pci_dev->device.name, queue_type_string, queue_id);
-	qpl = gve_alloc_queue_page_list(qpl_name, num_pages);
+	qpl = gve_alloc_queue_page_list(qpl_name, num_pages, is_rx);
 	if (!qpl) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to alloc %s qpl for queue %hu.",
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index c417a0b..a011a0c 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -63,6 +63,7 @@ struct gve_queue_page_list {
 	uint32_t num_entries;
 	dma_addr_t *page_buses; /* the dma addrs of the pages */
 	const struct rte_memzone *mz;
+	void **qpl_bufs; /* virtual addresses of the qpl buffers */
 };

 /* A TX desc ring entry */
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index 89b6ef3..e73704c 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -117,7 +117,7 @@ gve_rx_mbuf(struct gve_rx_queue *rxq, struct rte_mbuf *rxe, uint16_t len,
 		rxq->ctx.mbuf_tail = rxe;
 	}
 	if (rxq->is_gqi_qpl) {
-		addr = (uint64_t)(rxq->qpl->mz->addr) + rx_id * PAGE_SIZE + padding;
+		addr = (uint64_t)rxq->qpl->qpl_bufs[rx_id] + padding;
 		rte_memcpy((void *)((size_t)rxe->buf_addr + rxe->data_off),
 				    (void *)(size_t)addr, len);
 	}
--
2.47.1.613.gc27f4b7a9f-goog

