DPDK patches and discussions
* [PATCH] net/gve: Allocate qpl pages using malloc if memzone allocation fails
@ 2024-12-18 23:46 Joshua Washington
  2024-12-19  3:51 ` Stephen Hemminger
  2024-12-23 20:16 ` [PATCH v2] " Joshua Washington
  0 siblings, 2 replies; 6+ messages in thread
From: Joshua Washington @ 2024-12-18 23:46 UTC (permalink / raw)
  To: Jeroen de Borst, Rushil Gupta, Joshua Washington
  Cc: dev, Ferruh Yigit, Praveen Kaligineedi, Praveen Kaligineedi

From: Praveen Kaligineedi <pkaligineedi@google.com>

Allocating QPL for an RX queue might fail if enough contiguous IOVA
memory cannot be allocated. However, the only requirement for QPL
for RX is that each 4K buffer be IOVA contiguous, not the entire
QPL. Therefore, use malloc to allocate 4K buffers if the allocation
using memzone fails.
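
In outline: reserve one IOVA-contiguous memzone for the whole QPL first;
if that fails and the queue is an RX queue, fall back to per-page
rte_malloc() and record each page's IOVA individually. A condensed,
self-contained sketch of that logic (illustrative only; the 4096-byte
page size and the fill_page_buses() helper are simplifications of the
code in the diff below):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <rte_common.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_memzone.h>

/* Fill bufs[]/page_buses[] with one virtual address and one IOVA per
 * 4K page: prefer a single IOVA-contiguous memzone, fall back to
 * per-page rte_malloc() for RX queues only.
 */
static int
fill_page_buses(const char *name, uint32_t num_pages, bool is_rx,
		void **bufs, rte_iova_t *page_buses)
{
	const struct rte_memzone *mz;
	uint32_t i;

	mz = rte_memzone_reserve_aligned(name, (size_t)num_pages * 4096,
					 rte_socket_id(),
					 RTE_MEMZONE_IOVA_CONTIG, 4096);
	if (mz != NULL) {
		/* one contiguous IOVA range covers every page */
		for (i = 0; i < num_pages; i++) {
			bufs[i] = RTE_PTR_ADD(mz->addr, (size_t)i * 4096);
			page_buses[i] = mz->iova + (size_t)i * 4096;
		}
		return 0;
	}
	if (!is_rx)
		return -ENOMEM; /* the TX FIFO needs one contiguous range */
	for (i = 0; i < num_pages; i++) {
		bufs[i] = rte_malloc_socket(NULL, 4096, 4096, rte_socket_id());
		if (bufs[i] == NULL)
			return -ENOMEM; /* caller frees what was allocated */
		/* each 4K page is IOVA-contiguous on its own */
		page_buses[i] = rte_malloc_virt2iova(bufs[i]);
	}
	return 0;
}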

Signed-off-by: Praveen Kaligineedi <pkaligineedi@google.com>
Reviewed-by: Joshua Washington <joshwash@google.com>
---
 drivers/net/gve/gve_ethdev.c | 105 ++++++++++++++++++++++++++++++++++++++--------
 drivers/net/gve/gve_ethdev.h |   1 +
 drivers/net/gve/gve_rx.c     |   2 +-
 3 files changed, 89 insertions(+), 19 deletions(-)

diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index db4ebe7..09304ef 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -22,42 +22,100 @@ gve_write_version(uint8_t *driver_version_register)
 	writeb('\n', driver_version_register);
 }

+static const struct rte_memzone *
+gve_alloc_using_mz(const char *name, uint32_t num_pages)
+{
+	const struct rte_memzone *mz;
+	mz = rte_memzone_reserve_aligned(name, num_pages * PAGE_SIZE,
+					 rte_socket_id(),
+					 RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
+	if (mz == NULL)
+		PMD_DRV_LOG(ERR, "Failed to alloc memzone %s.", name);
+	return mz;
+}
+
+static int
+gve_alloc_using_malloc(void **bufs, uint32_t num_entries)
+{
+	uint32_t i;
+
+	for (i = 0; i < num_entries; i++) {
+		bufs[i] = rte_malloc_socket(NULL, PAGE_SIZE, PAGE_SIZE, rte_socket_id());
+		if (bufs[i] == NULL) {
+			PMD_DRV_LOG(ERR, "Failed to malloc");
+			goto free_bufs;
+		}
+	}
+	return 0;
+
+free_bufs:
+	while (i > 0)
+		rte_free(bufs[--i]);
+
+	return -ENOMEM;
+}
+
 static struct gve_queue_page_list *
-gve_alloc_queue_page_list(const char *name, uint32_t num_pages)
+gve_alloc_queue_page_list(const char *name, uint32_t num_pages, bool is_rx)
 {
 	struct gve_queue_page_list *qpl;
 	const struct rte_memzone *mz;
-	dma_addr_t page_bus;
 	uint32_t i;

 	qpl = rte_zmalloc("qpl struct",	sizeof(struct gve_queue_page_list), 0);
 	if (!qpl)
 		return NULL;

-	mz = rte_memzone_reserve_aligned(name, num_pages * PAGE_SIZE,
-					 rte_socket_id(),
-					 RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
-	if (mz == NULL) {
-		PMD_DRV_LOG(ERR, "Failed to alloc %s.", name);
-		goto free_qpl_struct;
-	}
 	qpl->page_buses = rte_zmalloc("qpl page buses",
 		num_pages * sizeof(dma_addr_t), 0);
 	if (qpl->page_buses == NULL) {
 		PMD_DRV_LOG(ERR, "Failed to alloc qpl page buses");
-		goto free_qpl_memzone;
+		goto free_qpl_struct;
 	}
-	page_bus = mz->iova;
-	for (i = 0; i < num_pages; i++) {
-		qpl->page_buses[i] = page_bus;
-		page_bus += PAGE_SIZE;
+
+	qpl->qpl_bufs = rte_zmalloc("qpl bufs",
+		num_pages * sizeof(void *), 0);
+	if (qpl->qpl_bufs == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to alloc qpl bufs");
+		goto free_qpl_page_buses;
 	}
-	qpl->mz = mz;
+
+	mz = gve_alloc_using_mz(name, num_pages);
+	if (mz) {
+		qpl->mz = mz;
+
+		/* Populate the buffer addresses */
+		for (i = 0; i < num_pages; i++)
+			qpl->qpl_bufs[i] =
+				(void *)((uint64_t)(mz->addr) + i * PAGE_SIZE);
+
+		/* Populate the IOVA addresses */
+		for (i = 0; i < num_pages; i++)
+			qpl->page_buses[i] = mz->iova + i * PAGE_SIZE;
+	} else {
+		/* Allocate 4K size buffers.
+		 * Cannot use non-contiguous approach for tx fifo.
+		 */
+		if (!is_rx)
+			goto free_qpl_page_bufs;
+
+		PMD_DRV_LOG(ERR, "Allocating bufs using malloc for %s ", name);
+		if (gve_alloc_using_malloc(qpl->qpl_bufs, num_pages))
+			goto free_qpl_page_bufs;
+
+		/* Populate the IOVA addresses */
+		for (i = 0; i < num_pages; i++)
+			qpl->page_buses[i] =
+				rte_malloc_virt2iova(qpl->qpl_bufs[i]);
+	}
+
 	qpl->num_entries = num_pages;
 	return qpl;

-free_qpl_memzone:
-	rte_memzone_free(qpl->mz);
+free_qpl_page_bufs:
+	rte_free(qpl->qpl_bufs);
+free_qpl_page_buses:
+	rte_free(qpl->page_buses);
 free_qpl_struct:
 	rte_free(qpl);
 	return NULL;
@@ -69,7 +127,18 @@ gve_free_queue_page_list(struct gve_queue_page_list *qpl)
 	if (qpl->mz) {
 		rte_memzone_free(qpl->mz);
 		qpl->mz = NULL;
+	} else if (qpl->qpl_bufs) {
+		uint32_t i;
+
+		for (i = 0; i < qpl->num_entries; i++)
+			rte_free(qpl->qpl_bufs[i]);
+	}
+
+	if (qpl->qpl_bufs) {
+		rte_free(qpl->qpl_bufs);
+		qpl->qpl_bufs = NULL;
 	}
+
 	if (qpl->page_buses) {
 		rte_free(qpl->page_buses);
 		qpl->page_buses = NULL;
@@ -89,7 +158,7 @@ gve_setup_queue_page_list(struct gve_priv *priv, uint16_t queue_id, bool is_rx,
 	/* Allocate a new QPL. */
 	snprintf(qpl_name, sizeof(qpl_name), "gve_%s_%s_qpl%d",
 		priv->pci_dev->device.name, queue_type_string, queue_id);
-	qpl = gve_alloc_queue_page_list(qpl_name, num_pages);
+	qpl = gve_alloc_queue_page_list(qpl_name, num_pages, is_rx);
 	if (!qpl) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to alloc %s qpl for queue %hu.",
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index c417a0b..a011a0c 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -63,6 +63,7 @@ struct gve_queue_page_list {
 	uint32_t num_entries;
 	dma_addr_t *page_buses; /* the dma addrs of the pages */
 	const struct rte_memzone *mz;
+	void **qpl_bufs; /* qpl-buffer list */
 };

 /* A TX desc ring entry */
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index 89b6ef3..e73704c 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -117,7 +117,7 @@ gve_rx_mbuf(struct gve_rx_queue *rxq, struct rte_mbuf *rxe, uint16_t len,
 		rxq->ctx.mbuf_tail = rxe;
 	}
 	if (rxq->is_gqi_qpl) {
-		addr = (uint64_t)(rxq->qpl->mz->addr) + rx_id * PAGE_SIZE + padding;
+		addr = (uint64_t)rxq->qpl->qpl_bufs[rx_id] + padding;
 		rte_memcpy((void *)((size_t)rxe->buf_addr + rxe->data_off),
 				    (void *)(size_t)addr, len);
 	}
--
2.47.1.613.gc27f4b7a9f-goog



* Re: [PATCH] net/gve: Allocate qpl pages using malloc if memzone allocation fails
  2024-12-18 23:46 [PATCH] net/gve: Allocate qpl pages using malloc if memzone allocation fails Joshua Washington
@ 2024-12-19  3:51 ` Stephen Hemminger
  2024-12-19 20:53   ` Praveen Kaligineedi
  2024-12-23 20:16 ` [PATCH v2] " Joshua Washington
  1 sibling, 1 reply; 6+ messages in thread
From: Stephen Hemminger @ 2024-12-19  3:51 UTC (permalink / raw)
  To: Joshua Washington
  Cc: Jeroen de Borst, Rushil Gupta, dev, Ferruh Yigit,
	Praveen Kaligineedi, Praveen Kaligineedi

On Wed, 18 Dec 2024 15:46:35 -0800
Joshua Washington <joshwash@google.com> wrote:

> From: Praveen Kaligineedi <pkaligineedi@google.com>
> 
> Allocating QPL for an RX queue might fail if enough contiguous IOVA
> memory cannot be allocated. However, the only requirement for QPL
> for RX is that each 4K buffer be IOVA contiguous, not the entire
> QPL. Therefore, use malloc to allocate 4K buffers if the allocation
> using memzone fails.
> 
> Signed-off-by: Praveen Kaligineedi <pkaligineedi@google.com>
> Reviewed-by: Joshua Washington <joshwash@google.com>
> ---

Why keep the memzone code? rte_malloc and memzone both come from
huge pages. Is there any advantage to memzone for what you are doing?

Better to not have two potential allocation paths to test.


* Re: [PATCH] net/gve: Allocate qpl pages using malloc if memzone allocation fails
  2024-12-19  3:51 ` Stephen Hemminger
@ 2024-12-19 20:53   ` Praveen Kaligineedi
  2024-12-19 22:34     ` Stephen Hemminger
  2024-12-20  0:04     ` Stephen Hemminger
  0 siblings, 2 replies; 6+ messages in thread
From: Praveen Kaligineedi @ 2024-12-19 20:53 UTC (permalink / raw)
  To: stephen
  Cc: Joshua Washington, Jeroen de Borst, Rushil Gupta, dev,
	Ferruh Yigit, Praveen Kaligineedi

The TX queue requires IOVA-contiguous QPL memory, so we still need the
memzone code for TX queues.
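
To make the distinction concrete: an RTE_MEMZONE_IOVA_CONTIG reservation
is a single IOVA range, whereas 4K-aligned buffers from rte_malloc() are
only IOVA-contiguous within themselves and need not be adjacent to each
other. A purely illustrative check (pages_iova_contiguous() is not part
of the patch):

#include <stdbool.h>
#include <stdint.h>
#include <rte_malloc.h>

/* Return true only if the per-page IOVAs form one contiguous range, as
 * the TX FIFO layout assumes; individually malloc'd RX pages may
 * legitimately fail this check.
 */
static bool
pages_iova_contiguous(void *const *bufs, uint32_t num_pages)
{
	uint32_t i;

	for (i = 1; i < num_pages; i++) {
		if (rte_malloc_virt2iova(bufs[i]) !=
		    rte_malloc_virt2iova(bufs[i - 1]) + 4096)
			return false;
	}
	return true;
}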

Regards,
Praveen

On Wed, Dec 18, 2024 at 7:51 PM Stephen Hemminger
<stephen@networkplumber.org> wrote:
>
> On Wed, 18 Dec 2024 15:46:35 -0800
> Joshua Washington <joshwash@google.com> wrote:
>
> > From: Praveen Kaligineedi <pkaligineedi@google.com>
> >
> > Allocating QPL for an RX queue might fail if enough contiguous IOVA
> > memory cannot be allocated. However, the only requirement for QPL
> > for RX is that each 4K buffer be IOVA contiguous, not the entire
> > QPL. Therefore, use malloc to allocate 4K buffers if the allocation
> > using memzone fails.
> >
> > Signed-off-by: Praveen Kaligineedi <pkaligineedi@google.com>
> > Reviewed-by: Joshua Washington <joshwash@google.com>
> > ---
>
> Why keep the memzone code? rte_malloc and memzone are both coming from
> huge pages. Is there any advantage to memzone for what you are doing?
>
> Better to not have two potential allocation paths to test.


* Re: [PATCH] net/gve: Allocate qpl pages using malloc if memzone allocation fails
  2024-12-19 20:53   ` Praveen Kaligineedi
@ 2024-12-19 22:34     ` Stephen Hemminger
  2024-12-20  0:04     ` Stephen Hemminger
  1 sibling, 0 replies; 6+ messages in thread
From: Stephen Hemminger @ 2024-12-19 22:34 UTC (permalink / raw)
  To: Praveen Kaligineedi
  Cc: Joshua Washington, Jeroen de Borst, Rushil Gupta, dev,
	Ferruh Yigit, Praveen Kaligineedi

On Thu, 19 Dec 2024 12:53:14 -0800
Praveen Kaligineedi <pkaligineedi@google.com> wrote:

> The TX queue requires IOVA contiguous QPL memory.  So, we still need
> memzone code for TX queues.
> 
> Regards,
> Praveen
> 
> On Wed, Dec 18, 2024 at 7:51 PM Stephen Hemminger
> <stephen@networkplumber.org> wrote:
> >
> > On Wed, 18 Dec 2024 15:46:35 -0800
> > Joshua Washington <joshwash@google.com> wrote:
> >  
> > > From: Praveen Kaligineedi <pkaligineedi@google.com>
> > >
> > > Allocating QPL for an RX queue might fail if enough contiguous IOVA
> > > memory cannot be allocated. However, the only requirement for QPL
> > > for RX is that each 4K buffer be IOVA contiguous, not the entire
> > > QPL. Therefore, use malloc to allocate 4K buffers if the allocation
> > > using memzone fails.
> > >
> > > Signed-off-by: Praveen Kaligineedi <pkaligineedi@google.com>
> > > Reviewed-by: Joshua Washington <joshwash@google.com>
> > > ---  
> >
> > Why keep the memzone code? rte_malloc and memzone are both coming from
> > huge pages. Is there any advantage to memzone for what you are doing?
> >
> > Better to not have two potential allocation paths to test.  


So then use rte_malloc for the Rx queue and rte_memzone for the Tx queue,
and document the rationale.


* Re: [PATCH] net/gve: Allocate qpl pages using malloc if memzone allocation fails
  2024-12-19 20:53   ` Praveen Kaligineedi
  2024-12-19 22:34     ` Stephen Hemminger
@ 2024-12-20  0:04     ` Stephen Hemminger
  1 sibling, 0 replies; 6+ messages in thread
From: Stephen Hemminger @ 2024-12-20  0:04 UTC (permalink / raw)
  To: Praveen Kaligineedi
  Cc: Joshua Washington, Jeroen de Borst, Rushil Gupta, dev,
	Ferruh Yigit, Praveen Kaligineedi

On Thu, 19 Dec 2024 12:53:14 -0800
Praveen Kaligineedi <pkaligineedi@google.com> wrote:

> The TX queue requires IOVA contiguous QPL memory.  So, we still need
> memzone code for TX queues.
> 
> Regards,
> Praveen
> 
> On Wed, Dec 18, 2024 at 7:51 PM Stephen Hemminger
> <stephen@networkplumber.org> wrote:
> >
> > On Wed, 18 Dec 2024 15:46:35 -0800
> > Joshua Washington <joshwash@google.com> wrote:
> >  
> > > From: Praveen Kaligineedi <pkaligineedi@google.com>
> > >
> > > Allocating QPL for an RX queue might fail if enough contiguous IOVA
> > > memory cannot be allocated. However, the only requirement for QPL
> > > for RX is that each 4K buffer be IOVA contiguous, not the entire
> > > QPL. Therefore, use malloc to allocate 4K buffers if the allocation
> > > using memzone fails.
> > >
> > > Signed-off-by: Praveen Kaligineedi <pkaligineedi@google.com>
> > > Reviewed-by: Joshua Washington <joshwash@google.com>
> > > ---  
> >
> > Why keep the memzone code? rte_malloc and memzone are both coming from
> > huge pages. Is there any advantage to memzone for what you are doing?
> >
> > Better to not have two potential allocation paths to test.  

Please fix the email address typo in the next version. I am getting
bounces because the Cc: contains
       Praveen Kaligineedi <pkaligineei@google.com>
It looks like you meant:
	Praveen Kaligineedi <pkaligineedi@google.com>


* [PATCH v2] net/gve: Allocate qpl pages using malloc if memzone allocation fails
  2024-12-18 23:46 [PATCH] net/gve: Allocate qpl pages using malloc if memzone allocation fails Joshua Washington
  2024-12-19  3:51 ` Stephen Hemminger
@ 2024-12-23 20:16 ` Joshua Washington
  1 sibling, 0 replies; 6+ messages in thread
From: Joshua Washington @ 2024-12-23 20:16 UTC (permalink / raw)
  To: Jeroen de Borst, Rushil Gupta, Joshua Washington
  Cc: dev, Ferruh Yigit, Praveen Kaligineedi

From: Praveen Kaligineedi <pkaligineedi@google.com>

Allocating QPL for an RX queue might fail if enough contiguous IOVA
memory cannot be allocated. However, the only requirement for QPL
for RX is that each 4K buffer be IOVA contiguous, not the entire
QPL. Therefore, use malloc to allocate 4K buffers if the allocation
using memzone fails.

Use memzone-based allocation for TX since the TX queue requires
IOVA-contiguous QPL memory.

Google-Bug-Id: 372857163
Signed-off-by: Praveen Kaligineedi <pkaligineedi@google.com>
Reviewed-by: Joshua Washington <joshwash@google.com>
---
 drivers/net/gve/gve_ethdev.c | 102 ++++++++++++++++++++++++++++-------
 drivers/net/gve/gve_ethdev.h |   5 +-
 drivers/net/gve/gve_rx.c     |   2 +-
 3 files changed, 89 insertions(+), 20 deletions(-)

diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index db4ebe7036..e471a34e61 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -22,42 +22,97 @@ gve_write_version(uint8_t *driver_version_register)
 	writeb('\n', driver_version_register);
 }
 
+static const struct rte_memzone *
+gve_alloc_using_mz(const char *name, uint32_t num_pages)
+{
+	const struct rte_memzone *mz;
+	mz = rte_memzone_reserve_aligned(name, num_pages * PAGE_SIZE,
+					 rte_socket_id(),
+					 RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
+	if (mz == NULL)
+		PMD_DRV_LOG(ERR, "Failed to alloc memzone %s.", name);
+	return mz;
+}
+
+static int
+gve_alloc_using_malloc(void **bufs, uint32_t num_entries)
+{
+	uint32_t i;
+
+	for (i = 0; i < num_entries; i++) {
+		bufs[i] = rte_malloc_socket(NULL, PAGE_SIZE, PAGE_SIZE, rte_socket_id());
+		if (bufs[i] == NULL) {
+			PMD_DRV_LOG(ERR, "Failed to malloc");
+			goto free_bufs;
+		}
+	}
+	return 0;
+
+free_bufs:
+	while (i > 0)
+		rte_free(bufs[--i]);
+
+	return -ENOMEM;
+}
+
 static struct gve_queue_page_list *
-gve_alloc_queue_page_list(const char *name, uint32_t num_pages)
+gve_alloc_queue_page_list(const char *name, uint32_t num_pages, bool is_rx)
 {
 	struct gve_queue_page_list *qpl;
 	const struct rte_memzone *mz;
-	dma_addr_t page_bus;
 	uint32_t i;
 
 	qpl = rte_zmalloc("qpl struct",	sizeof(struct gve_queue_page_list), 0);
 	if (!qpl)
 		return NULL;
 
-	mz = rte_memzone_reserve_aligned(name, num_pages * PAGE_SIZE,
-					 rte_socket_id(),
-					 RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
-	if (mz == NULL) {
-		PMD_DRV_LOG(ERR, "Failed to alloc %s.", name);
-		goto free_qpl_struct;
-	}
 	qpl->page_buses = rte_zmalloc("qpl page buses",
 		num_pages * sizeof(dma_addr_t), 0);
 	if (qpl->page_buses == NULL) {
 		PMD_DRV_LOG(ERR, "Failed to alloc qpl page buses");
-		goto free_qpl_memzone;
+		goto free_qpl_struct;
 	}
-	page_bus = mz->iova;
-	for (i = 0; i < num_pages; i++) {
-		qpl->page_buses[i] = page_bus;
-		page_bus += PAGE_SIZE;
+
+	if (is_rx) {
+		/* RX QPL need not be IOVA contiguous.
+		 * Allocate 4K size buffers using malloc
+		 */
+		qpl->qpl_bufs = rte_zmalloc("qpl bufs",
+			num_pages * sizeof(void *), 0);
+		if (qpl->qpl_bufs == NULL) {
+			PMD_DRV_LOG(ERR, "Failed to alloc qpl bufs");
+			goto free_qpl_page_buses;
+		}
+
+		if (gve_alloc_using_malloc(qpl->qpl_bufs, num_pages))
+			goto free_qpl_page_bufs;
+
+		/* Populate the IOVA addresses */
+		for (i = 0; i < num_pages; i++)
+			qpl->page_buses[i] =
+				rte_malloc_virt2iova(qpl->qpl_bufs[i]);
+	} else {
+		/* TX QPL needs to be IOVA contiguous
+		 * Allocate QPL using memzone
+		 */
+		mz = gve_alloc_using_mz(name, num_pages);
+		if (!mz)
+			goto free_qpl_page_buses;
+
+		qpl->mz = mz;
+
+		/* Populate the IOVA addresses */
+		for (i = 0; i < num_pages; i++)
+			qpl->page_buses[i] = mz->iova + i * PAGE_SIZE;
 	}
-	qpl->mz = mz;
+
 	qpl->num_entries = num_pages;
 	return qpl;
 
-free_qpl_memzone:
-	rte_memzone_free(qpl->mz);
+free_qpl_page_bufs:
+	rte_free(qpl->qpl_bufs);
+free_qpl_page_buses:
+	rte_free(qpl->page_buses);
 free_qpl_struct:
 	rte_free(qpl);
 	return NULL;
@@ -69,7 +124,18 @@ gve_free_queue_page_list(struct gve_queue_page_list *qpl)
 	if (qpl->mz) {
 		rte_memzone_free(qpl->mz);
 		qpl->mz = NULL;
+	} else if (qpl->qpl_bufs) {
+		uint32_t i;
+
+		for (i = 0; i < qpl->num_entries; i++)
+			rte_free(qpl->qpl_bufs[i]);
+	}
+
+	if (qpl->qpl_bufs) {
+		rte_free(qpl->qpl_bufs);
+		qpl->qpl_bufs = NULL;
 	}
+
 	if (qpl->page_buses) {
 		rte_free(qpl->page_buses);
 		qpl->page_buses = NULL;
@@ -89,7 +155,7 @@ gve_setup_queue_page_list(struct gve_priv *priv, uint16_t queue_id, bool is_rx,
 	/* Allocate a new QPL. */
 	snprintf(qpl_name, sizeof(qpl_name), "gve_%s_%s_qpl%d",
 		priv->pci_dev->device.name, queue_type_string, queue_id);
-	qpl = gve_alloc_queue_page_list(qpl_name, num_pages);
+	qpl = gve_alloc_queue_page_list(qpl_name, num_pages, is_rx);
 	if (!qpl) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to alloc %s qpl for queue %hu.",
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index c417a0b31c..35cb9062b1 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -62,7 +62,10 @@ struct gve_queue_page_list {
 	uint32_t id; /* unique id */
 	uint32_t num_entries;
 	dma_addr_t *page_buses; /* the dma addrs of the pages */
-	const struct rte_memzone *mz;
+	union {
+		const struct rte_memzone *mz; /* memzone allocated for TX queue */
+		void **qpl_bufs; /* RX qpl-buffer list allocated using malloc*/
+	};
 };
 
 /* A TX desc ring entry */
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index 1f5fa3f1da..7a91c31ad2 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -117,7 +117,7 @@ gve_rx_mbuf(struct gve_rx_queue *rxq, struct rte_mbuf *rxe, uint16_t len,
 		rxq->ctx.mbuf_tail = rxe;
 	}
 	if (rxq->is_gqi_qpl) {
-		addr = (uint64_t)(rxq->qpl->mz->addr) + rx_id * PAGE_SIZE + padding;
+		addr = (uint64_t)rxq->qpl->qpl_bufs[rx_id] + padding;
 		rte_memcpy((void *)((size_t)rxe->buf_addr + rxe->data_off),
 				    (void *)(size_t)addr, len);
 	}
-- 
2.47.1.613.gc27f4b7a9f-goog


