Date: Wed, 18 Dec 2024 15:46:35 -0800
Message-ID: <20241218234635.2009033-1-joshwash@google.com>
Subject: [PATCH] net/gve: Allocate qpl pages using malloc if memzone allocation fails
From: Joshua Washington
To: Jeroen de Borst, Rushil Gupta, Joshua Washington
Cc: dev@dpdk.org, Ferruh Yigit, Praveen Kaligineedi

From: Praveen Kaligineedi

Allocating a QPL for an RX queue might fail if enough contiguous IOVA
memory cannot be reserved. However, the only requirement for an RX QPL
is that each individual 4K buffer be IOVA-contiguous, not the entire
QPL. Therefore, fall back to allocating the 4K buffers one at a time
with rte_malloc when the memzone allocation fails. (A standalone sketch
of this fallback pattern follows the patch below.)
Signed-off-by: Praveen Kaligineedi
Reviewed-by: Joshua Washington
---
 drivers/net/gve/gve_ethdev.c | 105 ++++++++++++++++++++++++++++++++++--------
 drivers/net/gve/gve_ethdev.h |   1 +
 drivers/net/gve/gve_rx.c     |   2 +-
 3 files changed, 89 insertions(+), 19 deletions(-)

diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index db4ebe7..09304ef 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -22,42 +22,100 @@ gve_write_version(uint8_t *driver_version_register)
 	writeb('\n', driver_version_register);
 }
 
+static const struct rte_memzone *
+gve_alloc_using_mz(const char *name, uint32_t num_pages)
+{
+	const struct rte_memzone *mz;
+
+	mz = rte_memzone_reserve_aligned(name, num_pages * PAGE_SIZE,
+					 rte_socket_id(),
+					 RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
+	if (mz == NULL)
+		PMD_DRV_LOG(ERR, "Failed to alloc memzone %s.", name);
+	return mz;
+}
+
+static int
+gve_alloc_using_malloc(void **bufs, uint32_t num_entries)
+{
+	uint32_t i;
+
+	for (i = 0; i < num_entries; i++) {
+		bufs[i] = rte_malloc_socket(NULL, PAGE_SIZE, PAGE_SIZE, rte_socket_id());
+		if (bufs[i] == NULL) {
+			PMD_DRV_LOG(ERR, "Failed to malloc");
+			goto free_bufs;
+		}
+	}
+	return 0;
+
+free_bufs:
+	while (i > 0)
+		rte_free(bufs[--i]);
+
+	return -ENOMEM;
+}
+
 static struct gve_queue_page_list *
-gve_alloc_queue_page_list(const char *name, uint32_t num_pages)
+gve_alloc_queue_page_list(const char *name, uint32_t num_pages, bool is_rx)
 {
 	struct gve_queue_page_list *qpl;
 	const struct rte_memzone *mz;
-	dma_addr_t page_bus;
 	uint32_t i;
 
 	qpl = rte_zmalloc("qpl struct", sizeof(struct gve_queue_page_list), 0);
 	if (!qpl)
 		return NULL;
 
-	mz = rte_memzone_reserve_aligned(name, num_pages * PAGE_SIZE,
-					 rte_socket_id(),
-					 RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
-	if (mz == NULL) {
-		PMD_DRV_LOG(ERR, "Failed to alloc %s.", name);
-		goto free_qpl_struct;
-	}
 	qpl->page_buses = rte_zmalloc("qpl page buses",
 				      num_pages * sizeof(dma_addr_t), 0);
 	if (qpl->page_buses == NULL) {
 		PMD_DRV_LOG(ERR, "Failed to alloc qpl page buses");
-		goto free_qpl_memzone;
+		goto free_qpl_struct;
 	}
-	page_bus = mz->iova;
-	for (i = 0; i < num_pages; i++) {
-		qpl->page_buses[i] = page_bus;
-		page_bus += PAGE_SIZE;
+
+	qpl->qpl_bufs = rte_zmalloc("qpl bufs",
+				    num_pages * sizeof(void *), 0);
+	if (qpl->qpl_bufs == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to alloc qpl bufs");
+		goto free_qpl_page_buses;
 	}
-	qpl->mz = mz;
+
+	mz = gve_alloc_using_mz(name, num_pages);
+	if (mz) {
+		qpl->mz = mz;
+
+		/* Populate the buffer addresses */
+		for (i = 0; i < num_pages; i++)
+			qpl->qpl_bufs[i] =
+				(void *)((uint64_t)(mz->addr) + i * PAGE_SIZE);
+
+		/* Populate the IOVA addresses */
+		for (i = 0; i < num_pages; i++)
+			qpl->page_buses[i] = mz->iova + i * PAGE_SIZE;
+	} else {
+		/* Allocate 4K size buffers.
+		 * Cannot use non-contiguous approach for tx fifo.
+		 */
+		if (!is_rx)
+			goto free_qpl_page_bufs;
+
+		PMD_DRV_LOG(ERR, "Allocating bufs using malloc for %s", name);
+		if (gve_alloc_using_malloc(qpl->qpl_bufs, num_pages))
+			goto free_qpl_page_bufs;
+
+		/* Populate the IOVA addresses */
+		for (i = 0; i < num_pages; i++)
+			qpl->page_buses[i] =
+				rte_malloc_virt2iova(qpl->qpl_bufs[i]);
+	}
+
 	qpl->num_entries = num_pages;
 	return qpl;
 
-free_qpl_memzone:
-	rte_memzone_free(qpl->mz);
+free_qpl_page_bufs:
+	rte_free(qpl->qpl_bufs);
+free_qpl_page_buses:
+	rte_free(qpl->page_buses);
 free_qpl_struct:
 	rte_free(qpl);
 	return NULL;
@@ -69,7 +127,18 @@ gve_free_queue_page_list(struct gve_queue_page_list *qpl)
 	if (qpl->mz) {
 		rte_memzone_free(qpl->mz);
 		qpl->mz = NULL;
+	} else if (qpl->qpl_bufs) {
+		uint32_t i;
+
+		for (i = 0; i < qpl->num_entries; i++)
+			rte_free(qpl->qpl_bufs[i]);
+	}
+
+	if (qpl->qpl_bufs) {
+		rte_free(qpl->qpl_bufs);
+		qpl->qpl_bufs = NULL;
 	}
+
 	if (qpl->page_buses) {
 		rte_free(qpl->page_buses);
 		qpl->page_buses = NULL;
@@ -89,7 +158,7 @@ gve_setup_queue_page_list(struct gve_priv *priv, uint16_t queue_id, bool is_rx,
 	/* Allocate a new QPL. */
 	snprintf(qpl_name, sizeof(qpl_name), "gve_%s_%s_qpl%d",
 		 priv->pci_dev->device.name, queue_type_string, queue_id);
-	qpl = gve_alloc_queue_page_list(qpl_name, num_pages);
+	qpl = gve_alloc_queue_page_list(qpl_name, num_pages, is_rx);
 	if (!qpl) {
 		PMD_DRV_LOG(ERR,
 			    "Failed to alloc %s qpl for queue %hu.",
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index c417a0b..a011a0c 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -63,6 +63,7 @@ struct gve_queue_page_list {
 	uint32_t num_entries;
 	dma_addr_t *page_buses; /* the dma addrs of the pages */
 	const struct rte_memzone *mz;
+	void **qpl_bufs; /* qpl-buffer list */
 };
 
 /* A TX desc ring entry */
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index 89b6ef3..e73704c 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -117,7 +117,7 @@ gve_rx_mbuf(struct gve_rx_queue *rxq, struct rte_mbuf *rxe, uint16_t len,
 		rxq->ctx.mbuf_tail = rxe;
 	}
 	if (rxq->is_gqi_qpl) {
-		addr = (uint64_t)(rxq->qpl->mz->addr) + rx_id * PAGE_SIZE + padding;
+		addr = (uint64_t)rxq->qpl->qpl_bufs[rx_id] + padding;
 		rte_memcpy((void *)((size_t)rxe->buf_addr + rxe->data_off),
 			   (void *)(size_t)addr, len);
 	}
-- 
2.47.1.613.gc27f4b7a9f-goog
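
For readers who want the fallback pattern in isolation, here is a
distilled, self-contained sketch of what the patch does: try one
IOVA-contiguous memzone first, and only for RX fall back to allocating
each 4K page separately with rte_malloc_socket(), resolving each page's
IOVA on its own via rte_malloc_virt2iova(). The demo_* names and the
DEMO_PAGE_SIZE constant are invented for illustration and do not exist
in the driver; only the rte_* calls are real DPDK APIs, used the same
way the diff above uses them.

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

#include <rte_common.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_memzone.h>

#define DEMO_PAGE_SIZE 4096	/* stand-in for the driver's PAGE_SIZE */

struct demo_page_list {
	uint32_t num_pages;
	const struct rte_memzone *mz;	/* set when the contiguous path wins */
	void **bufs;		/* per-page virtual addresses (caller-allocated array) */
	rte_iova_t *iovas;	/* per-page IOVA addresses (caller-allocated array) */
};

/* Fill bufs[]/iovas[]: prefer a single IOVA-contiguous memzone; on
 * failure, RX queues may fall back to individually allocated 4K pages.
 */
static int
demo_alloc_pages(struct demo_page_list *pl, const char *name, bool is_rx)
{
	uint32_t i;

	pl->mz = rte_memzone_reserve_aligned(name,
			(size_t)pl->num_pages * DEMO_PAGE_SIZE,
			rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
			DEMO_PAGE_SIZE);
	if (pl->mz != NULL) {
		/* One reservation covers every page; slice it up. */
		for (i = 0; i < pl->num_pages; i++) {
			pl->bufs[i] = RTE_PTR_ADD(pl->mz->addr,
						  (size_t)i * DEMO_PAGE_SIZE);
			pl->iovas[i] = pl->mz->iova + (size_t)i * DEMO_PAGE_SIZE;
		}
		return 0;
	}

	if (!is_rx)	/* the TX FIFO still needs one contiguous region */
		return -ENOMEM;

	for (i = 0; i < pl->num_pages; i++) {
		pl->bufs[i] = rte_malloc_socket(NULL, DEMO_PAGE_SIZE,
						DEMO_PAGE_SIZE, rte_socket_id());
		if (pl->bufs[i] == NULL)
			goto unwind;
		/* Each page is contiguous within itself, which is all the
		 * RX copy path requires; translate it independently.
		 */
		pl->iovas[i] = rte_malloc_virt2iova(pl->bufs[i]);
	}
	return 0;

unwind:
	while (i > 0)
		rte_free(pl->bufs[--i]);
	return -ENOMEM;
}

The RX hot path then only ever dereferences one page's buffer at a
time, never assuming adjacency between pages, which is why the diff
above can replace the mz->addr arithmetic in gve_rx_mbuf() with a
qpl_bufs[rx_id] lookup.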