From: Ed Czeck <ed.czeck@atomicrules.com>
To: dev@dpdk.org, stephen@networkplumber.org
Cc: Shepard Siegel <shepard.siegel@atomicrules.com>,
John Miller <john.miller@atomicrules.com>
Subject: [PATCH v2 4/4] net/ark: improve Rx queue recovery after mbuf exhaustion
Date: Wed, 10 Sep 2025 15:04:36 -0400 [thread overview]
Message-ID: <20250910190436.995899-4-ed.czeck@atomicrules.com> (raw)
In-Reply-To: <20250910190436.995899-1-ed.czeck@atomicrules.com>
Use 4-KB page-aligned buffers to reduce PCIe read requests.
Reduce log message spew.
Attempt to allocate smaller chunks of buffers during mbuf starvation.
Signed-off-by: Ed Czeck <ed.czeck@atomicrules.com>
---
v2:
- reduced message to single line.
- Added comments on buffer alignment. PCIe devices deal with a page
size of 4096 bytes. By aligning this buffer to a 4-KB boundary, we
will reduce the number of PCIe read requests.
---
drivers/net/ark/ark_ethdev_rx.c | 34 ++++++++++++++++++++++-----------
1 file changed, 23 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ark/ark_ethdev_rx.c b/drivers/net/ark/ark_ethdev_rx.c
index 1b5c4b64a4..74f6d70d1e 100644
--- a/drivers/net/ark/ark_ethdev_rx.c
+++ b/drivers/net/ark/ark_ethdev_rx.c
@@ -42,6 +42,7 @@ struct __rte_cache_aligned ark_rx_queue {
rx_user_meta_hook_fn rx_user_meta_hook;
void *ext_user_data;
+ uint32_t starvation;
uint32_t dataroom;
uint32_t headroom;
@@ -57,8 +58,6 @@ struct __rte_cache_aligned ark_rx_queue {
/* The queue Index is used within the dpdk device structures */
uint16_t queue_index;
- uint32_t unused;
-
/* next cache line - fields written by device */
alignas(RTE_CACHE_LINE_MIN_SIZE) RTE_MARKER cacheline1;
@@ -187,10 +186,11 @@ eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
nb_desc * sizeof(struct rte_mbuf *),
512,
socket_id);
+ /* Align buffer to PCIe's page size of 4K to reduce upstream read requests from FPGA */
queue->paddress_q =
rte_zmalloc_socket("Ark_rx_queue paddr",
nb_desc * sizeof(rte_iova_t),
- 512,
+ 4096,
socket_id);
if (queue->reserve_q == 0 || queue->paddress_q == 0) {
@@ -265,6 +265,9 @@ eth_ark_recv_pkts(void *rx_queue,
return 0;
if (unlikely(nb_pkts == 0))
return 0;
+ if (unlikely(queue->starvation))
+ eth_ark_rx_seed_mbufs(queue);
+
prod_index = queue->prod_index;
cons_index = queue->cons_index;
if (prod_index == cons_index)
@@ -453,7 +456,7 @@ eth_ark_rx_stop_queue(struct rte_eth_dev *dev, uint16_t queue_id)
static inline int
eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue)
{
- uint32_t limit = (queue->cons_index & ~(ARK_RX_MPU_CHUNK - 1)) +
+ uint32_t limit = RTE_ALIGN_FLOOR(queue->cons_index, ARK_RX_MPU_CHUNK) +
queue->queue_size;
uint32_t seed_index = queue->seed_index;
@@ -461,23 +464,32 @@ eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue)
uint32_t seed_m = queue->seed_index & queue->queue_mask;
uint32_t nb = limit - seed_index;
+ int status;
/* Handle wrap around -- remainder is filled on the next call */
if (unlikely(seed_m + nb > queue->queue_size))
nb = queue->queue_size - seed_m;
struct rte_mbuf **mbufs = &queue->reserve_q[seed_m];
- int status = rte_pktmbuf_alloc_bulk(queue->mb_pool, mbufs, nb);
+ do {
+ status = rte_pktmbuf_alloc_bulk(queue->mb_pool, mbufs, nb);
+ if (status == 0)
+ break;
+ /* Try again with a smaller request, keeping aligned with chunk size */
+ nb = RTE_ALIGN_FLOOR(nb / 2, ARK_RX_MPU_CHUNK);
+ } while (nb >= ARK_RX_MPU_CHUNK);
if (unlikely(status != 0)) {
- ARK_PMD_LOG(NOTICE,
- "Could not allocate %u mbufs from pool"
- " for RX queue %u;"
- " %u free buffers remaining in queue\n",
- nb, queue->queue_index,
- queue->seed_index - queue->cons_index);
+ if (queue->starvation == 0) {
+ ARK_PMD_LOG(NOTICE,
+ "Could not allocate %u mbufs from pool for RX queue %u; %u free buffers remaining\n",
+ ARK_RX_MPU_CHUNK, queue->queue_index,
+ queue->seed_index - queue->cons_index);
+ queue->starvation = 1;
+ }
return -1;
}
+ queue->starvation = 0;
if (ARK_DEBUG_CORE) { /* DEBUG */
while (count != nb) {
--
2.34.1
next prev parent reply other threads:[~2025-09-10 19:05 UTC|newest]
Thread overview: 12+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-09-03 21:28 [PATCH 1/4] net/ark: add PCIe IDS for newly supported devices Ed Czeck
2025-09-03 21:28 ` [PATCH 2/4] net/ark: remove double mbuf frees Ed Czeck
2025-09-03 21:28 ` [PATCH 3/4] net/ark: improve ring handling for segmented packets Ed Czeck
2025-09-03 21:28 ` [PATCH 4/4] net/ark: improve Rx queue recovery after mbuf exhaustion Ed Czeck
2025-09-06 4:51 ` Stephen Hemminger
2025-09-10 0:33 ` Stephen Hemminger
2025-09-10 0:33 ` Stephen Hemminger
2025-09-10 19:04 ` [PATCH v2 1/4] net/ark: add PCIe IDS for newly supported devices Ed Czeck
2025-09-10 19:04 ` [PATCH v2 2/4] net/ark: remove double mbuf frees Ed Czeck
2025-09-10 19:04 ` [PATCH v2 3/4] net/ark: improve ring handling for segmented packets Ed Czeck
2025-09-10 19:04 ` Ed Czeck [this message]
2025-09-10 18:57 [PATCH v2 1/4] net/ark: add PCIe IDS for newly supported devices Ed Czeck
2025-09-10 18:57 ` [PATCH v2 4/4] net/ark: improve Rx queue recovery after mbuf exhaustion Ed Czeck
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250910190436.995899-4-ed.czeck@atomicrules.com \
--to=ed.czeck@atomicrules.com \
--cc=dev@dpdk.org \
--cc=john.miller@atomicrules.com \
--cc=shepard.siegel@atomicrules.com \
--cc=stephen@networkplumber.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).