From: vanshika.shukla@nxp.com
To: dev@dpdk.org, Hemant Agrawal <hemant.agrawal@nxp.com>,
Sachin Saxena <sachin.saxena@nxp.com>
Cc: Jun Yang <jun.yang@nxp.com>
Subject: [v1 05/10] mempool/dpaa: fast acquire and release
Date: Wed, 28 May 2025 16:09:29 +0530
Message-ID: <20250528103934.1001747-6-vanshika.shukla@nxp.com>
In-Reply-To: <20250528103934.1001747-1-vanshika.shukla@nxp.com>
From: Jun Yang <jun.yang@nxp.com>
Use the new BMan fast-path APIs to improve performance and to support
burst release. Burst release improves release performance by ~90%.
Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
drivers/mempool/dpaa/dpaa_mempool.c | 85 ++++++++++++-----------------
drivers/mempool/dpaa/dpaa_mempool.h | 2 +-
2 files changed, 36 insertions(+), 51 deletions(-)
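
Note (not part of the patch): the burst-release pattern introduced in
dpaa_mbuf_free_bulk() boils down to the loop sketched below. This is a
simplified sketch rather than the driver code itself; it assumes
bman_release_fast(bp, phys, n) takes an array of bus addresses plus a
count and returns non-zero while the portal is busy, as used in this
patch, and that RTE_MIN() from rte_common.h is available.

/* Sketch only: burst release of mempool objects to a BMan pool.
 * Assumes bman_release_fast() accepts up to DPAA_MBUF_MAX_ACQ_REL
 * bus addresses per call and returns non-zero while the portal is
 * busy (as in this patch).
 */
static void
release_burst_sketch(struct dpaa_bp_info *bp_info,
		     void *const *obj_table, unsigned int count)
{
	uint64_t phys[DPAA_MBUF_MAX_ACQ_REL];
	uint32_t n = 0, i, burst;

	while (n < count) {
		/* Split the request into hardware-sized bursts. */
		burst = RTE_MIN(count - n,
				(uint32_t)DPAA_MBUF_MAX_ACQ_REL);

		for (i = 0; i < burst; i++) {
			/* Bus address of the buffer, skipping the
			 * per-buffer metadata in front of the mbuf.
			 */
			phys[i] = rte_mempool_virt2iova(obj_table[n]) +
				  bp_info->meta_data_size;
			n++;
		}

		/* Retry until the portal accepts the whole burst. */
		while (bman_release_fast(bp_info->bp, phys, burst))
			;
	}
}

As the diff shows, the fast-path variants take plain uint64_t bus
addresses rather than struct bm_buffer entries, so a whole burst can be
passed in a single call.
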
diff --git a/drivers/mempool/dpaa/dpaa_mempool.c b/drivers/mempool/dpaa/dpaa_mempool.c
index 7dacaa9513..6c850f5cb2 100644
--- a/drivers/mempool/dpaa/dpaa_mempool.c
+++ b/drivers/mempool/dpaa/dpaa_mempool.c
@@ -157,61 +157,46 @@ dpaa_mbuf_free_pool(struct rte_mempool *mp)
}
}
-static void
-dpaa_buf_free(struct dpaa_bp_info *bp_info, uint64_t addr)
-{
- struct bm_buffer buf;
- int ret;
-
- DPAA_MEMPOOL_DPDEBUG("Free 0x%" PRIx64 " to bpid: %d",
- addr, bp_info->bpid);
-
- bm_buffer_set64(&buf, addr);
-retry:
- ret = bman_release(bp_info->bp, &buf, 1, 0);
- if (ret) {
- DPAA_MEMPOOL_DEBUG("BMAN busy. Retrying...");
- cpu_spin(CPU_SPIN_BACKOFF_CYCLES);
- goto retry;
- }
-}
-
static int
dpaa_mbuf_free_bulk(struct rte_mempool *pool,
void *const *obj_table,
- unsigned int n)
+ unsigned int count)
{
struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);
int ret;
- unsigned int i = 0;
+ uint32_t n = 0, i, left;
+ uint64_t phys[DPAA_MBUF_MAX_ACQ_REL];
DPAA_MEMPOOL_DPDEBUG("Request to free %d buffers in bpid = %d",
- n, bp_info->bpid);
+ count, bp_info->bpid);
if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
ret = rte_dpaa_portal_init((void *)0);
if (ret) {
DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
ret);
- return 0;
+ return ret;
}
}
- while (i < n) {
- uint64_t phy = rte_mempool_virt2iova(obj_table[i]);
-
- if (unlikely(!bp_info->ptov_off)) {
- /* buffers are from single mem segment */
- if (bp_info->flags & DPAA_MPOOL_SINGLE_SEGMENT) {
- bp_info->ptov_off = (size_t)obj_table[i] - phy;
- rte_dpaa_bpid_info[bp_info->bpid].ptov_off
- = bp_info->ptov_off;
- }
+ while (n < count) {
+ /* Release in bursts of at most DPAA_MBUF_MAX_ACQ_REL
+ * buffers, then the remainder.
+ */
+ if ((count - n) > DPAA_MBUF_MAX_ACQ_REL)
+ left = DPAA_MBUF_MAX_ACQ_REL;
+ else
+ left = count - n;
+
+ for (i = 0; i < left; i++) {
+ phys[i] = rte_mempool_virt2iova(obj_table[n]);
+ phys[i] += bp_info->meta_data_size;
+ n++;
}
-
- dpaa_buf_free(bp_info,
- (uint64_t)phy + bp_info->meta_data_size);
- i = i + 1;
+release_again:
+ ret = bman_release_fast(bp_info->bp, phys, left);
+ if (unlikely(ret))
+ goto release_again;
}
DPAA_MEMPOOL_DPDEBUG("freed %d buffers in bpid =%d",
@@ -226,9 +211,9 @@ dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
unsigned int count)
{
struct rte_mbuf **m = (struct rte_mbuf **)obj_table;
- struct bm_buffer bufs[DPAA_MBUF_MAX_ACQ_REL];
+ uint64_t bufs[DPAA_MBUF_MAX_ACQ_REL];
struct dpaa_bp_info *bp_info;
- void *bufaddr;
+ uint8_t *bufaddr;
int i, ret;
unsigned int n = 0;
@@ -240,7 +225,7 @@ dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
if (unlikely(count >= (RTE_MEMPOOL_CACHE_MAX_SIZE * 2))) {
DPAA_MEMPOOL_ERR("Unable to allocate requested (%u) buffers",
count);
- return -1;
+ return -EINVAL;
}
if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
@@ -248,7 +233,7 @@ dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
if (ret) {
DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
ret);
- return -1;
+ return ret;
}
}
@@ -257,10 +242,11 @@ dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
* then the remainder.
*/
if ((count - n) > DPAA_MBUF_MAX_ACQ_REL) {
- ret = bman_acquire(bp_info->bp, bufs,
- DPAA_MBUF_MAX_ACQ_REL, 0);
+ ret = bman_acquire_fast(bp_info->bp, bufs,
+ DPAA_MBUF_MAX_ACQ_REL);
} else {
- ret = bman_acquire(bp_info->bp, bufs, count - n, 0);
+ ret = bman_acquire_fast(bp_info->bp, bufs,
+ count - n);
}
/* In case of less than requested number of buffers available
* in pool, qbman_swp_acquire returns 0
@@ -275,16 +261,15 @@ dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
return -ENOBUFS;
}
/* assigning mbuf from the acquired objects */
- for (i = 0; (i < ret) && bufs[i].addr; i++) {
+ for (i = 0; (i < ret) && bufs[i]; i++) {
/* TODO-errata - observed that bufs may be null
* i.e. first buffer is valid, remaining 6 buffers
* may be null.
*/
- bufaddr = DPAA_MEMPOOL_PTOV(bp_info, bufs[i].addr);
- m[n] = (struct rte_mbuf *)((char *)bufaddr
- - bp_info->meta_data_size);
- DPAA_MEMPOOL_DPDEBUG("Paddr (%p), FD (%p) from BMAN",
- (void *)bufaddr, (void *)m[n]);
+ bufaddr = DPAA_MEMPOOL_PTOV(bp_info, bufs[i]);
+ m[n] = (void *)(bufaddr - bp_info->meta_data_size);
+ DPAA_MEMPOOL_DPDEBUG("Vaddr(%p), mbuf(%p) from BMAN",
+ bufaddr, m[n]);
n++;
}
}
diff --git a/drivers/mempool/dpaa/dpaa_mempool.h b/drivers/mempool/dpaa/dpaa_mempool.h
index 0877068fdd..5ca53c7ff9 100644
--- a/drivers/mempool/dpaa/dpaa_mempool.h
+++ b/drivers/mempool/dpaa/dpaa_mempool.h
@@ -26,7 +26,7 @@
#define DPAA_MAX_BPOOLS 256
/* Maximum release/acquire from BMAN */
-#define DPAA_MBUF_MAX_ACQ_REL 8
+#define DPAA_MBUF_MAX_ACQ_REL FSL_BM_BURST_MAX
/* Buffers are allocated from single mem segment i.e. phys contiguous */
#define DPAA_MPOOL_SINGLE_SEGMENT 0x01
--
2.25.1
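
Note (not part of the patch): on the acquire side, bman_acquire_fast()
now returns raw bus addresses which are converted back to mbuf
pointers. A minimal sketch of that conversion, assuming the return
value is the number of buffers actually acquired (zero or negative when
the pool cannot satisfy the burst) and reusing the DPAA_MEMPOOL_PTOV()
helper from the driver, is shown below; error handling is simplified
compared to the driver.

/* Sketch only: burst acquire from a BMan pool into an mbuf array. */
static int
acquire_burst_sketch(struct dpaa_bp_info *bp_info,
		     struct rte_mbuf **m, unsigned int count)
{
	uint64_t bufs[DPAA_MBUF_MAX_ACQ_REL];
	unsigned int n = 0;
	uint8_t *bufaddr;
	int i, ret;

	while (n < count) {
		ret = bman_acquire_fast(bp_info->bp, bufs,
					RTE_MIN(count - n,
					(unsigned int)DPAA_MBUF_MAX_ACQ_REL));
		if (ret <= 0)
			return -ENOBUFS; /* pool short of buffers */

		for (i = 0; i < ret && bufs[i]; i++) {
			/* Map the bus address back to virtual, then
			 * step back over the metadata to the mbuf.
			 */
			bufaddr = DPAA_MEMPOOL_PTOV(bp_info, bufs[i]);
			m[n++] = (struct rte_mbuf *)(bufaddr -
					bp_info->meta_data_size);
		}
	}
	return 0;
}
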