From: vanshika.shukla@nxp.com
To: dev@dpdk.org, Hemant Agrawal <hemant.agrawal@nxp.com>,
Sachin Saxena <sachin.saxena@nxp.com>
Cc: Jun Yang <jun.yang@nxp.com>
Subject: [v3 10/10] bus/dpaa: optimize qman enqueue check
Date: Tue, 10 Jun 2025 14:44:11 +0530
Message-ID: <20250610091411.2500413-11-vanshika.shukla@nxp.com>
In-Reply-To: <20250610091411.2500413-1-vanshika.shukla@nxp.com>
From: Hemant Agrawal <hemant.agrawal@nxp.com>
This patch improves data access during the qman enqueue ring check. The EQCR entry pointer now advances with a plain increment and an explicit wrap check against the ring end instead of address-mask arithmetic, fqid_le is renamed to fqid_be to match the byte order it actually holds, and the duplicate bm_hw_buf_desc descriptor is dropped in favour of bm_buffer.
Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
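Note: below is a minimal standalone sketch of the ring-wrap change for reference only. "struct entry", RING_SIZE, next_masked() and next_checked() are simplified stand-ins, not the real qman EQCR definitions, and the demo over-aligns the ring so the masked variant is valid.

/*
 * Minimal sketch of the two EQCR ring-wrap strategies (illustrative only).
 */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8			/* stands in for QM_EQCR_SIZE */

struct entry { uint8_t data[64]; };	/* 64-byte ring entries */

/* Old style: wrap by masking the address of the next entry. This only
 * works when the ring base has the (RING_SIZE << 6) address bit clear,
 * which the portal memory layout had to guarantee. */
static struct entry *next_masked(struct entry *e)
{
	return (void *)((unsigned long)(e + 1) &
			~(unsigned long)(RING_SIZE << 6));
}

/* New style: plain increment plus an explicit compare against the ring
 * end; no hidden alignment assumption, and the wrap point is explicit. */
static struct entry *next_checked(struct entry *ring, struct entry *e)
{
	e++;
	if (e >= ring + RING_SIZE)
		e = ring;
	return e;
}

int main(void)
{
	/* Over-align the ring so the masked variant works in this demo. */
	static struct entry ring[RING_SIZE]
		__attribute__((aligned(2 * RING_SIZE * 64)));
	struct entry *last = &ring[RING_SIZE - 1];

	printf("masked wrap back to start:  %d\n",
	       next_masked(last) == &ring[0]);
	printf("checked wrap back to start: %d\n",
	       next_checked(ring, last) == &ring[0]);
	return 0;
}

The real code in qman.c additionally folds the vbit toggle into the same wrap branch, as the diff below shows.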
drivers/bus/dpaa/base/qbman/bman.c | 19 ++++++-------
drivers/bus/dpaa/base/qbman/qman.c | 41 ++++++++++++++++-------------
drivers/bus/dpaa/include/fsl_bman.h | 7 -----
drivers/bus/dpaa/include/fsl_qman.h | 2 +-
4 files changed, 33 insertions(+), 36 deletions(-)
diff --git a/drivers/bus/dpaa/base/qbman/bman.c b/drivers/bus/dpaa/base/qbman/bman.c
index 13f535a679..aa86fe7a20 100644
--- a/drivers/bus/dpaa/base/qbman/bman.c
+++ b/drivers/bus/dpaa/base/qbman/bman.c
@@ -275,7 +275,7 @@ bman_release_fast(struct bman_pool *pool, const uint64_t *bufs,
struct bm_rcr_entry *r;
uint8_t i, avail;
uint64_t bpid = pool->params.bpid;
- struct bm_hw_buf_desc bm_bufs[FSL_BM_BURST_MAX];
+ struct bm_buffer bm_bufs[FSL_BM_BURST_MAX];
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
if (!num || (num > FSL_BM_BURST_MAX))
@@ -297,11 +297,11 @@ bman_release_fast(struct bman_pool *pool, const uint64_t *bufs,
* with the valid-bit
*/
bm_bufs[0].bpid = bpid;
- bm_bufs[0].hi_addr = cpu_to_be16(HI16_OF_U48(bufs[0]));
- bm_bufs[0].lo_addr = cpu_to_be32(LO32_OF_U48(bufs[0]));
+ bm_bufs[0].hi = cpu_to_be16(HI16_OF_U48(bufs[0]));
+ bm_bufs[0].lo = cpu_to_be32(LO32_OF_U48(bufs[0]));
for (i = 1; i < num; i++) {
- bm_bufs[i].hi_addr = cpu_to_be16(HI16_OF_U48(bufs[i]));
- bm_bufs[i].lo_addr = cpu_to_be32(LO32_OF_U48(bufs[i]));
+ bm_bufs[i].hi = cpu_to_be16(HI16_OF_U48(bufs[i]));
+ bm_bufs[i].lo = cpu_to_be32(LO32_OF_U48(bufs[i]));
}
rte_memcpy(r->bufs, bm_bufs, sizeof(struct bm_buffer) * num);
@@ -363,12 +363,13 @@ bman_extract_addr(struct bm_buffer *buf)
}
static inline uint64_t
-bman_hw_extract_addr(struct bm_hw_buf_desc *buf)
+bman_hw_extract_addr(struct bm_buffer *buf)
{
uint64_t hi, lo;
- hi = be16_to_cpu(buf->hi_addr);
- lo = be32_to_cpu(buf->lo_addr);
+ hi = be16_to_cpu(buf->hi);
+ lo = be32_to_cpu(buf->lo);
+
return U48_BY_HI16_LO32(hi, lo);
}
@@ -380,7 +381,7 @@ bman_acquire_fast(struct bman_pool *pool, uint64_t *bufs, uint8_t num)
struct bm_mc_command *mcc;
struct bm_mc_result *mcr;
uint8_t i, rst;
- struct bm_hw_buf_desc bm_bufs[FSL_BM_BURST_MAX];
+ struct bm_buffer bm_bufs[FSL_BM_BURST_MAX];
#ifdef RTE_LIBRTE_DPAA_HWDEBUG
if (!num || (num > FSL_BM_BURST_MAX))
diff --git a/drivers/bus/dpaa/base/qbman/qman.c b/drivers/bus/dpaa/base/qbman/qman.c
index fbce0638b7..60087c55a1 100644
--- a/drivers/bus/dpaa/base/qbman/qman.c
+++ b/drivers/bus/dpaa/base/qbman/qman.c
@@ -1466,7 +1466,7 @@ int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
}
spin_lock_init(&fq->fqlock);
fq->fqid = fqid;
- fq->fqid_le = cpu_to_be32(fqid);
+ fq->fqid_be = cpu_to_be32(fqid);
fq->flags = flags;
fq->state = qman_fq_state_oos;
fq->cgr_groupid = 0;
@@ -2291,7 +2291,7 @@ int qman_enqueue_multi(struct qman_fq *fq,
struct qm_portal *portal = &p->p;
register struct qm_eqcr *eqcr = &portal->eqcr;
- struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;
+ struct qm_eqcr_entry *eq = eqcr->cursor;
u8 i = 0, diff, old_ci, sent = 0;
@@ -2307,7 +2307,7 @@ int qman_enqueue_multi(struct qman_fq *fq,
/* try to send as many frames as possible */
while (eqcr->available && frames_to_send--) {
- eq->fqid = fq->fqid_le;
+ eq->fqid = fq->fqid_be;
eq->fd.opaque_addr = fd->opaque_addr;
eq->fd.addr = cpu_to_be40(fd->addr);
eq->fd.status = cpu_to_be32(fd->status);
@@ -2317,8 +2317,9 @@ int qman_enqueue_multi(struct qman_fq *fq,
((flags[i] >> 8) & QM_EQCR_DCA_IDXMASK);
}
i++;
- eq = (void *)((unsigned long)(eq + 1) &
- (~(unsigned long)(QM_EQCR_SIZE << 6)));
+ eq++;
+ if (unlikely(eq >= (eqcr->ring + QM_EQCR_SIZE)))
+ eq = eqcr->ring;
eqcr->available--;
sent++;
fd++;
@@ -2332,11 +2333,11 @@ int qman_enqueue_multi(struct qman_fq *fq,
for (i = 0; i < sent; i++) {
eq->__dont_write_directly__verb =
QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
- prev_eq = eq;
- eq = (void *)((unsigned long)(eq + 1) &
- (~(unsigned long)(QM_EQCR_SIZE << 6)));
- if (unlikely((prev_eq + 1) != eq))
+ eq++;
+ if (unlikely(eq >= (eqcr->ring + QM_EQCR_SIZE))) {
eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+ eq = eqcr->ring;
+ }
}
/* We need to flush all the lines but without load/store operations
@@ -2361,7 +2362,7 @@ qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
struct qm_portal *portal = &p->p;
register struct qm_eqcr *eqcr = &portal->eqcr;
- struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;
+ struct qm_eqcr_entry *eq = eqcr->cursor;
u8 i = 0, diff, old_ci, sent = 0;
@@ -2377,7 +2378,7 @@ qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
/* try to send as many frames as possible */
while (eqcr->available && frames_to_send--) {
- eq->fqid = fq[sent]->fqid_le;
+ eq->fqid = fq[sent]->fqid_be;
eq->fd.opaque_addr = fd->opaque_addr;
eq->fd.addr = cpu_to_be40(fd->addr);
eq->fd.status = cpu_to_be32(fd->status);
@@ -2388,8 +2389,9 @@ qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
}
i++;
- eq = (void *)((unsigned long)(eq + 1) &
- (~(unsigned long)(QM_EQCR_SIZE << 6)));
+ eq++;
+ if (unlikely(eq >= (eqcr->ring + QM_EQCR_SIZE)))
+ eq = eqcr->ring;
eqcr->available--;
sent++;
fd++;
@@ -2403,11 +2405,11 @@ qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
for (i = 0; i < sent; i++) {
eq->__dont_write_directly__verb =
QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
- prev_eq = eq;
- eq = (void *)((unsigned long)(eq + 1) &
- (~(unsigned long)(QM_EQCR_SIZE << 6)));
- if (unlikely((prev_eq + 1) != eq))
+ eq++;
+ if (unlikely(eq >= (eqcr->ring + QM_EQCR_SIZE))) {
eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+ eq = eqcr->ring;
+ }
}
/* We need to flush all the lines but without load/store operations
@@ -2416,8 +2418,9 @@ qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
eq = eqcr->cursor;
for (i = 0; i < sent; i++) {
dcbf(eq);
- eq = (void *)((unsigned long)(eq + 1) &
- (~(unsigned long)(QM_EQCR_SIZE << 6)));
+ eq++;
+ if (unlikely(eq >= (eqcr->ring + QM_EQCR_SIZE)))
+ eq = eqcr->ring;
}
/* Update cursor for the next call */
eqcr->cursor = eq;
diff --git a/drivers/bus/dpaa/include/fsl_bman.h b/drivers/bus/dpaa/include/fsl_bman.h
index 0107ced8c5..c3c6f84472 100644
--- a/drivers/bus/dpaa/include/fsl_bman.h
+++ b/drivers/bus/dpaa/include/fsl_bman.h
@@ -70,13 +70,6 @@ struct __rte_aligned(8) bm_buffer {
};
};
-struct __rte_packed_begin bm_hw_buf_desc {
- uint8_t rsv;
- uint8_t bpid;
- rte_be16_t hi_addr; /* High 16-bits of 48-bit address */
- rte_be32_t lo_addr; /* Low 32-bits of 48-bit address */
-} __rte_packed_end;
-
static inline u64 bm_buffer_get64(const struct bm_buffer *buf)
{
return buf->addr;
diff --git a/drivers/bus/dpaa/include/fsl_qman.h b/drivers/bus/dpaa/include/fsl_qman.h
index b949f2c893..71d5b16878 100644
--- a/drivers/bus/dpaa/include/fsl_qman.h
+++ b/drivers/bus/dpaa/include/fsl_qman.h
@@ -1225,7 +1225,7 @@ struct qman_fq {
/* Caller of qman_create_fq() provides these demux callbacks */
struct qman_fq_cb cb;
- u32 fqid_le;
+ rte_be32_t fqid_be;
u32 fqid;
int q_fd;
--
2.25.1
Thread overview: 52+ messages
2025-05-28 10:39 [v1 00/10] DPAA specific fixes vanshika.shukla
2025-05-28 10:39 ` [v1 01/10] bus/dpaa: avoid using same structure and variable name vanshika.shukla
2025-05-28 10:39 ` [v1 02/10] bus/dpaa: add FMan node vanshika.shukla
2025-05-28 10:39 ` [v1 03/10] bus/dpaa: enhance DPAA SoC version vanshika.shukla
2025-05-28 14:28 ` Stephen Hemminger
2025-05-28 10:39 ` [v1 04/10] bus/dpaa: optimize bman acquire/release vanshika.shukla
2025-05-28 14:30 ` Stephen Hemminger
2025-05-28 14:50 ` [EXT] " Jun Yang
2025-05-28 10:39 ` [v1 05/10] mempool/dpaa: fast acquire and release vanshika.shukla
2025-05-28 10:39 ` [v1 06/10] mempool/dpaa: adjust pool element for LS1043A errata vanshika.shukla
2025-05-28 10:39 ` [v1 07/10] net/dpaa: add Tx rate limiting DPAA PMD API vanshika.shukla
2025-06-03 6:06 ` Varghese, Vipin
2025-05-28 10:39 ` [v1 08/10] net/dpaa: add devargs for enabling err packets on main queue vanshika.shukla
2025-05-28 10:39 ` [v1 09/10] bus/dpaa: improve DPAA cleanup vanshika.shukla
2025-05-28 10:39 ` [v1 10/10] bus/dpaa: optimize qman enqueue check vanshika.shukla
2025-06-02 13:15 ` [v2 00/10] DPAA specific fixes vanshika.shukla
2025-06-02 13:15 ` [v2 01/10] bus/dpaa: avoid using same structure and variable name vanshika.shukla
2025-06-02 13:15 ` [v2 02/10] bus/dpaa: add FMan node vanshika.shukla
2025-06-02 13:15 ` [v2 03/10] bus/dpaa: enhance DPAA SoC version vanshika.shukla
2025-06-02 13:15 ` [v2 04/10] bus/dpaa: optimize bman acquire/release vanshika.shukla
2025-06-02 13:15 ` [v2 05/10] mempool/dpaa: fast acquire and release vanshika.shukla
2025-06-02 13:15 ` [v2 06/10] mempool/dpaa: adjust pool element for LS1043A errata vanshika.shukla
2025-06-02 13:15 ` [v2 07/10] net/dpaa: add Tx rate limiting DPAA PMD API vanshika.shukla
2025-06-02 13:15 ` [v2 08/10] net/dpaa: add devargs for enabling err packets on main queue vanshika.shukla
2025-06-02 13:15 ` [v2 09/10] bus/dpaa: improve DPAA cleanup vanshika.shukla
2025-06-02 13:15 ` [v2 10/10] bus/dpaa: optimize qman enqueue check vanshika.shukla
2025-06-04 2:51 ` Stephen Hemminger
2025-06-10 9:14 ` [v3 00/10] DPAA specific fixes vanshika.shukla
2025-06-10 9:14 ` [v3 01/10] bus/dpaa: avoid using same structure and variable name vanshika.shukla
2025-06-10 9:14 ` [v3 02/10] bus/dpaa: add FMan node vanshika.shukla
2025-06-10 9:14 ` [v3 03/10] bus/dpaa: enhance DPAA SoC version vanshika.shukla
2025-06-10 9:14 ` [v3 04/10] bus/dpaa: optimize bman acquire/release vanshika.shukla
2025-06-10 9:14 ` [v3 05/10] mempool/dpaa: fast acquire and release vanshika.shukla
2025-06-10 9:14 ` [v3 06/10] mempool/dpaa: adjust pool element for LS1043A errata vanshika.shukla
2025-06-10 9:14 ` [v3 07/10] net/dpaa: add Tx rate limiting DPAA PMD API vanshika.shukla
2025-06-10 9:14 ` [v3 08/10] net/dpaa: add devargs for enabling err packets on main queue vanshika.shukla
2025-06-10 9:14 ` [v3 09/10] bus/dpaa: improve DPAA cleanup vanshika.shukla
2025-06-10 9:14 ` vanshika.shukla [this message]
2025-06-11 7:10 ` [v4 00/10] DPAA specific fixes vanshika.shukla
2025-06-11 7:10 ` [v4 01/10] bus/dpaa: avoid using same structure and variable name vanshika.shukla
2025-06-11 7:10 ` [v4 02/10] bus/dpaa: add FMan node vanshika.shukla
2025-06-11 16:50 ` Stephen Hemminger
2025-06-11 7:10 ` [v4 03/10] bus/dpaa: enhance DPAA SoC version vanshika.shukla
2025-06-11 7:10 ` [v4 04/10] bus/dpaa: optimize bman acquire/release vanshika.shukla
2025-06-11 7:10 ` [v4 05/10] mempool/dpaa: fast acquire and release vanshika.shukla
2025-06-11 7:10 ` [v4 06/10] mempool/dpaa: adjust pool element for LS1043A errata vanshika.shukla
2025-06-11 7:10 ` [v4 07/10] net/dpaa: add Tx rate limiting DPAA PMD API vanshika.shukla
2025-06-11 7:10 ` [v4 08/10] net/dpaa: add devargs for enabling err packets on main queue vanshika.shukla
2025-06-11 7:10 ` [v4 09/10] bus/dpaa: improve DPAA cleanup vanshika.shukla
2025-06-11 7:10 ` [v4 10/10] bus/dpaa: optimize qman enqueue check vanshika.shukla
2025-06-11 16:15 ` Stephen Hemminger
2025-06-11 20:04 ` Patrick Robb