From: Shani Peretz <shperetz@nvidia.com>
To: <dev@dpdk.org>
Cc: Shani Peretz <shperetz@nvidia.com>,
Dariusz Sosnowski <dsosnowski@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>,
"Bing Zhao" <bingz@nvidia.com>, Ori Kam <orika@nvidia.com>,
Suanming Mou <suanmingm@nvidia.com>,
Matan Azrad <matan@nvidia.com>
Subject: [RFC PATCH 3/5] net/mlx5: mark an operation in mempool object's history
Date: Mon, 16 Jun 2025 10:29:08 +0300
Message-ID: <20250616072910.113042-4-shperetz@nvidia.com>
In-Reply-To: <20250616072910.113042-1-shperetz@nvidia.com>

Record an operation in a mempool object's history when the object is
allocated or released inside the mlx5 PMD, using the
rte_mempool_history_mark() and rte_mempool_history_bulk() helpers
introduced earlier in this series.

Signed-off-by: Shani Peretz <shperetz@nvidia.com>
---
drivers/net/mlx5/mlx5_rx.c | 9 +++++++++
drivers/net/mlx5/mlx5_rx.h | 2 ++
drivers/net/mlx5/mlx5_rxq.c | 9 +++++++--
drivers/net/mlx5/mlx5_rxtx_vec.c | 6 ++++++
drivers/net/mlx5/mlx5_tx.h | 7 +++++++
drivers/net/mlx5/mlx5_txq.c | 1 +
6 files changed, 32 insertions(+), 2 deletions(-)
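
A note on the pattern applied throughout this patch: every call that
takes an mbuf from a mempool (rte_mbuf_raw_alloc(), the bulk replenish
paths) is immediately followed by a PMD_ALLOC mark, and every call that
returns one (rte_pktmbuf_free_seg(), rte_mbuf_raw_free(),
rte_mempool_put_bulk()) is immediately preceded by a PMD_FREE mark.
Below is a minimal sketch of that pairing, assuming the
rte_mempool_history_mark()/rte_mempool_history_bulk() helpers from
patch 1/5 of this series; their signatures are inferred from how they
are used here, and the single-object helper is assumed to tolerate a
NULL object, since this patch calls it before the NULL check that
follows rte_mbuf_raw_alloc(). The pmd_mbuf_* wrappers are hypothetical
and exist only to illustrate the pairing:

#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Take one mbuf from the pool and record the PMD-side allocation. */
static inline struct rte_mbuf *
pmd_mbuf_get(struct rte_mempool *mp)
{
	struct rte_mbuf *m = rte_mbuf_raw_alloc(mp);

	rte_mempool_history_mark(m, RTE_MEMPOOL_PMD_ALLOC);
	return m; /* May still be NULL; callers check as before. */
}

/* Record the PMD-side release, then return the segment to its pool. */
static inline void
pmd_mbuf_put(struct rte_mbuf *m)
{
	rte_mempool_history_mark(m, RTE_MEMPOOL_PMD_FREE);
	rte_pktmbuf_free_seg(m);
}

/* Bulk variant, as used on the Tx fast-free and Rx replenish paths. */
static inline void
pmd_mbuf_put_bulk(struct rte_mempool *mp, struct rte_mbuf **pkts,
		  unsigned int n)
{
	rte_mempool_history_bulk((void *)pkts, n, RTE_MEMPOOL_PMD_FREE);
	rte_mempool_put_bulk(mp, (void *)pkts, n);
}
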
diff --git a/drivers/net/mlx5/mlx5_rx.c b/drivers/net/mlx5/mlx5_rx.c
index 5f4a93fe8c..a86ed2180e 100644
--- a/drivers/net/mlx5/mlx5_rx.c
+++ b/drivers/net/mlx5/mlx5_rx.c
@@ -560,12 +560,15 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec,
elt_idx = (elts_ci + i) & e_mask;
elt = &(*rxq->elts)[elt_idx];
*elt = rte_mbuf_raw_alloc(rxq->mp);
+ rte_mempool_history_mark(*elt, RTE_MEMPOOL_PMD_ALLOC);
if (!*elt) {
for (i--; i >= 0; --i) {
elt_idx = (elts_ci +
i) & elts_n;
elt = &(*rxq->elts)
[elt_idx];
+ rte_mempool_history_mark(*elt,
+ RTE_MEMPOOL_PMD_FREE);
rte_pktmbuf_free_seg
(*elt);
}
@@ -952,6 +955,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
rte_prefetch0(wqe);
/* Allocate the buf from the same pool. */
rep = rte_mbuf_raw_alloc(seg->pool);
+ rte_mempool_history_mark(rep, RTE_MEMPOOL_PMD_ALLOC);
if (unlikely(rep == NULL)) {
++rxq->stats.rx_nombuf;
if (!pkt) {
@@ -966,6 +970,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
rep = NEXT(pkt);
NEXT(pkt) = NULL;
NB_SEGS(pkt) = 1;
+ rte_mempool_history_mark(pkt, RTE_MEMPOOL_PMD_FREE);
rte_mbuf_raw_free(pkt);
pkt = rep;
}
@@ -979,6 +984,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
len = mlx5_rx_poll_len(rxq, cqe, cqe_n, cqe_mask, &mcqe, &skip_cnt, false);
if (unlikely(len & MLX5_ERROR_CQE_MASK)) {
/* We drop packets with non-critical errors */
+ rte_mempool_history_mark(rep, RTE_MEMPOOL_PMD_FREE);
rte_mbuf_raw_free(rep);
if (len == MLX5_CRITICAL_ERROR_CQE_RET) {
rq_ci = rxq->rq_ci << sges_n;
@@ -992,6 +998,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
continue;
}
if (len == 0) {
+ rte_mempool_history_mark(rep, RTE_MEMPOOL_PMD_FREE);
rte_mbuf_raw_free(rep);
break;
}
@@ -1268,6 +1275,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
++rxq->stats.rx_nombuf;
break;
}
+ rte_mempool_history_mark(pkt, RTE_MEMPOOL_PMD_ALLOC);
len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
if (rxq->crc_present)
@@ -1275,6 +1283,7 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
rxq_code = mprq_buf_to_pkt(rxq, pkt, len, buf,
strd_idx, strd_cnt);
if (unlikely(rxq_code != MLX5_RXQ_CODE_EXIT)) {
+ rte_mempool_history_mark(pkt, RTE_MEMPOOL_PMD_FREE);
rte_pktmbuf_free_seg(pkt);
if (rxq_code == MLX5_RXQ_CODE_DROPPED) {
++rxq->stats.idropped;
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index 6380895502..db4ef10ca1 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -516,6 +516,7 @@ mprq_buf_to_pkt(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, uint32_t len,
if (unlikely(next == NULL))
return MLX5_RXQ_CODE_NOMBUF;
+ rte_mempool_history_mark(next, RTE_MEMPOOL_PMD_ALLOC);
NEXT(prev) = next;
SET_DATA_OFF(next, 0);
addr = RTE_PTR_ADD(addr, seg_len);
@@ -579,6 +580,7 @@ mprq_buf_to_pkt(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, uint32_t len,
if (unlikely(seg == NULL))
return MLX5_RXQ_CODE_NOMBUF;
+ rte_mempool_history_mark(seg, RTE_MEMPOOL_PMD_ALLOC);
SET_DATA_OFF(seg, 0);
rte_memcpy(rte_pktmbuf_mtod(seg, void *),
RTE_PTR_ADD(addr, len - hdrm_overlap),
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index f5df451a32..e95bef9d55 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -164,6 +164,7 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
rte_errno = ENOMEM;
goto error;
}
+ rte_mempool_history_mark(buf, RTE_MEMPOOL_PMD_ALLOC);
/* Only vectored Rx routines rely on headroom size. */
MLX5_ASSERT(!has_vec_support ||
DATA_OFF(buf) >= RTE_PKTMBUF_HEADROOM);
@@ -221,8 +222,10 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
err = rte_errno; /* Save rte_errno before cleanup. */
elts_n = i;
for (i = 0; (i != elts_n); ++i) {
- if ((*rxq_ctrl->rxq.elts)[i] != NULL)
+ if ((*rxq_ctrl->rxq.elts)[i] != NULL) {
+ rte_mempool_history_mark((*rxq_ctrl->rxq.elts)[i], RTE_MEMPOOL_PMD_FREE);
rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
+ }
(*rxq_ctrl->rxq.elts)[i] = NULL;
}
if (rxq_ctrl->share_group == 0)
@@ -324,8 +327,10 @@ rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
rxq->rq_pi = elts_ci;
}
for (i = 0; i != q_n; ++i) {
- if ((*rxq->elts)[i] != NULL)
+ if ((*rxq->elts)[i] != NULL) {
+ rte_mempool_history_mark((*rxq->elts)[i], RTE_MEMPOOL_PMD_FREE);
rte_pktmbuf_free_seg((*rxq->elts)[i]);
+ }
(*rxq->elts)[i] = NULL;
}
}
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index 1b701801c5..ffaa10c547 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -64,6 +64,7 @@ rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
#ifdef MLX5_PMD_SOFT_COUNTERS
err_bytes += PKT_LEN(pkt);
#endif
+ rte_mempool_history_mark(pkt, RTE_MEMPOOL_PMD_FREE);
rte_pktmbuf_free_seg(pkt);
} else {
pkts[n++] = pkt;
@@ -107,6 +108,7 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq)
rxq->stats.rx_nombuf += n;
return;
}
+ rte_mempool_history_bulk((void *)elts, n, RTE_MEMPOOL_PMD_ALLOC);
if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1)) {
for (i = 0; i < n; ++i) {
/*
@@ -171,6 +173,7 @@ mlx5_rx_mprq_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq)
rxq->stats.rx_nombuf += n;
return;
}
+ rte_mempool_history_bulk((void *)elts, n, RTE_MEMPOOL_PMD_ALLOC);
rxq->elts_ci += n;
/* Prevent overflowing into consumed mbufs. */
elts_idx = rxq->elts_ci & wqe_mask;
@@ -224,6 +227,7 @@ rxq_copy_mprq_mbuf_v(struct mlx5_rxq_data *rxq,
if (!elts[i]->pkt_len) {
rxq->consumed_strd = strd_n;
+ rte_mempool_history_mark(elts[i], RTE_MEMPOOL_PMD_FREE);
rte_pktmbuf_free_seg(elts[i]);
#ifdef MLX5_PMD_SOFT_COUNTERS
rxq->stats.ipackets -= 1;
@@ -236,6 +240,7 @@ rxq_copy_mprq_mbuf_v(struct mlx5_rxq_data *rxq,
buf, rxq->consumed_strd, strd_cnt);
rxq->consumed_strd += strd_cnt;
if (unlikely(rxq_code != MLX5_RXQ_CODE_EXIT)) {
+ rte_mempool_history_mark(elts[i], RTE_MEMPOOL_PMD_FREE);
rte_pktmbuf_free_seg(elts[i]);
#ifdef MLX5_PMD_SOFT_COUNTERS
rxq->stats.ipackets -= 1;
@@ -586,6 +591,7 @@ mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
rte_io_wmb();
*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
} while (tn != pkts_n);
+
return tn;
}
diff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h
index 55568c41b1..7b61d87120 100644
--- a/drivers/net/mlx5/mlx5_tx.h
+++ b/drivers/net/mlx5/mlx5_tx.h
@@ -553,6 +553,7 @@ mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
if (!MLX5_TXOFF_CONFIG(MULTI) && txq->fast_free) {
mbuf = *pkts;
pool = mbuf->pool;
+ rte_mempool_history_bulk((void *)pkts, pkts_n, RTE_MEMPOOL_PMD_FREE);
rte_mempool_put_bulk(pool, (void *)pkts, pkts_n);
return;
}
@@ -608,6 +609,7 @@ mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
* Free the array of pre-freed mbufs
* belonging to the same memory pool.
*/
+ rte_mempool_history_bulk((void *)p_free, n_free, RTE_MEMPOOL_PMD_FREE);
rte_mempool_put_bulk(pool, (void *)p_free, n_free);
if (unlikely(mbuf != NULL)) {
/* There is the request to start new scan. */
@@ -1223,6 +1225,7 @@ mlx5_tx_mseg_memcpy(uint8_t *pdst,
/* Exhausted packet, just free. */
mbuf = loc->mbuf;
loc->mbuf = mbuf->next;
+ rte_mempool_history_mark(mbuf, RTE_MEMPOOL_PMD_FREE);
rte_pktmbuf_free_seg(mbuf);
loc->mbuf_off = 0;
MLX5_ASSERT(loc->mbuf_nseg > 1);
@@ -1265,6 +1268,7 @@ mlx5_tx_mseg_memcpy(uint8_t *pdst,
/* Exhausted packet, just free. */
mbuf = loc->mbuf;
loc->mbuf = mbuf->next;
+ rte_mempool_history_mark(mbuf, RTE_MEMPOOL_PMD_FREE);
rte_pktmbuf_free_seg(mbuf);
loc->mbuf_off = 0;
MLX5_ASSERT(loc->mbuf_nseg >= 1);
@@ -1715,6 +1719,7 @@ mlx5_tx_mseg_build(struct mlx5_txq_data *__rte_restrict txq,
/* Zero length segment found, just skip. */
mbuf = loc->mbuf;
loc->mbuf = loc->mbuf->next;
+ rte_mempool_history_mark(mbuf, RTE_MEMPOOL_PMD_FREE);
rte_pktmbuf_free_seg(mbuf);
if (--loc->mbuf_nseg == 0)
break;
@@ -2018,6 +2023,7 @@ mlx5_tx_packet_multi_send(struct mlx5_txq_data *__rte_restrict txq,
wqe->cseg.sq_ds -= RTE_BE32(1);
mbuf = loc->mbuf;
loc->mbuf = mbuf->next;
+ rte_mempool_history_mark(mbuf, RTE_MEMPOOL_PMD_FREE);
rte_pktmbuf_free_seg(mbuf);
if (--nseg == 0)
break;
@@ -3317,6 +3323,7 @@ mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,
* Packet data are completely inlined,
* free the packet immediately.
*/
+ rte_mempool_history_mark(loc->mbuf, RTE_MEMPOOL_PMD_FREE);
rte_pktmbuf_free_seg(loc->mbuf);
} else if ((!MLX5_TXOFF_CONFIG(EMPW) ||
MLX5_TXOFF_CONFIG(MPW)) &&
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 5fee5bc4e8..156f8c2ef8 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -78,6 +78,7 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
MLX5_ASSERT(elt != NULL);
+ rte_mempool_history_mark(elt, RTE_MEMPOOL_PMD_FREE);
rte_pktmbuf_free_seg(elt);
#ifdef RTE_LIBRTE_MLX5_DEBUG
/* Poisoning. */
--
2.34.1