DPDK patches and discussions
 help / color / mirror / Atom feed
From: Shani Peretz <shperetz@nvidia.com>
To: <dev@dpdk.org>
Cc: <mb@smartsharesystems.com>, <stephen@networkplumber.org>,
	<bruce.richardson@intel.com>, <ajit.khaparde@broadcom.com>,
	<jerinj@marvell.com>, <konstantin.v.ananyev@yandex.ru>,
	<david.marchand@redhat.com>, <maxime.coquelin@redhat.com>,
	<gakhil@marvell.com>, <viacheslavo@nvidia.com>,
	<thomas@monjalon.net>, "Shani Peretz" <shperetz@nvidia.com>,
	Dariusz Sosnowski <dsosnowski@nvidia.com>,
	"Bing Zhao" <bingz@nvidia.com>, Ori Kam <orika@nvidia.com>,
	Suanming Mou <suanmingm@nvidia.com>,
	Matan Azrad <matan@nvidia.com>
Subject: [PATCH v2 2/4] net/mlx5: mark an operation in mbuf's history
Date: Tue, 16 Sep 2025 18:12:05 +0300	[thread overview]
Message-ID: <20250916151207.556618-3-shperetz@nvidia.com> (raw)
In-Reply-To: <20250916151207.556618-1-shperetz@nvidia.com>

Record operations on mbufs when they are allocated
and released inside the mlx5 PMD.

Signed-off-by: Shani Peretz <shperetz@nvidia.com>
---
 drivers/net/mlx5/mlx5_rx.c       | 25 +++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_rx.h       |  6 ++++++
 drivers/net/mlx5/mlx5_rxq.c      | 15 +++++++++++++--
 drivers/net/mlx5/mlx5_rxtx_vec.c | 16 ++++++++++++++++
 drivers/net/mlx5/mlx5_tx.h       | 21 +++++++++++++++++++++
 drivers/net/mlx5/mlx5_txq.c      |  3 +++
 6 files changed, 84 insertions(+), 2 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rx.c b/drivers/net/mlx5/mlx5_rx.c
index 420a03068d..4e44892d93 100644
--- a/drivers/net/mlx5/mlx5_rx.c
+++ b/drivers/net/mlx5/mlx5_rx.c
@@ -640,12 +640,19 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec,
 					elt_idx = (elts_ci + i) & e_mask;
 					elt = &(*rxq->elts)[elt_idx];
 					*elt = rte_mbuf_raw_alloc(rxq->mp);
+#if RTE_MBUF_HISTORY_DEBUG
+					rte_mbuf_history_mark(*elt, RTE_MBUF_PMD_ALLOC);
+#endif
 					if (!*elt) {
 						for (i--; i >= 0; --i) {
 							elt_idx = (elts_ci +
 								   i) & elts_n;
 							elt = &(*rxq->elts)
 								[elt_idx];
+#if RTE_MBUF_HISTORY_DEBUG
+							rte_mbuf_history_mark(*elt,
+								RTE_MBUF_PMD_FREE);
+#endif
 							rte_pktmbuf_free_seg
 								(*elt);
 						}
@@ -1048,6 +1055,9 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		rte_prefetch0(wqe);
 		/* Allocate the buf from the same pool. */
 		rep = rte_mbuf_raw_alloc(seg->pool);
+#if RTE_MBUF_HISTORY_DEBUG
+		rte_mbuf_history_mark(rep, RTE_MBUF_PMD_ALLOC);
+#endif
 		if (unlikely(rep == NULL)) {
 			++rxq->stats.rx_nombuf;
 			if (!pkt) {
@@ -1062,6 +1072,9 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 				rep = NEXT(pkt);
 				NEXT(pkt) = NULL;
 				NB_SEGS(pkt) = 1;
+#if RTE_MBUF_HISTORY_DEBUG
+				rte_mbuf_history_mark(pkt, RTE_MBUF_PMD_FREE);
+#endif
 				rte_mbuf_raw_free(pkt);
 				pkt = rep;
 			}
@@ -1076,6 +1089,9 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 					       &mcqe, &skip_cnt, false, NULL);
 			if (unlikely(len & MLX5_ERROR_CQE_MASK)) {
 				/* We drop packets with non-critical errors */
+#if RTE_MBUF_HISTORY_DEBUG
+				rte_mbuf_history_mark(rep, RTE_MBUF_PMD_FREE);
+#endif
 				rte_mbuf_raw_free(rep);
 				if (len == MLX5_CRITICAL_ERROR_CQE_RET) {
 					rq_ci = rxq->rq_ci << sges_n;
@@ -1089,6 +1105,9 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 				continue;
 			}
 			if (len == 0) {
+#if RTE_MBUF_HISTORY_DEBUG
+				rte_mbuf_history_mark(rep, RTE_MBUF_PMD_FREE);
+#endif
 				rte_mbuf_raw_free(rep);
 				break;
 			}
@@ -1540,6 +1559,9 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			++rxq->stats.rx_nombuf;
 			break;
 		}
+#if RTE_MBUF_HISTORY_DEBUG
+		rte_mbuf_history_mark(pkt, RTE_MBUF_PMD_ALLOC);
+#endif
 		len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
 		MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
 		if (rxq->crc_present)
@@ -1547,6 +1569,9 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		rxq_code = mprq_buf_to_pkt(rxq, pkt, len, buf,
 					   strd_idx, strd_cnt);
 		if (unlikely(rxq_code != MLX5_RXQ_CODE_EXIT)) {
+#if RTE_MBUF_HISTORY_DEBUG
+			rte_mbuf_history_mark(pkt, RTE_MBUF_PMD_FREE);
+#endif
 			rte_pktmbuf_free_seg(pkt);
 			if (rxq_code == MLX5_RXQ_CODE_DROPPED) {
 				++rxq->stats.idropped;
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index 7be31066a5..075b4bfc4b 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -525,6 +525,9 @@ mprq_buf_to_pkt(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, uint32_t len,
 
 				if (unlikely(next == NULL))
 					return MLX5_RXQ_CODE_NOMBUF;
+#if RTE_MBUF_HISTORY_DEBUG
+				rte_mbuf_history_mark(next, RTE_MBUF_PMD_ALLOC);
+#endif
 				NEXT(prev) = next;
 				SET_DATA_OFF(next, 0);
 				addr = RTE_PTR_ADD(addr, seg_len);
@@ -588,6 +591,9 @@ mprq_buf_to_pkt(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt, uint32_t len,
 
 			if (unlikely(seg == NULL))
 				return MLX5_RXQ_CODE_NOMBUF;
+#if RTE_MBUF_HISTORY_DEBUG
+			rte_mbuf_history_mark(seg, RTE_MBUF_PMD_ALLOC);
+#endif
 			SET_DATA_OFF(seg, 0);
 			rte_memcpy(rte_pktmbuf_mtod(seg, void *),
 				RTE_PTR_ADD(addr, len - hdrm_overlap),
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index aeefece8c1..434a57ca32 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -164,6 +164,9 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 			rte_errno = ENOMEM;
 			goto error;
 		}
+#if RTE_MBUF_HISTORY_DEBUG
+		rte_mbuf_history_mark(buf, RTE_MBUF_PMD_ALLOC);
+#endif
 		/* Only vectored Rx routines rely on headroom size. */
 		MLX5_ASSERT(!has_vec_support ||
 			    DATA_OFF(buf) >= RTE_PKTMBUF_HEADROOM);
@@ -221,8 +224,12 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 	err = rte_errno; /* Save rte_errno before cleanup. */
 	elts_n = i;
 	for (i = 0; (i != elts_n); ++i) {
-		if ((*rxq_ctrl->rxq.elts)[i] != NULL)
+		if ((*rxq_ctrl->rxq.elts)[i] != NULL) {
+#if RTE_MBUF_HISTORY_DEBUG
+			rte_mbuf_history_mark((*rxq_ctrl->rxq.elts)[i], RTE_MBUF_PMD_FREE);
+#endif
 			rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
+		}
 		(*rxq_ctrl->rxq.elts)[i] = NULL;
 	}
 	if (rxq_ctrl->share_group == 0)
@@ -324,8 +331,12 @@ rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 		rxq->rq_pi = elts_ci;
 	}
 	for (i = 0; i != q_n; ++i) {
-		if ((*rxq->elts)[i] != NULL)
+		if ((*rxq->elts)[i] != NULL) {
+#if RTE_MBUF_HISTORY_DEBUG
+			rte_mbuf_history_mark((*rxq->elts)[i], RTE_MBUF_PMD_FREE);
+#endif
 			rte_pktmbuf_free_seg((*rxq->elts)[i]);
+		}
 		(*rxq->elts)[i] = NULL;
 	}
 }
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index 1b701801c5..c7ca808f43 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -63,6 +63,9 @@ rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
 		if (pkt->packet_type == RTE_PTYPE_ALL_MASK || rxq->err_state) {
 #ifdef MLX5_PMD_SOFT_COUNTERS
 			err_bytes += PKT_LEN(pkt);
+#endif
+#if RTE_MBUF_HISTORY_DEBUG
+			rte_mbuf_history_mark(pkt, RTE_MBUF_PMD_FREE);
 #endif
 			rte_pktmbuf_free_seg(pkt);
 		} else {
@@ -107,6 +110,9 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq)
 			rxq->stats.rx_nombuf += n;
 			return;
 		}
+#if RTE_MBUF_HISTORY_DEBUG
+		rte_mbuf_history_bulk(elts, n, RTE_MBUF_PMD_ALLOC);
+#endif
 		if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1)) {
 			for (i = 0; i < n; ++i) {
 				/*
@@ -171,6 +177,9 @@ mlx5_rx_mprq_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq)
 			rxq->stats.rx_nombuf += n;
 			return;
 		}
+#if RTE_MBUF_HISTORY_DEBUG
+		rte_mbuf_history_bulk(elts, n, RTE_MBUF_PMD_ALLOC);
+#endif
 		rxq->elts_ci += n;
 		/* Prevent overflowing into consumed mbufs. */
 		elts_idx = rxq->elts_ci & wqe_mask;
@@ -224,6 +233,9 @@ rxq_copy_mprq_mbuf_v(struct mlx5_rxq_data *rxq,
 
 		if (!elts[i]->pkt_len) {
 			rxq->consumed_strd = strd_n;
+#if RTE_MBUF_HISTORY_DEBUG
+			rte_mbuf_history_mark(elts[i], RTE_MBUF_PMD_FREE);
+#endif
 			rte_pktmbuf_free_seg(elts[i]);
 #ifdef MLX5_PMD_SOFT_COUNTERS
 			rxq->stats.ipackets -= 1;
@@ -236,6 +248,9 @@ rxq_copy_mprq_mbuf_v(struct mlx5_rxq_data *rxq,
 					   buf, rxq->consumed_strd, strd_cnt);
 		rxq->consumed_strd += strd_cnt;
 		if (unlikely(rxq_code != MLX5_RXQ_CODE_EXIT)) {
+#if RTE_MBUF_HISTORY_DEBUG
+			rte_mbuf_history_mark(elts[i], RTE_MBUF_PMD_FREE);
+#endif
 			rte_pktmbuf_free_seg(elts[i]);
 #ifdef MLX5_PMD_SOFT_COUNTERS
 			rxq->stats.ipackets -= 1;
@@ -586,6 +601,7 @@ mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		rte_io_wmb();
 		*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
 	} while (tn != pkts_n);
+
 	return tn;
 }
 
diff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h
index 16307206e2..c3d69942a8 100644
--- a/drivers/net/mlx5/mlx5_tx.h
+++ b/drivers/net/mlx5/mlx5_tx.h
@@ -555,6 +555,9 @@ mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
 	if (!MLX5_TXOFF_CONFIG(MULTI) && txq->fast_free) {
 		mbuf = *pkts;
 		pool = mbuf->pool;
+#if RTE_MBUF_HISTORY_DEBUG
+		rte_mbuf_history_bulk(pkts, pkts_n, RTE_MBUF_PMD_FREE);
+#endif
 		rte_mempool_put_bulk(pool, (void *)pkts, pkts_n);
 		return;
 	}
@@ -610,6 +613,9 @@ mlx5_tx_free_mbuf(struct mlx5_txq_data *__rte_restrict txq,
 			 * Free the array of pre-freed mbufs
 			 * belonging to the same memory pool.
 			 */
+#if RTE_MBUF_HISTORY_DEBUG
+			rte_mbuf_history_bulk(p_free, n_free, RTE_MBUF_PMD_FREE);
+#endif
 			rte_mempool_put_bulk(pool, (void *)p_free, n_free);
 			if (unlikely(mbuf != NULL)) {
 				/* There is the request to start new scan. */
@@ -1225,6 +1231,9 @@ mlx5_tx_mseg_memcpy(uint8_t *pdst,
 			/* Exhausted packet, just free. */
 			mbuf = loc->mbuf;
 			loc->mbuf = mbuf->next;
+#if RTE_MBUF_HISTORY_DEBUG
+			rte_mbuf_history_mark(mbuf, RTE_MBUF_PMD_FREE);
+#endif
 			rte_pktmbuf_free_seg(mbuf);
 			loc->mbuf_off = 0;
 			MLX5_ASSERT(loc->mbuf_nseg > 1);
@@ -1267,6 +1276,9 @@ mlx5_tx_mseg_memcpy(uint8_t *pdst,
 				/* Exhausted packet, just free. */
 				mbuf = loc->mbuf;
 				loc->mbuf = mbuf->next;
+#if RTE_MBUF_HISTORY_DEBUG
+				rte_mbuf_history_mark(mbuf, RTE_MBUF_PMD_FREE);
+#endif
 				rte_pktmbuf_free_seg(mbuf);
 				loc->mbuf_off = 0;
 				MLX5_ASSERT(loc->mbuf_nseg >= 1);
@@ -1717,6 +1729,9 @@ mlx5_tx_mseg_build(struct mlx5_txq_data *__rte_restrict txq,
 			/* Zero length segment found, just skip. */
 			mbuf = loc->mbuf;
 			loc->mbuf = loc->mbuf->next;
+#if RTE_MBUF_HISTORY_DEBUG
+			rte_mbuf_history_mark(mbuf, RTE_MBUF_PMD_FREE);
+#endif
 			rte_pktmbuf_free_seg(mbuf);
 			if (--loc->mbuf_nseg == 0)
 				break;
@@ -2020,6 +2035,9 @@ mlx5_tx_packet_multi_send(struct mlx5_txq_data *__rte_restrict txq,
 			wqe->cseg.sq_ds -= RTE_BE32(1);
 			mbuf = loc->mbuf;
 			loc->mbuf = mbuf->next;
+#if RTE_MBUF_HISTORY_DEBUG
+			rte_mbuf_history_mark(mbuf, RTE_MBUF_PMD_FREE);
+#endif
 			rte_pktmbuf_free_seg(mbuf);
 			if (--nseg == 0)
 				break;
@@ -3319,6 +3337,9 @@ mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,
 				 * Packet data are completely inlined,
 				 * free the packet immediately.
 				 */
+#if RTE_MBUF_HISTORY_DEBUG
+				rte_mbuf_history_mark(loc->mbuf, RTE_MBUF_PMD_FREE);
+#endif
 				rte_pktmbuf_free_seg(loc->mbuf);
 			} else if ((!MLX5_TXOFF_CONFIG(EMPW) ||
 				     MLX5_TXOFF_CONFIG(MPW)) &&
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 2aa2475a8a..445d1d62c4 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -79,6 +79,9 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
 		struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
 
 		MLX5_ASSERT(elt != NULL);
+#if RTE_MBUF_HISTORY_DEBUG
+		rte_mbuf_history_mark(elt, RTE_MBUF_PMD_FREE);
+#endif
 		rte_pktmbuf_free_seg(elt);
 #ifdef RTE_LIBRTE_MLX5_DEBUG
 		/* Poisoning. */
-- 
2.34.1


  parent reply	other threads:[~2025-09-16 15:12 UTC|newest]

Thread overview: 24+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-06-16  7:29 [RFC PATCH 0/5] Introduce mempool object new debug capabilities Shani Peretz
2025-06-16  7:29 ` [RFC PATCH 1/5] mempool: record mempool objects operations history Shani Peretz
2025-06-16  7:29 ` [RFC PATCH 2/5] drivers: add mempool history compilation flag Shani Peretz
2025-06-16  7:29 ` [RFC PATCH 3/5] net/mlx5: mark an operation in mempool object's history Shani Peretz
2025-06-16  7:29 ` [RFC PATCH 4/5] app/testpmd: add testpmd command to dump mempool history Shani Peretz
2025-06-16  7:29 ` [RFC PATCH 5/5] usertool: add a script to parse mempool history dump Shani Peretz
2025-06-16 15:30 ` [RFC PATCH 0/5] Introduce mempool object new debug capabilities Stephen Hemminger
2025-06-19 12:57   ` Morten Brørup
2025-07-07  5:46     ` Shani Peretz
2025-07-07  5:45   ` Shani Peretz
2025-07-07 12:10     ` Morten Brørup
2025-07-19 14:39       ` Morten Brørup
2025-08-25 11:27         ` Slava Ovsiienko
2025-09-01 15:34           ` Morten Brørup
2025-09-16 15:12 ` [PATCH v2 0/4] add mbuf " Shani Peretz
2025-09-16 15:12   ` [PATCH v2 1/4] mbuf: record mbuf operations history Shani Peretz
2025-09-16 21:17     ` Stephen Hemminger
2025-09-16 21:33       ` Thomas Monjalon
2025-09-17  1:22         ` Morten Brørup
2025-09-16 15:12   ` Shani Peretz [this message]
2025-09-16 21:14     ` [PATCH v2 2/4] net/mlx5: mark an operation in mbuf's history Stephen Hemminger
2025-09-16 21:31       ` Thomas Monjalon
2025-09-16 15:12   ` [PATCH v2 3/4] app/testpmd: add testpmd command to dump mbuf history Shani Peretz
2025-09-16 15:12   ` [PATCH v2 4/4] usertool: add a script to parse mbuf history dump Shani Peretz

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20250916151207.556618-3-shperetz@nvidia.com \
    --to=shperetz@nvidia.com \
    --cc=ajit.khaparde@broadcom.com \
    --cc=bingz@nvidia.com \
    --cc=bruce.richardson@intel.com \
    --cc=david.marchand@redhat.com \
    --cc=dev@dpdk.org \
    --cc=dsosnowski@nvidia.com \
    --cc=gakhil@marvell.com \
    --cc=jerinj@marvell.com \
    --cc=konstantin.v.ananyev@yandex.ru \
    --cc=matan@nvidia.com \
    --cc=maxime.coquelin@redhat.com \
    --cc=mb@smartsharesystems.com \
    --cc=orika@nvidia.com \
    --cc=stephen@networkplumber.org \
    --cc=suanmingm@nvidia.com \
    --cc=thomas@monjalon.net \
    --cc=viacheslavo@nvidia.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).