From: Shani Peretz <shperetz@nvidia.com>
To: <dev@dpdk.org>
Cc: "Shani Peretz" <shperetz@nvidia.com>,
	"Thomas Monjalon" <thomas@monjalon.net>,
	"Ferruh Yigit" <ferruh.yigit@amd.com>,
	"Andrew Rybchenko" <andrew.rybchenko@oktetlabs.ru>,
	"Morten Brørup" <mb@smartsharesystems.com>
Subject: [RFC PATCH 1/5] mempool: record mempool objects operations history
Date: Mon, 16 Jun 2025 10:29:06 +0300	[thread overview]
Message-ID: <20250616072910.113042-2-shperetz@nvidia.com> (raw)
In-Reply-To: <20250616072910.113042-1-shperetz@nvidia.com>

This feature monitors the lifecycle of mempool objects as they
move between the application and the PMD.

It allows tracking the operations and transitions of each mempool
object throughout the system, which helps in debugging and in
understanding object flow.

Add a 64-bit history field to the mempool object header that
records the most recent operations. Each operation is encoded as
a 4-bit value, so the history field holds the last 16 steps of
each object.
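
For illustration only (not part of the functional change), a minimal
sketch of how such a history word could be decoded, assuming each
new operation is shifted in as the lowest 4 bits:

	#include <stdint.h>
	#include <stdio.h>

	#define HISTORY_BITS  4
	#define HISTORY_MASK  ((1ULL << HISTORY_BITS) - 1)
	#define HISTORY_STEPS (64 / HISTORY_BITS)

	/* Print the 16 recorded steps, most recent first. */
	static void
	decode_history(uint64_t history)
	{
		unsigned int i;

		for (i = 0; i < HISTORY_STEPS; i++) {
			printf("step -%u: op %u\n", i,
			       (unsigned int)(history & HISTORY_MASK));
			history >>= HISTORY_BITS;
		}
	}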

Signed-off-by: Shani Peretz <shperetz@nvidia.com>
---
 lib/ethdev/rte_ethdev.h   |  14 +++++
 lib/mempool/rte_mempool.c | 111 ++++++++++++++++++++++++++++++++++++++
 lib/mempool/rte_mempool.h | 106 ++++++++++++++++++++++++++++++++++++
 3 files changed, 231 insertions(+)
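
Note (illustrative only, not part of the patch): with this series
applied, RTE_MEMPOOL_DEBUG_OBJECTS_HISTORY enabled, and experimental
API allowed at build time, an application could dump the recorded
history roughly as follows; the file path is arbitrary:

	#include <stdio.h>
	#include <rte_mempool.h>

	static void
	dump_mempool_history(const char *path)
	{
		FILE *f = fopen(path, "w");

		if (f == NULL)
			return;
		/* Walks all mempools and prints each object's history
		 * word plus the per-state counters added by this patch.
		 */
		rte_mempool_objects_dump(f);
		fclose(f);
	}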

diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index f9fb6ae549..77d15f1bcb 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -167,6 +167,7 @@
 #include <rte_common.h>
 #include <rte_config.h>
 #include <rte_power_intrinsics.h>
+#include <rte_mempool.h>
 
 #include "rte_ethdev_trace_fp.h"
 #include "rte_dev_info.h"
@@ -6334,6 +6335,8 @@ rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
 
 	nb_rx = p->rx_pkt_burst(qd, rx_pkts, nb_pkts);
 
+	rte_mempool_history_bulk((void **)rx_pkts, nb_rx, RTE_MEMPOOL_APP_RX);
+
 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
 	{
 		void *cb;
@@ -6692,8 +6695,19 @@ rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
 	}
 #endif
 
+#if RTE_MEMPOOL_DEBUG_OBJECTS_HISTORY
+	uint16_t requested_pkts = nb_pkts;
+	rte_mempool_history_bulk((void **)tx_pkts, nb_pkts, RTE_MEMPOOL_PMD_TX);
+#endif
+
 	nb_pkts = p->tx_pkt_burst(qd, tx_pkts, nb_pkts);
 
+#if RTE_MEMPOOL_DEBUG_OBJECTS_HISTORY
+	if (requested_pkts > nb_pkts)
+		rte_mempool_history_bulk((void **)tx_pkts + nb_pkts,
+				    requested_pkts - nb_pkts, RTE_MEMPOOL_BUSY_TX);
+#endif
+
 	rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, nb_pkts);
 	return nb_pkts;
 }
diff --git a/lib/mempool/rte_mempool.c b/lib/mempool/rte_mempool.c
index 1021ede0c2..1d84c72ba0 100644
--- a/lib/mempool/rte_mempool.c
+++ b/lib/mempool/rte_mempool.c
@@ -32,6 +32,7 @@
 #include "mempool_trace.h"
 #include "rte_mempool.h"
 
+
 RTE_EXPORT_SYMBOL(rte_mempool_logtype)
 RTE_LOG_REGISTER_DEFAULT(rte_mempool_logtype, INFO);
 
@@ -1632,3 +1633,113 @@ RTE_INIT(mempool_init_telemetry)
 	rte_telemetry_register_cmd("/mempool/info", mempool_handle_info,
 		"Returns mempool info. Parameters: pool_name");
 }
+
+#if RTE_MEMPOOL_DEBUG_OBJECTS_HISTORY
+static void
+rte_mempool_get_object_history_stat(FILE *f, struct rte_mempool *mp)
+{
+	struct rte_mempool_objhdr *hdr;
+
+	uint64_t n_never = 0;   /* Never been allocated. */
+	uint64_t n_free = 0;    /* Returned to the pool. */
+	uint64_t n_alloc = 0;   /* Allocated from the pool. */
+	uint64_t n_ref = 0;     /* Freed by PMD, not returned to the pool. */
+	uint64_t n_pmd_tx = 0;  /* Owned by PMD Tx. */
+	uint64_t n_pmd_rx = 0;  /* Owned by PMD Rx. */
+	uint64_t n_app_rx = 0;  /* Owned by application on Rx. */
+	uint64_t n_app_alloc = 0; /* Owned by application on alloc. */
+	uint64_t n_busy_tx = 0; /* Owned by application on busy Tx. */
+	uint64_t n_total = 0;   /* Total object count. */
+
+	if (f == NULL)
+		return;
+
+	STAILQ_FOREACH(hdr, &mp->elt_list, next) {
+		uint64_t hs = hdr->history;
+
+		rte_rmb();
+		n_total++;
+		if (hs == 0) {
+			n_never++;
+			continue;
+		}
+		switch (hs & RTE_MEMPOOL_HISTORY_MASK) {
+		case RTE_MEMPOOL_FREE:
+			n_free++;
+			break;
+		case RTE_MEMPOOL_PMD_FREE:
+			n_alloc++;
+			n_ref++;
+			break;
+		case RTE_MEMPOOL_PMD_TX:
+			n_alloc++;
+			n_pmd_tx++;
+			break;
+		case RTE_MEMPOOL_APP_RX:
+			n_alloc++;
+			n_app_rx++;
+			break;
+		case RTE_MEMPOOL_PMD_ALLOC:
+			n_alloc++;
+			n_pmd_rx++;
+			break;
+		case RTE_MEMPOOL_ALLOC:
+			n_alloc++;
+			n_app_alloc++;
+			break;
+		case RTE_MEMPOOL_BUSY_TX:
+			n_alloc++;
+			n_busy_tx++;
+			break;
+		default:
+			break;
+		}
+		fprintf(f, "%016" PRIX64 "\n", hs);
+	}
+
+	fprintf(f, "\n"
+		"Populated:       %u\n"
+		"Never allocated: %" PRIu64 "\n"
+		"Free:            %" PRIu64 "\n"
+		"Allocated:       %" PRIu64 "\n"
+		"Referenced free: %" PRIu64 "\n"
+		"PMD owned Tx:    %" PRIu64 "\n"
+		"PMD owned Rx:    %" PRIu64 "\n"
+		"App owned alloc: %" PRIu64 "\n"
+		"App owned Rx:    %" PRIu64 "\n"
+		"App owned busy:  %" PRIu64 "\n"
+		"Counted total:   %" PRIu64 "\n",
+		mp->populated_size, n_never, n_free + n_never, n_alloc,
+		n_ref, n_pmd_tx, n_pmd_rx, n_app_alloc, n_app_rx,
+		n_busy_tx, n_total);
+}
+#endif
+
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_mempool_objects_dump, 24.07)
+void
+rte_mempool_objects_dump(__rte_unused FILE *f)
+{
+#if RTE_MEMPOOL_DEBUG_OBJECTS_HISTORY
+	if (f == NULL) {
+		RTE_MEMPOOL_LOG(ERR, "Invalid file pointer");
+		return;
+	}
+
+	struct rte_mempool *mp = NULL;
+	struct rte_tailq_entry *te;
+	struct rte_mempool_list *mempool_list;
+
+	mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
+
+	rte_mcfg_mempool_read_lock();
+
+	TAILQ_FOREACH(te, mempool_list, next) {
+		mp = (struct rte_mempool *) te->data;
+		rte_mempool_get_object_history_stat(f, mp);
+	}
+
+	rte_mcfg_mempool_read_unlock();
+#else
+	RTE_MEMPOOL_LOG(INFO, "Mempool history recorder is not supported");
+#endif
+}
diff --git a/lib/mempool/rte_mempool.h b/lib/mempool/rte_mempool.h
index aedc100964..4c0ea6d0a7 100644
--- a/lib/mempool/rte_mempool.h
+++ b/lib/mempool/rte_mempool.h
@@ -60,6 +60,26 @@ extern "C" {
 #define RTE_MEMPOOL_HEADER_COOKIE2  0xf2eef2eedadd2e55ULL /**< Header cookie. */
 #define RTE_MEMPOOL_TRAILER_COOKIE  0xadd2e55badbadbadULL /**< Trailer cookie.*/
 
+/**
+ * Mempool trace operation bits and masks.
+ * Used to record the lifecycle of mempool objects through the system.
+ */
+#define RTE_MEMPOOL_HISTORY_BITS          4 /* Number of bits per history operation. */
+#define RTE_MEMPOOL_HISTORY_MASK          ((1ULL << RTE_MEMPOOL_HISTORY_BITS) - 1)
+
+/* History operation types */
+enum rte_mempool_history_op {
+	RTE_MEMPOOL_NEVER = 0,     /* Initial state - never allocated */
+	RTE_MEMPOOL_FREE = 1,      /* Freed back to mempool */
+	RTE_MEMPOOL_PMD_FREE = 2,  /* Freed by PMD back to mempool */
+	RTE_MEMPOOL_PMD_TX = 3,    /* Sent to PMD for Tx */
+	RTE_MEMPOOL_APP_RX = 4,    /* Returned to application on Rx */
+	RTE_MEMPOOL_PMD_ALLOC = 5, /* Allocated by PMD for Rx */
+	RTE_MEMPOOL_ALLOC = 6,     /* Allocated by application */
+	RTE_MEMPOOL_BUSY_TX = 7,   /* Returned to app due to Tx busy */
+	RTE_MEMPOOL_MAX = 8        /* Number of defined history operations */
+};
+
 #ifdef RTE_LIBRTE_MEMPOOL_STATS
 /**
  * A structure that stores the mempool statistics (per-lcore).
@@ -157,6 +177,9 @@ struct rte_mempool_objhdr {
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 	uint64_t cookie;                 /**< Debug cookie. */
 #endif
+#if RTE_MEMPOOL_DEBUG_OBJECTS_HISTORY
+	uint64_t history;                 /**< Debug object history. */
+#endif
 };
 
 /**
@@ -457,6 +480,83 @@ void rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp,
 
 #define RTE_MEMPOOL_OPS_NAMESIZE 32 /**< Max length of ops struct name. */
 
+
+#if RTE_MEMPOOL_DEBUG_OBJECTS_HISTORY
+/**
+ * Get the history value from a mempool object header.
+ *
+ * @param obj
+ *   Pointer to the mempool object.
+ * @return
+ *   The history value from the object header.
+ */
+static inline uint64_t rte_mempool_history_get(void *obj)
+{
+	struct rte_mempool_objhdr *hdr;
+
+	if (unlikely(obj == NULL))
+		return 0;
+
+	hdr = rte_mempool_get_header(obj);
+	return hdr->history;
+}
+
+/**
+ * Mark a mempool object with the history value.
+ *
+ * @param obj
+ *   Pointer to the mempool object.
+ * @param mark
+ *   The history mark value to add.
+ */
+static inline void rte_mempool_history_mark(void *obj, uint32_t mark)
+{
+	struct rte_mempool_objhdr *hdr;
+
+	if (unlikely(obj == NULL))
+		return;
+
+	hdr = rte_mempool_get_header(obj);
+	hdr->history = (hdr->history << RTE_MEMPOOL_HISTORY_BITS) | mark;
+}
+
+/**
+ * Mark multiple mempool objects with the history value.
+ *
+ * @param b
+ *   Array of pointers to mempool objects.
+ * @param n
+ *   Number of objects to mark.
+ * @param mark
+ *   The history mark value to add to each object.
+ */
+static inline void rte_mempool_history_bulk(void * const *b, uint32_t n, uint32_t mark)
+{
+	if (unlikely(b == NULL))
+		return;
+
+	while (n--)
+		rte_mempool_history_mark(*b++, mark);
+}
+#else
+static inline uint64_t rte_mempool_history_get(void *obj)
+{
+	RTE_SET_USED(obj);
+	return 0;
+}
+static inline void rte_mempool_history_mark(void *obj, uint32_t mark)
+{
+	RTE_SET_USED(obj);
+	RTE_SET_USED(mark);
+}
+static inline void rte_mempool_history_bulk(void * const *b, uint32_t n, uint32_t mark)
+{
+	RTE_SET_USED(b);
+	RTE_SET_USED(n);
+	RTE_SET_USED(mark);
+}
+#endif
+
 /**
  * Prototype for implementation specific data provisioning function.
  *
@@ -1395,6 +1495,7 @@ rte_mempool_do_generic_put(struct rte_mempool *mp, void * const *obj_table,
 	/* Increment stats now, adding in mempool always succeeds. */
 	RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_bulk, 1);
 	RTE_MEMPOOL_CACHE_STAT_ADD(cache, put_objs, n);
+	rte_mempool_history_bulk(obj_table, n, RTE_MEMPOOL_FREE);
 
 	__rte_assume(cache->flushthresh <= RTE_MEMPOOL_CACHE_MAX_SIZE * 2);
 	__rte_assume(cache->len <= RTE_MEMPOOL_CACHE_MAX_SIZE * 2);
@@ -1661,6 +1762,7 @@ rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table,
 	ret = rte_mempool_do_generic_get(mp, obj_table, n, cache);
 	if (likely(ret == 0))
 		RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 1);
+	rte_mempool_history_bulk(obj_table, n, RTE_MEMPOOL_ALLOC);
 	rte_mempool_trace_generic_get(mp, obj_table, n, cache);
 	return ret;
 }
@@ -1876,6 +1978,10 @@ static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
 		RTE_MEMPOOL_HEADER_SIZE(mp, mp->cache_size);
 }
 
+__rte_experimental
+void
+rte_mempool_objects_dump(FILE *f);
+
 /**
  * Dump the status of all mempools on the console
  *
-- 
2.34.1



Thread overview: 7+ messages
2025-06-16  7:29 [RFC PATCH 0/5] Introduce mempool object new debug capabilities Shani Peretz
2025-06-16  7:29 ` Shani Peretz [this message]
2025-06-16  7:29 ` [RFC PATCH 2/5] drivers: add mempool history compilation flag Shani Peretz
2025-06-16  7:29 ` [RFC PATCH 3/5] net/mlx5: mark an operation in mempool object's history Shani Peretz
2025-06-16  7:29 ` [RFC PATCH 4/5] app/testpmd: add testpmd command to dump mempool history Shani Peretz
2025-06-16  7:29 ` [RFC PATCH 5/5] usertool: add a script to parse mempool history dump Shani Peretz
2025-06-16 15:30 ` [RFC PATCH 0/5] Introduce mempool object new debug capabilities Stephen Hemminger
