DPDK patches and discussions
* [PATCH 0/2] add debug capabilities to ipool
@ 2025-03-24  8:18 Shani Peretz
  2025-03-24  8:18 ` [PATCH 1/2] net/mlx5: add ipool debug capabilities Shani Peretz
  2025-03-24  8:18 ` [PATCH 2/2] net/mlx5: added a bitmap that tracks ipool allocs and frees Shani Peretz
  0 siblings, 2 replies; 3+ messages in thread
From: Shani Peretz @ 2025-03-24  8:18 UTC (permalink / raw)
  To: dev; +Cc: rasland, Shani Peretz

Enhanced ipool debugging: added a new log component and per-operation
logs at several verbosity levels.
Introduced a bitmap, active in debug mode only, that tracks allocations
and deallocations to catch double allocs and double frees in per-core
cache mode.

Shani Peretz (2):
  net/mlx5: add ipool debug capabilities
  net/mlx5: added a bitmap that tracks ipool allocs and frees

 drivers/net/mlx5/mlx5_utils.c | 151 +++++++++++++++++++++++++++++++++-
 drivers/net/mlx5/mlx5_utils.h |  21 +++++
 2 files changed, 171 insertions(+), 1 deletion(-)

-- 
2.34.1


* [PATCH 1/2] net/mlx5: add ipool debug capabilities
  2025-03-24  8:18 [PATCH 0/2] add debug capabilities to ipool Shani Peretz
@ 2025-03-24  8:18 ` Shani Peretz
  2025-03-24  8:18 ` [PATCH 2/2] net/mlx5: added a bitmap that tracks ipool allocs and frees Shani Peretz
  1 sibling, 0 replies; 3+ messages in thread
From: Shani Peretz @ 2025-03-24  8:18 UTC (permalink / raw)
  To: dev
  Cc: rasland, Shani Peretz, Bing Zhao, Dariusz Sosnowski,
	Viacheslav Ovsiienko, Ori Kam, Suanming Mou, Matan Azrad

Enhance ipool debug capabilities by introducing a new ipool log
component.
Also add logs at various verbosity levels for ipool operations.

Signed-off-by: Shani Peretz <shperetz@nvidia.com>
Acked-by: Bing Zhao <bingz@nvidia.com>
---
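Note for testers (not part of the commit message): once registered, the
new log type can be tuned like any other DPDK log component. A minimal
sketch in C, assuming the component resolves to "pmd.net.mlx5.ipool"
(the driver's default log prefix plus the "ipool" suffix passed to
RTE_LOG_REGISTER_SUFFIX below; the exact name is an assumption, check
what the build actually registers):

	#include <rte_log.h>

	/* Raise only the ipool component to DEBUG at runtime;
	 * the pattern string is an assumed name, adjust as needed. */
	rte_log_set_level_pattern("pmd.net.mlx5.ipool", RTE_LOG_DEBUG);
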
 drivers/net/mlx5/mlx5_utils.c | 50 ++++++++++++++++++++++++++++++++++-
 drivers/net/mlx5/mlx5_utils.h |  9 +++++++
 2 files changed, 58 insertions(+), 1 deletion(-)

diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c
index d882af6047..b92ac44540 100644
--- a/drivers/net/mlx5/mlx5_utils.c
+++ b/drivers/net/mlx5/mlx5_utils.c
@@ -10,6 +10,11 @@
 
 /********************* Indexed pool **********************/
 
+int mlx5_logtype_ipool;
+
+/* Initialize driver log type. */
+RTE_LOG_REGISTER_SUFFIX(mlx5_logtype_ipool, ipool, NOTICE)
+
 static inline void
 mlx5_ipool_lock(struct mlx5_indexed_pool *pool)
 {
@@ -115,6 +120,9 @@ mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg)
 	if (!cfg->per_core_cache)
 		pool->free_list = TRUNK_INVALID;
 	rte_spinlock_init(&pool->lcore_lock);
+
+	DRV_LOG_IPOOL(INFO, "lcore id %d: pool %s: per core cache mode %s",
+		      rte_lcore_id(), pool->cfg.type, pool->cfg.per_core_cache != 0 ? "on" : "off");
 	return pool;
 }
 
@@ -214,6 +222,9 @@ mlx5_ipool_update_global_cache(struct mlx5_indexed_pool *pool, int cidx)
 		mlx5_ipool_unlock(pool);
 		if (olc)
 			pool->cfg.free(olc);
+		DRV_LOG_IPOOL(DEBUG, "lcore id %d: pool %s: updated lcache %d "
+			      "ref %d, new %p, old %p", rte_lcore_id(), pool->cfg.type,
+			      cidx, lc->ref_cnt, (void *)lc, (void *)olc);
 	}
 	return lc;
 }
@@ -442,6 +453,13 @@ mlx5_ipool_malloc_cache(struct mlx5_indexed_pool *pool, uint32_t *idx)
 	entry = _mlx5_ipool_malloc_cache(pool, cidx, idx);
 	if (unlikely(cidx == RTE_MAX_LCORE))
 		rte_spinlock_unlock(&pool->lcore_lock);
+#ifdef POOL_DEBUG
+	++pool->n_entry;
+	DRV_LOG_IPOOL(DEBUG, "lcore id %d: pool %s: allocated entry %d lcore %d, "
+		      "current cache size %d, total allocated entries %d.", rte_lcore_id(),
+		      pool->cfg.type, *idx, cidx, pool->cache[cidx]->len, pool->n_entry);
+#endif
+
 	return entry;
 }
 
@@ -471,6 +489,9 @@ _mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx)
 	if (pool->cache[cidx]->len < pool->cfg.per_core_cache) {
 		pool->cache[cidx]->idx[pool->cache[cidx]->len] = idx;
 		pool->cache[cidx]->len++;
+		DRV_LOG_IPOOL(DEBUG, "lcore id %d: pool %s: freed entry %d "
+			      "back to lcache %d, lcache size %d.", rte_lcore_id(),
+			      pool->cfg.type, idx, cidx, pool->cache[cidx]->len);
 		return;
 	}
 	ilc = pool->cache[cidx];
@@ -493,6 +514,10 @@ _mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx)
 		pool->cfg.free(olc);
 	pool->cache[cidx]->idx[pool->cache[cidx]->len] = idx;
 	pool->cache[cidx]->len++;
+
+	DRV_LOG_IPOOL(DEBUG, "lcore id %d: pool %s: cache reclaim, lcache %d, "
+		      "reclaimed: %d, gcache size %d.", rte_lcore_id(), pool->cfg.type,
+		      cidx, reclaim_num, pool->cache[cidx]->len);
 }
 
 static void
@@ -508,6 +533,10 @@ mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
 	_mlx5_ipool_free_cache(pool, cidx, idx);
 	if (unlikely(cidx == RTE_MAX_LCORE))
 		rte_spinlock_unlock(&pool->lcore_lock);
+
+#ifdef POOL_DEBUG
+	pool->n_entry--;
+#endif
 }
 
 void *
@@ -527,6 +556,8 @@ mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
 			mlx5_ipool_unlock(pool);
 			return NULL;
 		}
+		DRV_LOG_IPOOL(INFO, "lcore id %d: pool %s: add trunk: new size = %d",
+			      rte_lcore_id(), pool->cfg.type, pool->n_trunk_valid);
 	}
 	MLX5_ASSERT(pool->free_list != TRUNK_INVALID);
 	trunk = pool->trunks[pool->free_list];
@@ -550,7 +581,7 @@ mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
 	iidx += 1; /* non-zero index. */
 	trunk->free--;
 #ifdef POOL_DEBUG
-	pool->n_entry++;
+	++pool->n_entry;
 #endif
 	if (!trunk->free) {
 		/* Full trunk will be removed from free list in imalloc. */
@@ -567,6 +598,11 @@ mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx)
 	}
 	*idx = iidx;
 	mlx5_ipool_unlock(pool);
+#ifdef POOL_DEBUG
+	DRV_LOG_IPOOL(DEBUG, "lcore id %d: pool %s: allocated entry %d trunk_id %d, "
+		      "number of trunks %d, total allocated entries %d", rte_lcore_id(),
+		      pool->cfg.type, *idx, pool->free_list, pool->n_trunk_valid, pool->n_entry);
+#endif
 	return p;
 }
 
@@ -644,6 +680,8 @@ mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx)
 #ifdef POOL_DEBUG
 	pool->n_entry--;
 #endif
+	DRV_LOG_IPOOL(DEBUG, "lcore id %d: pool %s: freed entry %d trunk_id %d",
+		      rte_lcore_id(), pool->cfg.type, entry_idx + 1, trunk_idx);
 out:
 	mlx5_ipool_unlock(pool);
 }
@@ -688,6 +726,8 @@ mlx5_ipool_destroy(struct mlx5_indexed_pool *pool)
 
 	MLX5_ASSERT(pool);
 	mlx5_ipool_lock(pool);
+	DRV_LOG_IPOOL(INFO, "lcore id %d: pool %s: destroy", rte_lcore_id(), pool->cfg.type);
+
 	if (pool->cfg.per_core_cache) {
 		for (i = 0; i <= RTE_MAX_LCORE; i++) {
 			/*
@@ -757,6 +797,8 @@ mlx5_ipool_flush_cache(struct mlx5_indexed_pool *pool)
 	/* Clear global cache. */
 	for (i = 0; i < gc->len; i++)
 		rte_bitmap_clear(ibmp, gc->idx[i] - 1);
+	DRV_LOG_IPOOL(INFO, "lcore id %d: pool %s: flush gcache, gcache size = %d",
+		      rte_lcore_id(), pool->cfg.type, gc->len);
 	/* Clear core cache. */
 	for (i = 0; i < RTE_MAX_LCORE + 1; i++) {
 		struct mlx5_ipool_per_lcore *ilc = pool->cache[i];
@@ -765,6 +807,8 @@ mlx5_ipool_flush_cache(struct mlx5_indexed_pool *pool)
 			continue;
 		for (j = 0; j < ilc->len; j++)
 			rte_bitmap_clear(ibmp, ilc->idx[j] - 1);
+		DRV_LOG_IPOOL(INFO, "lcore id %d: pool %s: flush lcache %d",
+			      rte_lcore_id(), pool->cfg.type, i);
 	}
 }
 
@@ -831,6 +875,10 @@ mlx5_ipool_resize(struct mlx5_indexed_pool *pool, uint32_t num_entries,
 	mlx5_ipool_lock(pool);
 	pool->cfg.max_idx = num_entries;
 	mlx5_ipool_unlock(pool);
+
+	DRV_LOG_IPOOL(INFO,
+		      "lcore id %d: pool %s: resize pool, new entries limit %d",
+		      rte_lcore_id(), pool->cfg.type, pool->cfg.max_idx);
 	return 0;
 }
 
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index db2e33dfa9..68dcda5c4d 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -190,6 +190,15 @@ typedef int32_t (*mlx5_l3t_alloc_callback_fn)(void *ctx,
 #define POOL_DEBUG 1
 #endif
 
+extern int mlx5_logtype_ipool;
+#define MLX5_NET_LOG_PREFIX_IPOOL "mlx5_ipool"
+
+/* Generic printf()-like logging macro with automatic line feed. */
+#define DRV_LOG_IPOOL(level, ...) \
+	PMD_DRV_LOG_(level, mlx5_logtype_ipool, MLX5_NET_LOG_PREFIX_IPOOL, \
+		__VA_ARGS__ PMD_DRV_LOG_STRIP PMD_DRV_LOG_OPAREN, \
+		PMD_DRV_LOG_CPAREN)
+
 struct mlx5_indexed_pool_config {
 	uint32_t size; /* Pool entry size. */
 	uint32_t trunk_size:22;
-- 
2.34.1


* [PATCH 2/2] net/mlx5: added a bitmap that tracks ipool allocs and frees
  2025-03-24  8:18 [PATCH 0/2] add debug capabilities to ipool Shani Peretz
  2025-03-24  8:18 ` [PATCH 1/2] net/mlx5: add ipool debug capabilities Shani Peretz
@ 2025-03-24  8:18 ` Shani Peretz
  1 sibling, 0 replies; 3+ messages in thread
From: Shani Peretz @ 2025-03-24  8:18 UTC (permalink / raw)
  To: dev
  Cc: rasland, Shani Peretz, Bing Zhao, Dariusz Sosnowski,
	Viacheslav Ovsiienko, Ori Kam, Suanming Mou, Matan Azrad

The goal of the bitmap is to detect double allocations and double
frees in per-core cache mode.
The validation runs only in debug mode, so it does not impact
performance in regular builds.

Signed-off-by: Shani Peretz <shperetz@nvidia.com>
Acked-by: Bing Zhao <bingz@nvidia.com>
---
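Note for reviewers (not part of the commit message): the validator
keeps one bit per pool index, where a set bit means "free" and a
cleared bit means "allocated". A minimal sketch of the invariant the
two checks below enforce; report_double_malloc()/report_double_free()
are hypothetical stand-ins for the DRV_LOG_IPOOL + MLX5_ASSERT pair
used in the patch:

	/* On malloc: the bit must still be set (index is free). */
	if (rte_bitmap_get(bmp, entry_idx) == 0)
		report_double_malloc(entry_idx); /* handed out twice */
	rte_bitmap_clear(bmp, entry_idx); /* now marked allocated */

	/* On free: the bit must still be cleared (index in use). */
	if (rte_bitmap_get(bmp, entry_idx) != 0)
		report_double_free(entry_idx); /* returned twice */
	rte_bitmap_set(bmp, entry_idx); /* now marked free */
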
 drivers/net/mlx5/mlx5_utils.c | 103 +++++++++++++++++++++++++++++++++-
 drivers/net/mlx5/mlx5_utils.h |  12 ++++
 2 files changed, 114 insertions(+), 1 deletion(-)

diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c
index b92ac44540..f8cd7bc043 100644
--- a/drivers/net/mlx5/mlx5_utils.c
+++ b/drivers/net/mlx5/mlx5_utils.c
@@ -121,6 +121,9 @@ mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg)
 		pool->free_list = TRUNK_INVALID;
 	rte_spinlock_init(&pool->lcore_lock);
 
+#ifdef POOL_DEBUG
+	rte_spinlock_init(&pool->cache_validator.lock);
+#endif
 	DRV_LOG_IPOOL(INFO, "lcore id %d: pool %s: per core cache mode %s",
 		      rte_lcore_id(), pool->cfg.type, pool->cfg.per_core_cache != 0 ? "on" : "off");
 	return pool;
@@ -229,6 +232,55 @@ mlx5_ipool_update_global_cache(struct mlx5_indexed_pool *pool, int cidx)
 	return lc;
 }
 
+#ifdef POOL_DEBUG
+static void
+mlx5_ipool_grow_bmp(struct mlx5_indexed_pool *pool, uint32_t new_size)
+{
+	struct rte_bitmap *old_bmp = NULL;
+	void *old_bmp_mem = NULL;
+	uint32_t old_size = 0;
+	uint32_t i, bmp_mem_size;
+
+	if (pool->cache_validator.bmp_mem && pool->cache_validator.bmp) {
+		old_bmp = pool->cache_validator.bmp;
+		old_size = pool->cache_validator.bmp_size;
+		old_bmp_mem = pool->cache_validator.bmp_mem;
+	}
+
+	if (unlikely(new_size <= old_size))
+		return;
+
+	pool->cache_validator.bmp_size = new_size;
+	bmp_mem_size = rte_bitmap_get_memory_footprint(new_size);
+
+	pool->cache_validator.bmp_mem = pool->cfg.malloc(MLX5_MEM_ZERO, bmp_mem_size,
+										RTE_CACHE_LINE_SIZE,
+										rte_socket_id());
+	if (unlikely(!pool->cache_validator.bmp_mem)) {
+		DRV_LOG_IPOOL(ERR, "Unable to allocate memory for a new bitmap");
+		return;
+	}
+
+	pool->cache_validator.bmp = rte_bitmap_init_with_all_set(pool->cache_validator.bmp_size,
+								pool->cache_validator.bmp_mem,
+								bmp_mem_size);
+	if (unlikely(!pool->cache_validator.bmp)) {
+		DRV_LOG_IPOOL(ERR, "Unable to initialize the new bitmap");
+		pool->cfg.free(pool->cache_validator.bmp_mem);
+		return;
+	}
+
+	if (old_bmp && old_bmp_mem) {
+		for (i = 0; i < old_size; i++) {
+			if (rte_bitmap_get(old_bmp, i) == 0)
+				rte_bitmap_clear(pool->cache_validator.bmp, i);
+		}
+		rte_bitmap_free(old_bmp);
+		pool->cfg.free(old_bmp_mem);
+	}
+}
+#endif
+
 static uint32_t
 mlx5_ipool_allocate_from_global(struct mlx5_indexed_pool *pool, int cidx)
 {
@@ -413,6 +465,50 @@ mlx5_ipool_get_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
 	return entry;
 }
 
+#ifdef POOL_DEBUG
+static void
+mlx5_ipool_validate_malloc_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
+{
+	rte_spinlock_lock(&pool->cache_validator.lock);
+	uint32_t entry_idx = idx - 1;
+	uint32_t allocated_size = pool->gc->n_trunk_valid *
+						mlx5_trunk_size_get(pool, pool->n_trunk_valid);
+
+	if (!pool->cache_validator.bmp)
+		mlx5_ipool_grow_bmp(pool, allocated_size);
+
+	if (pool->cache_validator.bmp_size < allocated_size)
+		mlx5_ipool_grow_bmp(pool, allocated_size);
+
+	if (rte_bitmap_get(pool->cache_validator.bmp, entry_idx) == 0) {
+		DRV_LOG_IPOOL(ERR, "lcore id %d: pool %s: detected double malloc idx: %d",
+			      rte_lcore_id(), pool->cfg.type, idx);
+		MLX5_ASSERT(0);
+	}
+	rte_bitmap_clear(pool->cache_validator.bmp, entry_idx);
+	rte_spinlock_unlock(&pool->cache_validator.lock);
+}
+
+static void
+mlx5_ipool_validate_free_cache(struct mlx5_indexed_pool *pool, uint32_t idx)
+{
+	rte_spinlock_lock(&pool->cache_validator.lock);
+	uint32_t entry_idx = idx - 1;
+
+	if (!pool->gc || !pool->cache_validator.bmp) {
+		rte_spinlock_unlock(&pool->cache_validator.lock);
+		return;
+	}
+
+	if (rte_bitmap_get(pool->cache_validator.bmp, entry_idx) != 0) {
+		DRV_LOG_IPOOL(ERR, "lcore id %d: pool %s: detected double free of index %d",
+			      rte_lcore_id(), pool->cfg.type, idx);
+		MLX5_ASSERT(0);
+	}
+	rte_bitmap_set(pool->cache_validator.bmp, entry_idx);
+	rte_spinlock_unlock(&pool->cache_validator.lock);
+}
+#endif
 
 static void *
 _mlx5_ipool_malloc_cache(struct mlx5_indexed_pool *pool, int cidx,
@@ -455,11 +551,11 @@ mlx5_ipool_malloc_cache(struct mlx5_indexed_pool *pool, uint32_t *idx)
 		rte_spinlock_unlock(&pool->lcore_lock);
 #ifdef POOL_DEBUG
 	++pool->n_entry;
+	mlx5_ipool_validate_malloc_cache(pool, *idx);
 	DRV_LOG_IPOOL(DEBUG, "lcore id %d: pool %s: allocated entry %d lcore %d, "
 		      "current cache size %d, total allocated entries %d.", rte_lcore_id(),
 		      pool->cfg.type, *idx, cidx, pool->cache[cidx]->len, pool->n_entry);
 #endif
-
 	return entry;
 }
 
@@ -471,6 +567,11 @@ _mlx5_ipool_free_cache(struct mlx5_indexed_pool *pool, int cidx, uint32_t idx)
 	uint32_t reclaim_num = 0;
 
 	MLX5_ASSERT(idx);
+
+#ifdef POOL_DEBUG
+	mlx5_ipool_validate_free_cache(pool, idx);
+#endif
+
 	/*
 	 * When index was allocated on core A but freed on core B. In this
 	 * case check if local cache on core B was allocated before.
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index 68dcda5c4d..c65839c5d9 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -259,6 +259,15 @@ struct mlx5_ipool_per_lcore {
 	uint32_t idx[]; /**< Cache objects. */
 };
 
+#ifdef POOL_DEBUG
+struct mlx5_ipool_cache_validation {
+	rte_spinlock_t lock;
+	uint32_t bmp_size;
+	struct rte_bitmap *bmp;
+	void *bmp_mem;
+};
+#endif
+
 struct mlx5_indexed_pool {
 	struct mlx5_indexed_pool_config cfg; /* Indexed pool configuration. */
 	rte_spinlock_t rsz_lock; /* Pool lock for multiple thread usage. */
@@ -279,6 +288,9 @@ struct mlx5_indexed_pool {
 			struct rte_bitmap *ibmp;
 			void *bmp_mem;
 			/* Allocate objects bitmap. Use during flush. */
+#ifdef POOL_DEBUG
+			struct mlx5_ipool_cache_validation cache_validator;
+#endif
 		};
 	};
 #ifdef POOL_DEBUG
-- 
2.34.1

