DPDK patches and discussions
From: Suanming Mou <suanmingm@mellanox.com>
To: viacheslavo@mellanox.com, matan@mellanox.com
Cc: rasland@mellanox.com, dev@dpdk.org
Subject: [dpdk-dev] [PATCH v2 02/10] net/mlx5: add trunk dynamic grow for indexed pool
Date: Thu, 16 Apr 2020 10:42:00 +0800	[thread overview]
Message-ID: <1587004928-328077-3-git-send-email-suanmingm@mellanox.com> (raw)
In-Reply-To: <1587004928-328077-1-git-send-email-suanmingm@mellanox.com>

This commit adds dynamic trunk grow support for the indexed pool.

For pools where the needed entry number is not known in advance, the pool
can be configured in progressively growing mode: the trunk size is
increased dynamically, trunk after trunk, until it reaches a stable value.
This saves memory by avoiding the allocation of a very big trunk at the
beginning.
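
As a minimal standalone illustration (not part of the patch), the sketch
below reproduces the size and offset formulas used by the new helpers,
assuming the hypothetical values trunk_size = 64, grow_shift = 1 and
grow_trunk = 4:

/* Standalone illustration only -- all values below are hypothetical. */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t trunk_size = 64;	/* first trunk entry number */
	uint32_t grow_shift = 1;	/* each grow step doubles the size */
	uint32_t grow_trunk = 4;	/* number of growing trunks */
	uint32_t grow_tbl[4];		/* cumulative entries, as in the pool */
	uint32_t i;

	for (i = 0; i < grow_trunk; i++) {
		grow_tbl[i] = trunk_size << (grow_shift * i);
		if (i > 0)
			grow_tbl[i] += grow_tbl[i - 1];
	}
	for (i = 0; i < 7; i++) {
		uint32_t size = trunk_size << (grow_shift *
			(i > grow_trunk ? grow_trunk : i));
		uint32_t offset = !i ? 0 : (i < grow_trunk ?
			grow_tbl[i - 1] :
			grow_tbl[grow_trunk - 1] +
			(trunk_size << (grow_shift * grow_trunk)) *
			(i - grow_trunk));

		printf("trunk %u: %u entries, first index %u\n",
		       i, size, offset);
	}
	return 0;
}

With these values the trunk sizes are 64, 128, 256, 512 and then a stable
1024 entries, so the pool starts with a small 64-entry trunk instead of
allocating the largest trunk up front.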

Users should set both grow_shift and grow_trunk to enable trunk growth.
Keeping either grow_shift or grow_trunk (or both) at 0 makes the trunks
work with a fixed size.
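
For example, a pool could be created in growing mode with a sketch like
the one below (the entry type, pool name and numeric values are
hypothetical, and the driver-internal mlx5_utils.h header is assumed to
be available):

/* Usage sketch with hypothetical values; not taken from the driver. */
#include "mlx5_utils.h"

struct my_entry {
	uint32_t data;
};

static struct mlx5_indexed_pool *
my_pool_create(void)
{
	struct mlx5_indexed_pool_config cfg = {
		.size = sizeof(struct my_entry),
		.trunk_size = 64, /* first trunk: 64 entries, power of 2 */
		.grow_shift = 1,  /* each grow step doubles the entry number */
		.grow_trunk = 4,  /* growth stops after 4 growing trunks */
		.need_lock = 1,   /* pool may be used from multiple threads */
		.type = "my_ipool",
	};

	return mlx5_ipool_create(&cfg);
}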

Signed-off-by: Suanming Mou <suanmingm@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
---
 drivers/net/mlx5/mlx5_utils.c | 105 +++++++++++++++++++++++++++++++++++-------
 drivers/net/mlx5/mlx5_utils.h |  23 +++++++--
 2 files changed, 108 insertions(+), 20 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c
index 4cab7f0..e63921d 100644
--- a/drivers/net/mlx5/mlx5_utils.c
+++ b/drivers/net/mlx5/mlx5_utils.c
@@ -132,16 +132,69 @@ struct mlx5_hlist_entry *
 		rte_spinlock_unlock(&pool->lock);
 }
 
+static inline uint32_t
+mlx5_trunk_idx_get(struct mlx5_indexed_pool *pool, uint32_t entry_idx)
+{
+	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
+	uint32_t trunk_idx = 0;
+	uint32_t i;
+
+	if (!cfg->grow_trunk)
+		return entry_idx / cfg->trunk_size;
+	if (entry_idx >= pool->grow_tbl[cfg->grow_trunk - 1]) {
+		trunk_idx = (entry_idx - pool->grow_tbl[cfg->grow_trunk - 1]) /
+			    (cfg->trunk_size << (cfg->grow_shift *
+			    cfg->grow_trunk)) + cfg->grow_trunk;
+	} else {
+		for (i = 0; i < cfg->grow_trunk; i++) {
+			if (entry_idx < pool->grow_tbl[i])
+				break;
+		}
+		trunk_idx = i;
+	}
+	return trunk_idx;
+}
+
+static inline uint32_t
+mlx5_trunk_size_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
+{
+	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
+
+	return cfg->trunk_size << (cfg->grow_shift *
+	       (trunk_idx > cfg->grow_trunk ? cfg->grow_trunk : trunk_idx));
+}
+
+static inline uint32_t
+mlx5_trunk_idx_offset_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
+{
+	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
+	uint32_t offset = 0;
+
+	if (!trunk_idx)
+		return 0;
+	if (!cfg->grow_trunk)
+		return cfg->trunk_size * trunk_idx;
+	if (trunk_idx < cfg->grow_trunk)
+		offset = pool->grow_tbl[trunk_idx - 1];
+	else
+		offset = pool->grow_tbl[cfg->grow_trunk - 1] +
+			 (cfg->trunk_size << (cfg->grow_shift *
+			 cfg->grow_trunk)) * (trunk_idx - cfg->grow_trunk);
+	return offset;
+}
+
 struct mlx5_indexed_pool *
 mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg)
 {
 	struct mlx5_indexed_pool *pool;
+	uint32_t i;
 
 	if (!cfg || !cfg->size || (!cfg->malloc ^ !cfg->free) ||
 	    (cfg->trunk_size && ((cfg->trunk_size & (cfg->trunk_size - 1)) ||
 	    ((__builtin_ffs(cfg->trunk_size) + TRUNK_IDX_BITS) > 32))))
 		return NULL;
-	pool = rte_zmalloc("mlx5_ipool", sizeof(*pool), RTE_CACHE_LINE_SIZE);
+	pool = rte_zmalloc("mlx5_ipool", sizeof(*pool) + cfg->grow_trunk *
+				sizeof(pool->grow_tbl[0]), RTE_CACHE_LINE_SIZE);
 	if (!pool)
 		return NULL;
 	pool->cfg = *cfg;
@@ -154,6 +207,15 @@ struct mlx5_indexed_pool *
 	pool->free_list = TRUNK_INVALID;
 	if (pool->cfg.need_lock)
 		rte_spinlock_init(&pool->lock);
+	/*
+	 * Initialize the dynamic grow trunk size lookup table so that the
+	 * trunk entry index offset can be looked up quickly.
+	 */
+	for (i = 0; i < cfg->grow_trunk; i++) {
+		pool->grow_tbl[i] = cfg->trunk_size << (cfg->grow_shift * i);
+		if (i > 0)
+			pool->grow_tbl[i] += pool->grow_tbl[i - 1];
+	}
 	return pool;
 }
 
@@ -164,6 +226,7 @@ struct mlx5_indexed_pool *
 	struct mlx5_indexed_trunk **trunk_tmp;
 	struct mlx5_indexed_trunk **p;
 	size_t trunk_size = 0;
+	size_t data_size;
 	size_t bmp_size;
 	uint32_t idx;
 
@@ -193,23 +256,23 @@ struct mlx5_indexed_pool *
 	}
 	idx = pool->n_trunk_valid;
 	trunk_size += sizeof(*trunk);
-	bmp_size = rte_bitmap_get_memory_footprint(pool->cfg.trunk_size);
-	trunk_size += pool->cfg.trunk_size * pool->cfg.size + bmp_size;
+	data_size = mlx5_trunk_size_get(pool, idx);
+	bmp_size = rte_bitmap_get_memory_footprint(data_size);
+	trunk_size += data_size * pool->cfg.size + bmp_size;
 	trunk = pool->cfg.malloc(pool->cfg.type, trunk_size,
 				 RTE_CACHE_LINE_SIZE, rte_socket_id());
 	if (!trunk)
 		return -ENOMEM;
 	pool->trunks[idx] = trunk;
 	trunk->idx = idx;
-	trunk->free = pool->cfg.trunk_size;
+	trunk->free = data_size;
 	trunk->prev = TRUNK_INVALID;
 	trunk->next = TRUNK_INVALID;
 	MLX5_ASSERT(pool->free_list == TRUNK_INVALID);
 	pool->free_list = idx;
 	/* Mark all entries as available. */
-	trunk->bmp = rte_bitmap_init_with_all_set(pool->cfg.trunk_size,
-		     &trunk->data[pool->cfg.trunk_size  * pool->cfg.size],
-		     bmp_size);
+	trunk->bmp = rte_bitmap_init_with_all_set(data_size,
+		     &trunk->data[data_size * pool->cfg.size], bmp_size);
 	pool->n_trunk_valid++;
 #ifdef POOL_DEBUG
 	pool->trunk_new++;
@@ -244,10 +307,10 @@ struct mlx5_indexed_pool *
 	MLX5_ASSERT(slab);
 	iidx += __builtin_ctzll(slab);
 	MLX5_ASSERT(iidx != UINT32_MAX);
-	MLX5_ASSERT(iidx < pool->cfg.trunk_size);
+	MLX5_ASSERT(iidx < mlx5_trunk_size_get(pool, trunk->idx));
 	rte_bitmap_clear(trunk->bmp, iidx);
 	p = &trunk->data[iidx * pool->cfg.size];
-	iidx += trunk->idx * pool->cfg.trunk_size;
+	iidx += mlx5_trunk_idx_offset_get(pool, trunk->idx);
 	iidx += 1; /* non-zero index. */
 	trunk->free--;
 #ifdef POOL_DEBUG
@@ -286,19 +349,23 @@ struct mlx5_indexed_pool *
 {
 	struct mlx5_indexed_trunk *trunk;
 	uint32_t trunk_idx;
+	uint32_t entry_idx;
 
 	if (!idx)
 		return;
 	idx -= 1;
 	mlx5_ipool_lock(pool);
-	trunk_idx = idx / pool->cfg.trunk_size;
+	trunk_idx = mlx5_trunk_idx_get(pool, idx);
 	if (trunk_idx >= pool->n_trunk_valid)
 		goto out;
 	trunk = pool->trunks[trunk_idx];
-	if (!trunk || trunk_idx != trunk->idx ||
-	    rte_bitmap_get(trunk->bmp, idx % pool->cfg.trunk_size))
+	if (!trunk)
+		goto out;
+	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
+	if (trunk_idx != trunk->idx ||
+	    rte_bitmap_get(trunk->bmp, entry_idx))
 		goto out;
-	rte_bitmap_set(trunk->bmp, idx % pool->cfg.trunk_size);
+	rte_bitmap_set(trunk->bmp, entry_idx);
 	trunk->free++;
 	if (trunk->free == 1) {
 		/* Put into free trunk list head. */
@@ -326,19 +393,23 @@ struct mlx5_indexed_pool *
 	struct mlx5_indexed_trunk *trunk;
 	void *p = NULL;
 	uint32_t trunk_idx;
+	uint32_t entry_idx;
 
 	if (!idx)
 		return NULL;
 	idx -= 1;
 	mlx5_ipool_lock(pool);
-	trunk_idx = idx / pool->cfg.trunk_size;
+	trunk_idx = mlx5_trunk_idx_get(pool, idx);
 	if (trunk_idx >= pool->n_trunk_valid)
 		goto out;
 	trunk = pool->trunks[trunk_idx];
-	if (!trunk || trunk_idx != trunk->idx ||
-	    rte_bitmap_get(trunk->bmp, idx % pool->cfg.trunk_size))
+	if (!trunk)
+		goto out;
+	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
+	if (trunk_idx != trunk->idx ||
+	    rte_bitmap_get(trunk->bmp, entry_idx))
 		goto out;
-	p = &trunk->data[(idx % pool->cfg.trunk_size) * pool->cfg.size];
+	p = &trunk->data[entry_idx * pool->cfg.size];
 out:
 	mlx5_ipool_unlock(pool);
 	return p;
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index e404a5c..af96a87 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -81,9 +81,25 @@
 
 struct mlx5_indexed_pool_config {
 	uint32_t size; /* Pool entry size. */
-	uint32_t trunk_size;
-	/* Trunk entry number. Must be power of 2. */
-	uint32_t need_lock;
+	uint32_t trunk_size:22;
+	/*
+	 * Trunk entry number. Must be a power of 2. It can grow if
+	 * grow_trunk is enabled: the entry number of each new trunk is
+	 * left-shifted by grow_shift. Trunks with an index beyond
+	 * grow_trunk keep the same entry number as the last grow trunk.
+	 */
+	uint32_t grow_trunk:4;
+	/*
+	 * Number of trunks with a growing entry number in the pool. Set
+	 * it to 0 to make the pool work with a fixed trunk size. It
+	 * takes effect only if grow_shift is not 0.
+	 */
+	uint32_t grow_shift:4;
+	/*
+	 * Shift applied to the trunk entry number at each grow step; the
+	 * growth stops after grow_trunk trunks. Used only if grow_trunk != 0.
+	 */
+	uint32_t need_lock:1;
 	/* Lock is needed for multiple thread usage. */
 	const char *type; /* Memory allocate type name. */
 	void *(*malloc)(const char *type, size_t size, unsigned int align,
@@ -116,6 +132,7 @@ struct mlx5_indexed_pool {
 	int64_t trunk_empty;
 	int64_t trunk_free;
 #endif
+	uint32_t grow_tbl[]; /* Save the index offset for the grow trunks. */
 };
 
 /**
-- 
1.8.3.1


Thread overview: 24+ messages
2020-04-13  1:11 [dpdk-dev] [PATCH 00/10] net/mlx5: optimize flow resource allocation Suanming Mou
2020-04-13  1:11 ` [dpdk-dev] [PATCH 01/10] net/mlx5: add indexed memory pool Suanming Mou
2020-04-13  1:11 ` [dpdk-dev] [PATCH 02/10] net/mlx5: add trunk dynamic grow for indexed pool Suanming Mou
2020-04-13  1:11 ` [dpdk-dev] [PATCH 03/10] net/mlx5: add trunk release " Suanming Mou
2020-04-13  1:11 ` [dpdk-dev] [PATCH 04/10] net/mlx5: convert encap/decap resource to indexed Suanming Mou
2020-04-13  1:11 ` [dpdk-dev] [PATCH 05/10] net/mlx5: convert push VLAN " Suanming Mou
2020-04-13  1:11 ` [dpdk-dev] [PATCH 06/10] net/mlx5: convert tag " Suanming Mou
2020-04-13  1:11 ` [dpdk-dev] [PATCH 07/10] net/mlx5: convert port id action " Suanming Mou
2020-04-13  1:11 ` [dpdk-dev] [PATCH 08/10] net/mlx5: convert jump resource " Suanming Mou
2020-04-13  1:11 ` [dpdk-dev] [PATCH 09/10] net/mlx5: convert hrxq " Suanming Mou
2020-04-16  2:41 ` [dpdk-dev] [PATCH v2 00/10] net/mlx5: optimize flow resource allocation Suanming Mou
2020-04-16  2:41   ` [dpdk-dev] [PATCH v2 01/10] net/mlx5: add indexed memory pool Suanming Mou
2020-04-16  2:42   ` Suanming Mou [this message]
2020-04-16  2:42   ` [dpdk-dev] [PATCH v2 03/10] net/mlx5: add trunk release for indexed pool Suanming Mou
2020-04-16  2:42   ` [dpdk-dev] [PATCH v2 04/10] net/mlx5: convert encap/decap resource to indexed Suanming Mou
2020-04-16  2:42   ` [dpdk-dev] [PATCH v2 05/10] net/mlx5: convert push VLAN " Suanming Mou
2020-04-16  2:42   ` [dpdk-dev] [PATCH v2 06/10] net/mlx5: convert tag " Suanming Mou
2020-04-16  2:42   ` [dpdk-dev] [PATCH v2 07/10] net/mlx5: convert port id action " Suanming Mou
2020-04-16  2:42   ` [dpdk-dev] [PATCH v2 08/10] net/mlx5: convert jump resource " Suanming Mou
2020-04-16  2:42   ` [dpdk-dev] [PATCH v2 09/10] net/mlx5: convert hrxq " Suanming Mou
2020-04-16  2:42   ` [dpdk-dev] [PATCH v2 10/10] net/mlx5: convert flow dev handle " Suanming Mou
2020-04-16 15:08   ` [dpdk-dev] [PATCH v2 00/10] net/mlx5: optimize flow resource allocation Raslan Darawsheh
2020-04-17 14:58   ` Ferruh Yigit
2020-04-18  1:46     ` Suanming Mou
