From: Suanming Mou <suanmingm@nvidia.com>
To: <viacheslavo@nvidia.com>, <matan@nvidia.com>
Cc: <rasland@nvidia.com>, <orika@nvidia.com>, <dev@dpdk.org>
Subject: [dpdk-dev] [PATCH v3 18/22] common/mlx5: optimize cache list object memory
Date: Fri, 2 Jul 2021 09:18:12 +0300
Message-ID: <20210702061816.10454-19-suanmingm@nvidia.com>
In-Reply-To: <20210702061816.10454-1-suanmingm@nvidia.com>

Currently, the hash list uses the cache list as its bucket list. The
lists in all the buckets share the same name, ctx and callbacks,
which wastes memory.

This commit moves the name, ctx and callback members of the list
into a constant struct and the remaining members into an inconstant
struct, and adds wrapper functions so that both the hash list and
the cache list can set up their constant and inconstant parts
individually.

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/common/mlx5/mlx5_common_utils.c | 295 ++++++++++++++----------
 drivers/common/mlx5/mlx5_common_utils.h |  45 ++--
 2 files changed, 201 insertions(+), 139 deletions(-)
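
As a back-of-envelope illustration (not part of the patch; assumes
MLX5_NAME_SIZE of 32 bytes and ignores alignment padding, both of
which vary by build), the constant part that every bucket used to
carry works out roughly as follows:

#include <stdio.h>

/* Hypothetical layout mirroring struct mlx5_list_const below:
 * name[32], a ctx pointer, the lcores_share flag and five callback
 * pointers.  Exact sizes depend on the target ABI and padding. */
int main(void)
{
	size_t const_part = 32 + sizeof(void *) + sizeof(_Bool) +
			    5 * sizeof(void (*)(void));
	unsigned int buckets = 1024; /* a plausible hash table size */

	/* Before: one copy per bucket; after: one copy per table. */
	printf("~%zu bytes saved for %u buckets\n",
	       const_part * (buckets - 1), buckets);
	return 0;
}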

diff --git a/drivers/common/mlx5/mlx5_common_utils.c b/drivers/common/mlx5/mlx5_common_utils.c
index f75b1cb0da..858c8d8164 100644
--- a/drivers/common/mlx5/mlx5_common_utils.c
+++ b/drivers/common/mlx5/mlx5_common_utils.c
@@ -14,34 +14,16 @@
 /********************* mlx5 list ************************/
 
 static int
-mlx5_list_init(struct mlx5_list *list, const char *name, void *ctx,
-	       bool lcores_share, struct mlx5_list_cache *gc,
-	       mlx5_list_create_cb cb_create,
-	       mlx5_list_match_cb cb_match,
-	       mlx5_list_remove_cb cb_remove,
-	       mlx5_list_clone_cb cb_clone,
-	       mlx5_list_clone_free_cb cb_clone_free)
+mlx5_list_init(struct mlx5_list_inconst *l_inconst,
+	       struct mlx5_list_const *l_const,
+	       struct mlx5_list_cache *gc)
 {
-	if (!cb_match || !cb_create || !cb_remove || !cb_clone ||
-	    !cb_clone_free) {
-		rte_errno = EINVAL;
-		return -EINVAL;
+	rte_rwlock_init(&l_inconst->lock);
+	if (l_const->lcores_share) {
+		l_inconst->cache[RTE_MAX_LCORE] = gc;
+		LIST_INIT(&l_inconst->cache[RTE_MAX_LCORE]->h);
 	}
-	if (name)
-		snprintf(list->name, sizeof(list->name), "%s", name);
-	list->ctx = ctx;
-	list->lcores_share = lcores_share;
-	list->cb_create = cb_create;
-	list->cb_match = cb_match;
-	list->cb_remove = cb_remove;
-	list->cb_clone = cb_clone;
-	list->cb_clone_free = cb_clone_free;
-	rte_rwlock_init(&list->lock);
-	if (lcores_share) {
-		list->cache[RTE_MAX_LCORE] = gc;
-		LIST_INIT(&list->cache[RTE_MAX_LCORE]->h);
-	}
-	DRV_LOG(DEBUG, "mlx5 list %s initialized.", list->name);
+	DRV_LOG(DEBUG, "mlx5 list %s initialized.", l_const->name);
 	return 0;
 }
 
@@ -56,16 +38,30 @@ mlx5_list_create(const char *name, void *ctx, bool lcores_share,
 	struct mlx5_list *list;
 	struct mlx5_list_cache *gc = NULL;
 
+	if (!cb_match || !cb_create || !cb_remove || !cb_clone ||
+	    !cb_clone_free) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
 	list = mlx5_malloc(MLX5_MEM_ZERO,
 			   sizeof(*list) + (lcores_share ? sizeof(*gc) : 0),
 			   0, SOCKET_ID_ANY);
+
 	if (!list)
 		return NULL;
+	if (name)
+		snprintf(list->l_const.name,
+			 sizeof(list->l_const.name), "%s", name);
+	list->l_const.ctx = ctx;
+	list->l_const.lcores_share = lcores_share;
+	list->l_const.cb_create = cb_create;
+	list->l_const.cb_match = cb_match;
+	list->l_const.cb_remove = cb_remove;
+	list->l_const.cb_clone = cb_clone;
+	list->l_const.cb_clone_free = cb_clone_free;
 	if (lcores_share)
 		gc = (struct mlx5_list_cache *)(list + 1);
-	if (mlx5_list_init(list, name, ctx, lcores_share, gc,
-			   cb_create, cb_match, cb_remove, cb_clone,
-			   cb_clone_free) != 0) {
+	if (mlx5_list_init(&list->l_inconst, &list->l_const, gc) != 0) {
 		mlx5_free(list);
 		return NULL;
 	}
@@ -73,19 +69,21 @@ mlx5_list_create(const char *name, void *ctx, bool lcores_share,
 }
 
 static struct mlx5_list_entry *
-__list_lookup(struct mlx5_list *list, int lcore_index, void *ctx, bool reuse)
+__list_lookup(struct mlx5_list_inconst *l_inconst,
+	      struct mlx5_list_const *l_const,
+	      int lcore_index, void *ctx, bool reuse)
 {
 	struct mlx5_list_entry *entry =
-				LIST_FIRST(&list->cache[lcore_index]->h);
+				LIST_FIRST(&l_inconst->cache[lcore_index]->h);
 	uint32_t ret;
 
 	while (entry != NULL) {
-		if (list->cb_match(list->ctx, entry, ctx) == 0) {
+		if (l_const->cb_match(l_const->ctx, entry, ctx) == 0) {
 			if (reuse) {
 				ret = __atomic_add_fetch(&entry->ref_cnt, 1,
 							 __ATOMIC_RELAXED) - 1;
 				DRV_LOG(DEBUG, "mlx5 list %s entry %p ref: %u.",
-					list->name, (void *)entry,
+					l_const->name, (void *)entry,
 					entry->ref_cnt);
 			} else if (lcore_index < RTE_MAX_LCORE) {
 				ret = __atomic_load_n(&entry->ref_cnt,
@@ -101,41 +99,55 @@ __list_lookup(struct mlx5_list *list, int lcore_index, void *ctx, bool reuse)
 	return NULL;
 }
 
-struct mlx5_list_entry *
-mlx5_list_lookup(struct mlx5_list *list, void *ctx)
+static inline struct mlx5_list_entry *
+_mlx5_list_lookup(struct mlx5_list_inconst *l_inconst,
+		  struct mlx5_list_const *l_const, void *ctx)
 {
 	struct mlx5_list_entry *entry = NULL;
 	int i;
 
-	rte_rwlock_read_lock(&list->lock);
+	rte_rwlock_read_lock(&l_inconst->lock);
 	for (i = 0; i < RTE_MAX_LCORE; i++) {
-		entry = __list_lookup(list, i, ctx, false);
+		if (!l_inconst->cache[i])
+			continue;
+		entry = __list_lookup(l_inconst, l_const, i, ctx, false);
 		if (entry)
 			break;
 	}
-	rte_rwlock_read_unlock(&list->lock);
+	rte_rwlock_read_unlock(&l_inconst->lock);
 	return entry;
 }
 
+struct mlx5_list_entry *
+mlx5_list_lookup(struct mlx5_list *list, void *ctx)
+{
+	return _mlx5_list_lookup(&list->l_inconst, &list->l_const, ctx);
+}
+
+
 static struct mlx5_list_entry *
-mlx5_list_cache_insert(struct mlx5_list *list, int lcore_index,
+mlx5_list_cache_insert(struct mlx5_list_inconst *l_inconst,
+		       struct mlx5_list_const *l_const, int lcore_index,
 		       struct mlx5_list_entry *gentry, void *ctx)
 {
-	struct mlx5_list_entry *lentry = list->cb_clone(list->ctx, gentry, ctx);
+	struct mlx5_list_entry *lentry =
+			l_const->cb_clone(l_const->ctx, gentry, ctx);
 
 	if (unlikely(!lentry))
 		return NULL;
 	lentry->ref_cnt = 1u;
 	lentry->gentry = gentry;
 	lentry->lcore_idx = (uint32_t)lcore_index;
-	LIST_INSERT_HEAD(&list->cache[lcore_index]->h, lentry, next);
+	LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, lentry, next);
 	return lentry;
 }
 
 static void
-__list_cache_clean(struct mlx5_list *list, int lcore_index)
+__list_cache_clean(struct mlx5_list_inconst *l_inconst,
+		   struct mlx5_list_const *l_const,
+		   int lcore_index)
 {
-	struct mlx5_list_cache *c = list->cache[lcore_index];
+	struct mlx5_list_cache *c = l_inconst->cache[lcore_index];
 	struct mlx5_list_entry *entry = LIST_FIRST(&c->h);
 	uint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0,
 					       __ATOMIC_RELAXED);
@@ -145,108 +157,123 @@ __list_cache_clean(struct mlx5_list *list, int lcore_index)
 
 		if (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) {
 			LIST_REMOVE(entry, next);
-			if (list->lcores_share)
-				list->cb_clone_free(list->ctx, entry);
+			if (l_const->lcores_share)
+				l_const->cb_clone_free(l_const->ctx, entry);
 			else
-				list->cb_remove(list->ctx, entry);
+				l_const->cb_remove(l_const->ctx, entry);
 			inv_cnt--;
 		}
 		entry = nentry;
 	}
 }
 
-struct mlx5_list_entry *
-mlx5_list_register(struct mlx5_list *list, void *ctx)
+static inline struct mlx5_list_entry *
+_mlx5_list_register(struct mlx5_list_inconst *l_inconst,
+		    struct mlx5_list_const *l_const,
+		    void *ctx)
 {
 	struct mlx5_list_entry *entry, *local_entry;
 	volatile uint32_t prev_gen_cnt = 0;
 	int lcore_index = rte_lcore_index(rte_lcore_id());
 
-	MLX5_ASSERT(list);
+	MLX5_ASSERT(l_inconst);
 	MLX5_ASSERT(lcore_index < RTE_MAX_LCORE);
 	if (unlikely(lcore_index == -1)) {
 		rte_errno = ENOTSUP;
 		return NULL;
 	}
-	if (unlikely(!list->cache[lcore_index])) {
-		list->cache[lcore_index] = mlx5_malloc(0,
+	if (unlikely(!l_inconst->cache[lcore_index])) {
+		l_inconst->cache[lcore_index] = mlx5_malloc(0,
 					sizeof(struct mlx5_list_cache),
 					RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
-		if (!list->cache[lcore_index]) {
+		if (!l_inconst->cache[lcore_index]) {
 			rte_errno = ENOMEM;
 			return NULL;
 		}
-		list->cache[lcore_index]->inv_cnt = 0;
-		LIST_INIT(&list->cache[lcore_index]->h);
+		l_inconst->cache[lcore_index]->inv_cnt = 0;
+		LIST_INIT(&l_inconst->cache[lcore_index]->h);
 	}
 	/* 0. Free entries that was invalidated by other lcores. */
-	__list_cache_clean(list, lcore_index);
+	__list_cache_clean(l_inconst, l_const, lcore_index);
 	/* 1. Lookup in local cache. */
-	local_entry = __list_lookup(list, lcore_index, ctx, true);
+	local_entry = __list_lookup(l_inconst, l_const, lcore_index, ctx, true);
 	if (local_entry)
 		return local_entry;
-	if (list->lcores_share) {
+	if (l_const->lcores_share) {
 		/* 2. Lookup with read lock on global list, reuse if found. */
-		rte_rwlock_read_lock(&list->lock);
-		entry = __list_lookup(list, RTE_MAX_LCORE, ctx, true);
+		rte_rwlock_read_lock(&l_inconst->lock);
+		entry = __list_lookup(l_inconst, l_const, RTE_MAX_LCORE,
+				      ctx, true);
 		if (likely(entry)) {
-			rte_rwlock_read_unlock(&list->lock);
-			return mlx5_list_cache_insert(list, lcore_index, entry,
-						      ctx);
+			rte_rwlock_read_unlock(&l_inconst->lock);
+			return mlx5_list_cache_insert(l_inconst, l_const,
+						      lcore_index,
+						      entry, ctx);
 		}
-		prev_gen_cnt = list->gen_cnt;
-		rte_rwlock_read_unlock(&list->lock);
+		prev_gen_cnt = l_inconst->gen_cnt;
+		rte_rwlock_read_unlock(&l_inconst->lock);
 	}
 	/* 3. Prepare new entry for global list and for cache. */
-	entry = list->cb_create(list->ctx, ctx);
+	entry = l_const->cb_create(l_const->ctx, ctx);
 	if (unlikely(!entry))
 		return NULL;
 	entry->ref_cnt = 1u;
-	if (!list->lcores_share) {
+	if (!l_const->lcores_share) {
 		entry->lcore_idx = (uint32_t)lcore_index;
-		LIST_INSERT_HEAD(&list->cache[lcore_index]->h, entry, next);
-		__atomic_add_fetch(&list->count, 1, __ATOMIC_RELAXED);
+		LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h,
+				 entry, next);
+		__atomic_add_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
 		DRV_LOG(DEBUG, "MLX5 list %s c%d entry %p new: %u.",
-			list->name, lcore_index, (void *)entry, entry->ref_cnt);
+			l_const->name, lcore_index,
+			(void *)entry, entry->ref_cnt);
 		return entry;
 	}
-	local_entry = list->cb_clone(list->ctx, entry, ctx);
+	local_entry = l_const->cb_clone(l_const->ctx, entry, ctx);
 	if (unlikely(!local_entry)) {
-		list->cb_remove(list->ctx, entry);
+		l_const->cb_remove(l_const->ctx, entry);
 		return NULL;
 	}
 	local_entry->ref_cnt = 1u;
 	local_entry->gentry = entry;
 	local_entry->lcore_idx = (uint32_t)lcore_index;
-	rte_rwlock_write_lock(&list->lock);
+	rte_rwlock_write_lock(&l_inconst->lock);
 	/* 4. Make sure the same entry was not created before the write lock. */
-	if (unlikely(prev_gen_cnt != list->gen_cnt)) {
-		struct mlx5_list_entry *oentry = __list_lookup(list,
+	if (unlikely(prev_gen_cnt != l_inconst->gen_cnt)) {
+		struct mlx5_list_entry *oentry = __list_lookup(l_inconst,
+							       l_const,
 							       RTE_MAX_LCORE,
 							       ctx, true);
 
 		if (unlikely(oentry)) {
 			/* 4.5. Found real race!!, reuse the old entry. */
-			rte_rwlock_write_unlock(&list->lock);
-			list->cb_remove(list->ctx, entry);
-			list->cb_clone_free(list->ctx, local_entry);
-			return mlx5_list_cache_insert(list, lcore_index, oentry,
-						      ctx);
+			rte_rwlock_write_unlock(&l_inconst->lock);
+			l_const->cb_remove(l_const->ctx, entry);
+			l_const->cb_clone_free(l_const->ctx, local_entry);
+			return mlx5_list_cache_insert(l_inconst, l_const,
+						      lcore_index,
+						      oentry, ctx);
 		}
 	}
 	/* 5. Update lists. */
-	LIST_INSERT_HEAD(&list->cache[RTE_MAX_LCORE]->h, entry, next);
-	list->gen_cnt++;
-	rte_rwlock_write_unlock(&list->lock);
-	LIST_INSERT_HEAD(&list->cache[lcore_index]->h, local_entry, next);
-	__atomic_add_fetch(&list->count, 1, __ATOMIC_RELAXED);
-	DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", list->name,
+	LIST_INSERT_HEAD(&l_inconst->cache[RTE_MAX_LCORE]->h, entry, next);
+	l_inconst->gen_cnt++;
+	rte_rwlock_write_unlock(&l_inconst->lock);
+	LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, local_entry, next);
+	__atomic_add_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
+	DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", l_const->name,
 		(void *)entry, entry->ref_cnt);
 	return local_entry;
 }
 
-int
-mlx5_list_unregister(struct mlx5_list *list,
+struct mlx5_list_entry *
+mlx5_list_register(struct mlx5_list *list, void *ctx)
+{
+	return _mlx5_list_register(&list->l_inconst, &list->l_const, ctx);
+}
+
+static inline int
+_mlx5_list_unregister(struct mlx5_list_inconst *l_inconst,
+		      struct mlx5_list_const *l_const,
 		      struct mlx5_list_entry *entry)
 {
 	struct mlx5_list_entry *gentry = entry->gentry;
@@ -258,69 +285,77 @@ mlx5_list_unregister(struct mlx5_list *list,
 	MLX5_ASSERT(lcore_idx < RTE_MAX_LCORE);
 	if (entry->lcore_idx == (uint32_t)lcore_idx) {
 		LIST_REMOVE(entry, next);
-		if (list->lcores_share)
-			list->cb_clone_free(list->ctx, entry);
+		if (l_const->lcores_share)
+			l_const->cb_clone_free(l_const->ctx, entry);
 		else
-			list->cb_remove(list->ctx, entry);
+			l_const->cb_remove(l_const->ctx, entry);
 	} else if (likely(lcore_idx != -1)) {
-		__atomic_add_fetch(&list->cache[entry->lcore_idx]->inv_cnt, 1,
-				   __ATOMIC_RELAXED);
+		__atomic_add_fetch(&l_inconst->cache[entry->lcore_idx]->inv_cnt,
+				   1, __ATOMIC_RELAXED);
 	} else {
 		return 0;
 	}
-	if (!list->lcores_share) {
-		__atomic_sub_fetch(&list->count, 1, __ATOMIC_RELAXED);
+	if (!l_const->lcores_share) {
+		__atomic_sub_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
 		DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
-			list->name, (void *)entry);
+			l_const->name, (void *)entry);
 		return 0;
 	}
 	if (__atomic_sub_fetch(&gentry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
 		return 1;
-	rte_rwlock_write_lock(&list->lock);
+	rte_rwlock_write_lock(&l_inconst->lock);
 	if (likely(gentry->ref_cnt == 0)) {
 		LIST_REMOVE(gentry, next);
-		rte_rwlock_write_unlock(&list->lock);
-		list->cb_remove(list->ctx, gentry);
-		__atomic_sub_fetch(&list->count, 1, __ATOMIC_RELAXED);
+		rte_rwlock_write_unlock(&l_inconst->lock);
+		l_const->cb_remove(l_const->ctx, gentry);
+		__atomic_sub_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
 		DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
-			list->name, (void *)gentry);
+			l_const->name, (void *)gentry);
 		return 0;
 	}
-	rte_rwlock_write_unlock(&list->lock);
+	rte_rwlock_write_unlock(&l_inconst->lock);
 	return 1;
 }
 
+int
+mlx5_list_unregister(struct mlx5_list *list,
+		      struct mlx5_list_entry *entry)
+{
+	return _mlx5_list_unregister(&list->l_inconst, &list->l_const, entry);
+}
+
 static void
-mlx5_list_uninit(struct mlx5_list *list)
+mlx5_list_uninit(struct mlx5_list_inconst *l_inconst,
+		 struct mlx5_list_const *l_const)
 {
 	struct mlx5_list_entry *entry;
 	int i;
 
-	MLX5_ASSERT(list);
+	MLX5_ASSERT(l_inconst);
 	for (i = 0; i <= RTE_MAX_LCORE; i++) {
-		if (!list->cache[i])
+		if (!l_inconst->cache[i])
 			continue;
-		while (!LIST_EMPTY(&list->cache[i]->h)) {
-			entry = LIST_FIRST(&list->cache[i]->h);
+		while (!LIST_EMPTY(&l_inconst->cache[i]->h)) {
+			entry = LIST_FIRST(&l_inconst->cache[i]->h);
 			LIST_REMOVE(entry, next);
 			if (i == RTE_MAX_LCORE) {
-				list->cb_remove(list->ctx, entry);
+				l_const->cb_remove(l_const->ctx, entry);
 				DRV_LOG(DEBUG, "mlx5 list %s entry %p "
-					"destroyed.", list->name,
+					"destroyed.", l_const->name,
 					(void *)entry);
 			} else {
-				list->cb_clone_free(list->ctx, entry);
+				l_const->cb_clone_free(l_const->ctx, entry);
 			}
 		}
 		if (i != RTE_MAX_LCORE)
-			mlx5_free(list->cache[i]);
+			mlx5_free(l_inconst->cache[i]);
 	}
 }
 
 void
 mlx5_list_destroy(struct mlx5_list *list)
 {
-	mlx5_list_uninit(list);
+	mlx5_list_uninit(&list->l_inconst, &list->l_const);
 	mlx5_free(list);
 }
 
@@ -328,7 +363,7 @@ uint32_t
 mlx5_list_get_entry_num(struct mlx5_list *list)
 {
 	MLX5_ASSERT(list);
-	return __atomic_load_n(&list->count, __ATOMIC_RELAXED);
+	return __atomic_load_n(&list->l_inconst.count, __ATOMIC_RELAXED);
 }
 
 /********************* Hash List **********************/
@@ -347,6 +382,11 @@ mlx5_hlist_create(const char *name, uint32_t size, bool direct_key,
 	uint32_t alloc_size;
 	uint32_t i;
 
+	if (!cb_match || !cb_create || !cb_remove || !cb_clone ||
+	    !cb_clone_free) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
 	/* Align to the next power of 2, 32bits integer is enough now. */
 	if (!rte_is_power_of_2(size)) {
 		act_size = rte_align32pow2(size);
@@ -356,7 +396,7 @@ mlx5_hlist_create(const char *name, uint32_t size, bool direct_key,
 		act_size = size;
 	}
 	alloc_size = sizeof(struct mlx5_hlist) +
-		     sizeof(struct mlx5_hlist_bucket)  * act_size;
+		     sizeof(struct mlx5_hlist_bucket) * act_size;
 	if (lcores_share)
 		alloc_size += sizeof(struct mlx5_list_cache)  * act_size;
 	/* Using zmalloc, then no need to initialize the heads. */
@@ -367,15 +407,21 @@ mlx5_hlist_create(const char *name, uint32_t size, bool direct_key,
 			name ? name : "None");
 		return NULL;
 	}
+	if (name)
+		snprintf(h->l_const.name, sizeof(h->l_const.name), "%s", name);
+	h->l_const.ctx = ctx;
+	h->l_const.lcores_share = lcores_share;
+	h->l_const.cb_create = cb_create;
+	h->l_const.cb_match = cb_match;
+	h->l_const.cb_remove = cb_remove;
+	h->l_const.cb_clone = cb_clone;
+	h->l_const.cb_clone_free = cb_clone_free;
 	h->mask = act_size - 1;
-	h->lcores_share = lcores_share;
 	h->direct_key = direct_key;
 	gc = (struct mlx5_list_cache *)&h->buckets[act_size];
 	for (i = 0; i < act_size; i++) {
-		if (mlx5_list_init(&h->buckets[i].l, name, ctx, lcores_share,
-				   lcores_share ? &gc[i] : NULL,
-				   cb_create, cb_match, cb_remove, cb_clone,
-				   cb_clone_free) != 0) {
+		if (mlx5_list_init(&h->buckets[i].l, &h->l_const,
+		    lcores_share ? &gc[i] : NULL) != 0) {
 			mlx5_free(h);
 			return NULL;
 		}
@@ -385,6 +431,7 @@ mlx5_hlist_create(const char *name, uint32_t size, bool direct_key,
 	return h;
 }
 
+
 struct mlx5_list_entry *
 mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx)
 {
@@ -394,7 +441,7 @@ mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx)
 		idx = (uint32_t)(key & h->mask);
 	else
 		idx = rte_hash_crc_8byte(key, 0) & h->mask;
-	return mlx5_list_lookup(&h->buckets[idx].l, ctx);
+	return _mlx5_list_lookup(&h->buckets[idx].l, &h->l_const, ctx);
 }
 
 struct mlx5_list_entry*
@@ -407,9 +454,9 @@ mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key, void *ctx)
 		idx = (uint32_t)(key & h->mask);
 	else
 		idx = rte_hash_crc_8byte(key, 0) & h->mask;
-	entry = mlx5_list_register(&h->buckets[idx].l, ctx);
+	entry = _mlx5_list_register(&h->buckets[idx].l, &h->l_const, ctx);
 	if (likely(entry)) {
-		if (h->lcores_share)
+		if (h->l_const.lcores_share)
 			entry->gentry->bucket_idx = idx;
 		else
 			entry->bucket_idx = idx;
@@ -420,10 +467,10 @@ mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key, void *ctx)
 int
 mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_list_entry *entry)
 {
-	uint32_t idx = h->lcores_share ? entry->gentry->bucket_idx :
+	uint32_t idx = h->l_const.lcores_share ? entry->gentry->bucket_idx :
 							      entry->bucket_idx;
 
-	return mlx5_list_unregister(&h->buckets[idx].l, entry);
+	return _mlx5_list_unregister(&h->buckets[idx].l, &h->l_const, entry);
 }
 
 void
@@ -432,6 +479,6 @@ mlx5_hlist_destroy(struct mlx5_hlist *h)
 	uint32_t i;
 
 	for (i = 0; i <= h->mask; i++)
-		mlx5_list_uninit(&h->buckets[i].l);
+		mlx5_list_uninit(&h->buckets[i].l, &h->l_const);
 	mlx5_free(h);
 }
diff --git a/drivers/common/mlx5/mlx5_common_utils.h b/drivers/common/mlx5/mlx5_common_utils.h
index 979dfafad4..9e8ebe772a 100644
--- a/drivers/common/mlx5/mlx5_common_utils.h
+++ b/drivers/common/mlx5/mlx5_common_utils.h
@@ -80,6 +80,32 @@ typedef void (*mlx5_list_clone_free_cb)(void *tool_ctx,
 typedef struct mlx5_list_entry *(*mlx5_list_create_cb)(void *tool_ctx,
 						       void *ctx);
 
+/**
+ * Linked mlx5 list constant object.
+ */
+struct mlx5_list_const {
+	char name[MLX5_NAME_SIZE]; /**< Name of the mlx5 list. */
+	void *ctx; /* user objects target to callback. */
+	bool lcores_share; /* Whether to share objects between the lcores. */
+	mlx5_list_create_cb cb_create; /**< entry create callback. */
+	mlx5_list_match_cb cb_match; /**< entry match callback. */
+	mlx5_list_remove_cb cb_remove; /**< entry remove callback. */
+	mlx5_list_clone_cb cb_clone; /**< entry clone callback. */
+	mlx5_list_clone_free_cb cb_clone_free;
+	/**< entry clone free callback. */
+};
+
+/**
+ * Linked mlx5 list inconstant data.
+ */
+struct mlx5_list_inconst {
+	rte_rwlock_t lock; /* read/write lock. */
+	volatile uint32_t gen_cnt; /* List modification may update it. */
+	volatile uint32_t count; /* number of entries in list. */
+	struct mlx5_list_cache *cache[RTE_MAX_LCORE + 1];
+	/* Lcore cache, last index is the global cache. */
+};
+
 /**
  * Linked mlx5 list structure.
  *
@@ -96,19 +122,8 @@ typedef struct mlx5_list_entry *(*mlx5_list_create_cb)(void *tool_ctx,
  *
  */
 struct mlx5_list {
-	char name[MLX5_NAME_SIZE]; /**< Name of the mlx5 list. */
-	void *ctx; /* user objects target to callback. */
-	bool lcores_share; /* Whether to share objects between the lcores. */
-	mlx5_list_create_cb cb_create; /**< entry create callback. */
-	mlx5_list_match_cb cb_match; /**< entry match callback. */
-	mlx5_list_remove_cb cb_remove; /**< entry remove callback. */
-	mlx5_list_clone_cb cb_clone; /**< entry clone callback. */
-	mlx5_list_clone_free_cb cb_clone_free;
-	struct mlx5_list_cache *cache[RTE_MAX_LCORE + 1];
-	/* Lcore cache, last index is the global cache. */
-	volatile uint32_t gen_cnt; /* List modification may update it. */
-	volatile uint32_t count; /* number of entries in list. */
-	rte_rwlock_t lock; /* read/write lock. */
+	struct mlx5_list_const l_const;
+	struct mlx5_list_inconst l_inconst;
 };
 
 /**
@@ -214,7 +229,7 @@ mlx5_list_get_entry_num(struct mlx5_list *list);
 
 /* Hash list bucket. */
 struct mlx5_hlist_bucket {
-	struct mlx5_list l;
+	struct mlx5_list_inconst l;
 } __rte_cache_aligned;
 
 /**
@@ -226,7 +241,7 @@ struct mlx5_hlist {
 	uint32_t mask; /* A mask for the bucket index range. */
 	uint8_t flags;
 	bool direct_key; /* Whether to use the key directly as hash index. */
-	bool lcores_share; /* Whether to share objects between the lcores. */
+	struct mlx5_list_const l_const; /* List constant data. */
 	struct mlx5_hlist_bucket buckets[] __rte_cache_aligned;
 };
 
-- 
2.25.1
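
For context, a minimal usage sketch of the reworked list API follows
(hypothetical entry type and "my_*" callbacks, not taken from the
patch; error handling trimmed). Note that mlx5_list_create() now
validates that all five callbacks are provided:

/* Hypothetical caller of the mlx5 list utility; names are
 * illustrative only. */
#include "mlx5_common_utils.h"
#include "mlx5_malloc.h"

struct my_entry {
	struct mlx5_list_entry entry; /* list linkage, first member */
	uint32_t tag;
};

static struct mlx5_list_entry *
my_create(void *tool_ctx __rte_unused, void *ctx)
{
	struct my_entry *e = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*e),
					 0, SOCKET_ID_ANY);

	if (!e)
		return NULL;
	e->tag = *(uint32_t *)ctx;
	return &e->entry;
}

/* Return 0 on match, as __list_lookup() expects. */
static int
my_match(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
	 void *ctx)
{
	return ((struct my_entry *)entry)->tag != *(uint32_t *)ctx;
}

static void
my_remove(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry)
{
	mlx5_free(entry);
}

/* With lcores_share, the clone callbacks manage per-lcore copies. */
static struct mlx5_list_entry *
my_clone(void *tool_ctx, struct mlx5_list_entry *entry __rte_unused,
	 void *ctx)
{
	return my_create(tool_ctx, ctx);
}

static void
my_clone_free(void *tool_ctx __rte_unused,
	      struct mlx5_list_entry *entry)
{
	mlx5_free(entry);
}

void
my_example(void)
{
	uint32_t tag = 7;
	struct mlx5_list *list;
	struct mlx5_list_entry *e;

	list = mlx5_list_create("example", NULL, true,
				my_create, my_match, my_remove,
				my_clone, my_clone_free);
	if (!list)
		return;
	e = mlx5_list_register(list, &tag);    /* takes a reference */
	if (e)
		mlx5_list_unregister(list, e); /* drops it again */
	mlx5_list_destroy(list);
}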

