DPDK patches and discussions
 help / color / mirror / Atom feed
From: Alex Vesker <valex@nvidia.com>
To: <valex@nvidia.com>, <viacheslavo@nvidia.com>,
	<thomas@monjalon.net>, <suanmingm@nvidia.com>,
	Matan Azrad <matan@nvidia.com>
Cc: <dev@dpdk.org>, <orika@nvidia.com>, Erez Shitrit <erezsh@nvidia.com>
Subject: [v5 09/18] net/mlx5/hws: Add HWS pool and buddy
Date: Wed, 19 Oct 2022 23:57:12 +0300	[thread overview]
Message-ID: <20221019205721.8077-10-valex@nvidia.com> (raw)
In-Reply-To: <20221019205721.8077-1-valex@nvidia.com>

From: Erez Shitrit <erezsh@nvidia.com>

HWS needs to manage different types of device memory in
an efficient and quick way. For this, memory pools are
being used.

Signed-off-by: Erez Shitrit <erezsh@nvidia.com>
Signed-off-by: Alex Vesker <valex@nvidia.com>
---
 drivers/net/mlx5/hws/mlx5dr_buddy.c | 201 +++++++++
 drivers/net/mlx5/hws/mlx5dr_buddy.h |  22 +
 drivers/net/mlx5/hws/mlx5dr_pool.c  | 672 ++++++++++++++++++++++++++++
 drivers/net/mlx5/hws/mlx5dr_pool.h  | 152 +++++++
 4 files changed, 1047 insertions(+)
 create mode 100644 drivers/net/mlx5/hws/mlx5dr_buddy.c
 create mode 100644 drivers/net/mlx5/hws/mlx5dr_buddy.h
 create mode 100644 drivers/net/mlx5/hws/mlx5dr_pool.c
 create mode 100644 drivers/net/mlx5/hws/mlx5dr_pool.h

diff --git a/drivers/net/mlx5/hws/mlx5dr_buddy.c b/drivers/net/mlx5/hws/mlx5dr_buddy.c
new file mode 100644
index 0000000000..9dba95f0b1
--- /dev/null
+++ b/drivers/net/mlx5/hws/mlx5dr_buddy.c
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates
+ */
+
+#include <rte_bitmap.h>
+#include <rte_malloc.h>
+#include "mlx5dr_internal.h"
+#include "mlx5dr_buddy.h"
+
+/* Allocate a zero-initialized rte_bitmap able to track 's' bits.
+ * On success the caller owns the bitmap; rte_bitmap_init() places the
+ * bitmap header at the start of 'mem', so rte_free() on the returned
+ * pointer releases the whole allocation.
+ * Returns NULL and sets rte_errno on failure.
+ */
+static struct rte_bitmap *bitmap_alloc0(int s)
+{
+	struct rte_bitmap *bitmap;
+	uint32_t bmp_size;
+	void *mem;
+
+	/* Footprint covers the bitmap header plus its slab array */
+	bmp_size = rte_bitmap_get_memory_footprint(s);
+	mem = rte_zmalloc("create_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
+	if (!mem) {
+		DR_LOG(ERR, "No mem for bitmap");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+
+	bitmap = rte_bitmap_init(s, mem, bmp_size);
+	if (!bitmap) {
+		DR_LOG(ERR, "%s Failed to initialize bitmap", __func__);
+		rte_errno = EINVAL;
+		goto err_mem_alloc;
+	}
+
+	return bitmap;
+
+err_mem_alloc:
+	rte_free(mem);
+	return NULL;
+}
+
+/* Mark bit 'pos' as set (block free, in buddy terms). */
+static void bitmap_set_bit(struct rte_bitmap *bmp, uint32_t pos)
+{
+	rte_bitmap_set(bmp, pos);
+}
+
+/* Clear bit 'pos' (block in use, in buddy terms). */
+static void bitmap_clear_bit(struct rte_bitmap *bmp, uint32_t pos)
+{
+	rte_bitmap_clear(bmp, pos);
+}
+
+/* Return true if bit 'n' is set. */
+static bool bitmap_test_bit(struct rte_bitmap *bmp, unsigned long n)
+{
+	return !!rte_bitmap_get(bmp, n);
+}
+
+/* Find the first set bit at or above position 'n'.
+ * Returns the bit position on success, or 'm' (the out-of-range
+ * sentinel used by the caller) when the bitmap is empty or the found
+ * bit is unexpectedly below 'n'.
+ */
+static unsigned long bitmap_ffs(struct rte_bitmap *bmap,
+				unsigned long n, unsigned long m)
+{
+	uint64_t out_slab = 0;
+	uint32_t pos = 0; /* Compilation warn */
+
+	__rte_bitmap_scan_init(bmap);
+	if (!rte_bitmap_scan(bmap, &pos, &out_slab)) {
+		DR_LOG(ERR, "Failed to get slab from bitmap.");
+		return m;
+	}
+	/* Scan returns the slab base; add the offset of its lowest set bit */
+	pos = pos + __builtin_ctzll(out_slab);
+
+	if (pos < n) {
+		/* Fixed format specifiers: pos is uint32_t, n is unsigned long;
+		 * PRIx64 on an unsigned long is undefined behavior on 32-bit.
+		 */
+		DR_LOG(ERR, "Unexpected bit (%u < %lu) from bitmap", pos, n);
+		return m;
+	}
+	return pos;
+}
+
+/* Find the first set bit in 'addr'; returns 'size' when none is found. */
+static unsigned long mlx5dr_buddy_find_first_bit(struct rte_bitmap *addr,
+						 uint32_t size)
+{
+	return bitmap_ffs(addr, 0, size);
+}
+
+/* Initialize a buddy allocator covering 2^max_order segments.
+ * One bitmap per order tracks the free blocks of that order; initially
+ * the single top-order block is marked free.
+ * Returns 0 on success, -1 with rte_errno set on failure.
+ */
+static int mlx5dr_buddy_init(struct mlx5dr_buddy_mem *buddy, uint32_t max_order)
+{
+	int i, s;
+
+	buddy->max_order = max_order;
+
+	/* sizeof(*buddy->bits) instead of sizeof(long *): tie the element
+	 * size to the array's actual type (struct rte_bitmap *).
+	 */
+	buddy->bits = simple_calloc(buddy->max_order + 1, sizeof(*buddy->bits));
+	if (!buddy->bits) {
+		rte_errno = ENOMEM;
+		return -1;
+	}
+
+	buddy->num_free = simple_calloc(buddy->max_order + 1, sizeof(*buddy->num_free));
+	if (!buddy->num_free) {
+		rte_errno = ENOMEM;
+		goto err_out_free_bits;
+	}
+
+	/* Order i can hold 2^(max_order - i) blocks */
+	for (i = 0; i <= (int)buddy->max_order; ++i) {
+		s = 1 << (buddy->max_order - i);
+		buddy->bits[i] = bitmap_alloc0(s);
+		if (!buddy->bits[i])
+			goto err_out_free_num_free;
+	}
+
+	/* The whole range starts as one free block of the highest order */
+	bitmap_set_bit(buddy->bits[buddy->max_order], 0);
+
+	buddy->num_free[buddy->max_order] = 1;
+
+	return 0;
+
+err_out_free_num_free:
+	/* Slots past the failure point are NULL; rte_free(NULL) is a no-op */
+	for (i = 0; i <= (int)buddy->max_order; ++i)
+		rte_free(buddy->bits[i]);
+
+	simple_free(buddy->num_free);
+
+err_out_free_bits:
+	simple_free(buddy->bits);
+	return -1;
+}
+
+/* Allocate and initialize a buddy allocator of 2^max_order segments.
+ * Returns NULL with rte_errno set on failure.  Free with
+ * mlx5dr_buddy_cleanup() followed by simple_free() on the struct.
+ */
+struct mlx5dr_buddy_mem *mlx5dr_buddy_create(uint32_t max_order)
+{
+	struct mlx5dr_buddy_mem *buddy;
+
+	buddy = simple_calloc(1, sizeof(*buddy));
+	if (!buddy) {
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+
+	if (mlx5dr_buddy_init(buddy, max_order))
+		goto free_buddy;
+
+	return buddy;
+
+free_buddy:
+	simple_free(buddy);
+	return NULL;
+}
+
+/* Release the buddy's internal bitmaps and counters.
+ * NOTE: does not free the mlx5dr_buddy_mem struct itself; the owner
+ * must call simple_free() on it afterwards.
+ */
+void mlx5dr_buddy_cleanup(struct mlx5dr_buddy_mem *buddy)
+{
+	int i;
+
+	for (i = 0; i <= (int)buddy->max_order; ++i) {
+		rte_free(buddy->bits[i]);
+	}
+
+	simple_free(buddy->num_free);
+	simple_free(buddy->bits);
+}
+
+/* Allocate a block of 2^order segments from the buddy.
+ * Finds the smallest free block of order >= 'order' and repeatedly
+ * splits it, marking each split-off sibling free, until a block of the
+ * requested order remains.
+ * Returns the segment offset of the allocated block, or -1 if no free
+ * block of sufficient order exists.
+ */
+int mlx5dr_buddy_alloc_mem(struct mlx5dr_buddy_mem *buddy, int order)
+{
+	int seg;
+	int o, m;
+
+	for (o = order; o <= (int)buddy->max_order; ++o)
+		if (buddy->num_free[o]) {
+			m = 1 << (buddy->max_order - o);
+			seg = mlx5dr_buddy_find_first_bit(buddy->bits[o], m);
+			/* find_first_bit returns m when the counter and the
+			 * bitmap disagree; treat as allocation failure.
+			 */
+			if (m <= seg)
+				return -1;
+
+			goto found;
+		}
+
+	return -1;
+
+found:
+	bitmap_clear_bit(buddy->bits[o], seg);
+	--buddy->num_free[o];
+
+	/* Split down to the requested order; at each level the buddy
+	 * (seg ^ 1) becomes a new free block.
+	 */
+	while (o > order) {
+		--o;
+		seg <<= 1;
+		bitmap_set_bit(buddy->bits[o], seg ^ 1);
+		++buddy->num_free[o];
+	}
+
+	/* Convert from order-sized units back to base segment units */
+	seg <<= order;
+
+	return seg;
+}
+
+/* Return a block of 2^order segments starting at 'seg' to the buddy.
+ * While the block's buddy (seg ^ 1) is also free, coalesce the pair
+ * into one block of the next higher order.
+ */
+void mlx5dr_buddy_free_mem(struct mlx5dr_buddy_mem *buddy, uint32_t seg, int order)
+{
+	seg >>= order;
+
+	while (bitmap_test_bit(buddy->bits[order], seg ^ 1)) {
+		bitmap_clear_bit(buddy->bits[order], seg ^ 1);
+		--buddy->num_free[order];
+		seg >>= 1;
+		++order;
+	}
+
+	bitmap_set_bit(buddy->bits[order], seg);
+
+	++buddy->num_free[order];
+}
+
diff --git a/drivers/net/mlx5/hws/mlx5dr_buddy.h b/drivers/net/mlx5/hws/mlx5dr_buddy.h
new file mode 100644
index 0000000000..b9ec446b99
--- /dev/null
+++ b/drivers/net/mlx5/hws/mlx5dr_buddy.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates
+ */
+
+#ifndef MLX5DR_BUDDY_H_
+#define MLX5DR_BUDDY_H_
+
+/* Classic buddy allocator over 2^max_order segments. */
+struct mlx5dr_buddy_mem {
+	/* Per-order bitmaps; a set bit marks a free block of that order */
+	struct rte_bitmap **bits;
+	/* Per-order count of free blocks, kept in sync with 'bits' */
+	unsigned int *num_free;
+	uint32_t max_order;
+};
+
+struct mlx5dr_buddy_mem *mlx5dr_buddy_create(uint32_t max_order);
+
+/* Frees internals only; caller frees the struct itself */
+void mlx5dr_buddy_cleanup(struct mlx5dr_buddy_mem *buddy);
+
+/* Returns segment offset, or -1 when no block of 'order' is free */
+int mlx5dr_buddy_alloc_mem(struct mlx5dr_buddy_mem *buddy, int order);
+
+void mlx5dr_buddy_free_mem(struct mlx5dr_buddy_mem *buddy, uint32_t seg, int order);
+
+#endif /* MLX5DR_BUDDY_H_ */
diff --git a/drivers/net/mlx5/hws/mlx5dr_pool.c b/drivers/net/mlx5/hws/mlx5dr_pool.c
new file mode 100644
index 0000000000..2bfda5b4a5
--- /dev/null
+++ b/drivers/net/mlx5/hws/mlx5dr_pool.c
@@ -0,0 +1,672 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates
+ */
+
+#include <rte_bitmap.h>
+#include <rte_malloc.h>
+#include "mlx5dr_buddy.h"
+#include "mlx5dr_internal.h"
+
+/* Destroy the FW object backing 'resource' and free the wrapper. */
+static void mlx5dr_pool_free_one_resource(struct mlx5dr_pool_resource *resource)
+{
+	mlx5dr_cmd_destroy_obj(resource->devx_obj);
+
+	simple_free(resource);
+}
+
+/* Free the pool resource at 'resource_idx' and, for FDB tables, its
+ * mirror counterpart.  Slots are NULLed so they can be reallocated.
+ */
+static void mlx5dr_pool_resource_free(struct mlx5dr_pool *pool,
+				      int resource_idx)
+{
+	mlx5dr_pool_free_one_resource(pool->resource[resource_idx]);
+	pool->resource[resource_idx] = NULL;
+
+	if (pool->tbl_type == MLX5DR_TABLE_TYPE_FDB) {
+		mlx5dr_pool_free_one_resource(pool->mirror_resource[resource_idx]);
+		pool->mirror_resource[resource_idx] = NULL;
+	}
+}
+
+/* Create one FW-backed resource (STE or STC object) of 2^log_range
+ * entries for the given table type.
+ * Returns the resource wrapper, or NULL on failure (rte_errno set by
+ * the failing layer).
+ */
+static struct mlx5dr_pool_resource *
+mlx5dr_pool_create_one_resource(struct mlx5dr_pool *pool, uint32_t log_range,
+				uint32_t fw_ft_type)
+{
+	struct mlx5dr_cmd_ste_create_attr ste_attr;
+	struct mlx5dr_cmd_stc_create_attr stc_attr;
+	struct mlx5dr_pool_resource *resource;
+	struct mlx5dr_devx_obj *devx_obj;
+
+	resource = simple_malloc(sizeof(*resource));
+	if (!resource) {
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+
+	switch (pool->type) {
+	case MLX5DR_POOL_TYPE_STE:
+		ste_attr.log_obj_range = log_range;
+		ste_attr.table_type = fw_ft_type;
+		devx_obj = mlx5dr_cmd_ste_create(pool->ctx->ibv_ctx, &ste_attr);
+		break;
+	case MLX5DR_POOL_TYPE_STC:
+		stc_attr.log_obj_range = log_range;
+		stc_attr.table_type = fw_ft_type;
+		devx_obj = mlx5dr_cmd_stc_create(pool->ctx->ibv_ctx, &stc_attr);
+		break;
+	default:
+		assert(0);
+		/* Don't leave devx_obj uninitialized when asserts are
+		 * compiled out (NDEBUG); fall through to the error path.
+		 */
+		devx_obj = NULL;
+		break;
+	}
+
+	if (!devx_obj) {
+		DR_LOG(ERR, "Failed to allocate resource objects");
+		goto free_resource;
+	}
+
+	resource->pool = pool;
+	resource->devx_obj = devx_obj;
+	resource->range = 1 << log_range;
+	resource->base_id = devx_obj->id;
+
+	return resource;
+
+free_resource:
+	simple_free(resource);
+	return NULL;
+}
+
+/* Allocate the resource for pool slot 'idx', and for FDB tables also
+ * the mirrored resource.  When the pool is optimized for one side only,
+ * the other side is created with log range 0 (minimal object).
+ * Returns 0 on success, rte_errno on failure with nothing left
+ * allocated in the slot.
+ */
+static int
+mlx5dr_pool_resource_alloc(struct mlx5dr_pool *pool, uint32_t log_range, int idx)
+{
+	struct mlx5dr_pool_resource *resource;
+	uint32_t fw_ft_type, opt_log_range;
+
+	fw_ft_type = mlx5dr_table_get_res_fw_ft_type(pool->tbl_type, false);
+	opt_log_range = pool->opt_type == MLX5DR_POOL_OPTIMIZE_ORIG ? 0 : log_range;
+	resource = mlx5dr_pool_create_one_resource(pool, opt_log_range, fw_ft_type);
+	if (!resource) {
+		DR_LOG(ERR, "Failed allocating resource");
+		return rte_errno;
+	}
+	pool->resource[idx] = resource;
+
+	if (pool->tbl_type == MLX5DR_TABLE_TYPE_FDB) {
+		struct mlx5dr_pool_resource *mir_resource;
+
+		fw_ft_type = mlx5dr_table_get_res_fw_ft_type(pool->tbl_type, true);
+		opt_log_range = pool->opt_type == MLX5DR_POOL_OPTIMIZE_MIRROR ? 0 : log_range;
+		mir_resource = mlx5dr_pool_create_one_resource(pool, opt_log_range, fw_ft_type);
+		if (!mir_resource) {
+			DR_LOG(ERR, "Failed allocating mirrored resource");
+			/* Roll back the primary resource on mirror failure */
+			mlx5dr_pool_free_one_resource(resource);
+			pool->resource[idx] = NULL;
+			return rte_errno;
+		}
+		pool->mirror_resource[idx] = mir_resource;
+	}
+
+	return 0;
+}
+
+/* Pop the first set (free) bit from 'bitmap' into *iidx and clear it.
+ * Returns 0 on success, ENOMEM when no free bit remains.
+ */
+static int mlx5dr_pool_bitmap_get_free_slot(struct rte_bitmap *bitmap, uint32_t *iidx)
+{
+	uint64_t slab = 0;
+
+	/* Restart the scan from position 0 for a deterministic result */
+	__rte_bitmap_scan_init(bitmap);
+
+	if (!rte_bitmap_scan(bitmap, iidx, &slab))
+		return ENOMEM;
+
+	/* Scan yields the slab base; add the lowest set bit's offset */
+	*iidx += __builtin_ctzll(slab);
+
+	rte_bitmap_clear(bitmap, *iidx);
+
+	return 0;
+}
+
+/* Create a bitmap of 2^log_range bits with every bit set (all slots
+ * free).  Returns NULL with rte_errno = ENOMEM on failure.  The caller
+ * releases the bitmap with rte_free() on the returned pointer.
+ */
+static struct rte_bitmap *mlx5dr_pool_create_and_init_bitmap(uint32_t log_range)
+{
+	struct rte_bitmap *cur_bmp;
+	uint32_t bmp_size;
+	void *mem;
+
+	bmp_size = rte_bitmap_get_memory_footprint(1 << log_range);
+	mem = rte_zmalloc("create_stc_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
+	if (!mem) {
+		DR_LOG(ERR, "No mem for bitmap");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+
+	/* All-set variant: every slot starts out free */
+	cur_bmp = rte_bitmap_init_with_all_set(1 << log_range, mem, bmp_size);
+	if (!cur_bmp) {
+		rte_free(mem);
+		DR_LOG(ERR, "Failed to initialize stc bitmap.");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+
+	return cur_bmp;
+}
+
+/* Buddy-db callback: return a chunk to the buddy it was carved from.
+ * chunk->resource_idx selects the buddy; offset/order identify the block.
+ */
+static void mlx5dr_pool_buddy_db_put_chunk(struct mlx5dr_pool *pool,
+				      struct mlx5dr_pool_chunk *chunk)
+{
+	struct mlx5dr_buddy_mem *buddy;
+
+	buddy = pool->db.buddy_manager->buddies[chunk->resource_idx];
+	if (!buddy) {
+		/* Freeing into a non-existent buddy is a caller bug */
+		assert(false);
+		DR_LOG(ERR, "No such buddy (%d)", chunk->resource_idx);
+		return;
+	}
+
+	mlx5dr_buddy_free_mem(buddy, chunk->offset, chunk->order);
+}
+
+/* Return the buddy at slot 'idx', creating it (and its backing FW
+ * resource) on first use.  *is_new_buddy is set to true only when a new
+ * buddy was created.  Returns NULL on failure with rte_errno set by the
+ * failing layer.
+ */
+static struct mlx5dr_buddy_mem *
+mlx5dr_pool_buddy_get_next_buddy(struct mlx5dr_pool *pool, int idx,
+				 uint32_t order, bool *is_new_buddy)
+{
+	/* Was erroneously declared 'static': the pointer is always
+	 * assigned before use, so behavior was unchanged, but a static
+	 * local is shared across calls/pools and not thread-safe.
+	 */
+	struct mlx5dr_buddy_mem *buddy;
+	uint32_t new_buddy_size;
+
+	buddy = pool->db.buddy_manager->buddies[idx];
+	if (buddy)
+		return buddy;
+
+	/* New buddy must hold at least one chunk of 'order' */
+	new_buddy_size = RTE_MAX(pool->alloc_log_sz, order);
+	*is_new_buddy = true;
+	buddy = mlx5dr_buddy_create(new_buddy_size);
+	if (!buddy) {
+		DR_LOG(ERR, "Failed to create buddy order: %d index: %d",
+		       new_buddy_size, idx);
+		return NULL;
+	}
+
+	if (mlx5dr_pool_resource_alloc(pool, new_buddy_size, idx) != 0) {
+		DR_LOG(ERR, "Failed to create resource type: %d: size %d index: %d",
+			pool->type, new_buddy_size, idx);
+		mlx5dr_buddy_cleanup(buddy);
+		/* cleanup() frees internals only; free the struct too
+		 * (was leaked on this path).
+		 */
+		simple_free(buddy);
+		return NULL;
+	}
+
+	pool->db.buddy_manager->buddies[idx] = buddy;
+
+	return buddy;
+}
+
+/* Allocate a 2^order chunk from the first buddy that can satisfy it,
+ * creating new buddies on demand.  On success *buddy_idx and *seg
+ * identify the allocation; returns 0, or an errno value on failure.
+ */
+static int mlx5dr_pool_buddy_get_mem_chunk(struct mlx5dr_pool *pool,
+					   int order,
+					   uint32_t *buddy_idx,
+					   int *seg)
+{
+	struct mlx5dr_buddy_mem *buddy;
+	bool new_mem = false;
+	int err = 0;
+	int i;
+
+	*seg = -1;
+
+	/* Find the next free place from the buddy array */
+	while (*seg == -1) {
+		for (i = 0; i < MLX5DR_POOL_RESOURCE_ARR_SZ; i++) {
+			buddy = mlx5dr_pool_buddy_get_next_buddy(pool, i,
+								 order,
+								 &new_mem);
+			if (!buddy) {
+				err = rte_errno;
+				goto out;
+			}
+
+			*seg = mlx5dr_buddy_alloc_mem(buddy, order);
+			if (*seg != -1)
+				goto found;
+
+			/* A one-resource pool may not spill into buddy i+1 */
+			if (pool->flags & MLX5DR_POOL_FLAGS_ONE_RESOURCE) {
+				DR_LOG(ERR, "Fail to allocate seg for one resource pool");
+				err = rte_errno;
+				goto out;
+			}
+
+			if (new_mem) {
+				/* We have new memory pool, should be place for us */
+				assert(false);
+				DR_LOG(ERR, "No memory for order: %d with buddy no: %d",
+					order, i);
+				rte_errno = ENOMEM;
+				err = ENOMEM;
+				goto out;
+			}
+		}
+	}
+
+found:
+	*buddy_idx = i;
+out:
+	return err;
+}
+
+/* Buddy-db callback: fill chunk->resource_idx/offset with a free slot
+ * of chunk->order.  Returns 0 or an errno value.
+ */
+static int mlx5dr_pool_buddy_db_get_chunk(struct mlx5dr_pool *pool,
+				     struct mlx5dr_pool_chunk *chunk)
+{
+	int ret = 0;
+
+	/* Go over the buddies and find next free slot */
+	ret = mlx5dr_pool_buddy_get_mem_chunk(pool, chunk->order,
+					      &chunk->resource_idx,
+					      &chunk->offset);
+	if (ret)
+		DR_LOG(ERR, "Failed to get free slot for chunk with order: %d",
+			chunk->order);
+
+	return ret;
+}
+
+/* Buddy-db callback: destroy every buddy and the manager itself.
+ * FW resources are freed separately in mlx5dr_pool_destroy().
+ */
+static void mlx5dr_pool_buddy_db_uninit(struct mlx5dr_pool *pool)
+{
+	struct mlx5dr_buddy_mem *buddy;
+	int i;
+
+	for (i = 0; i < MLX5DR_POOL_RESOURCE_ARR_SZ; i++) {
+		buddy = pool->db.buddy_manager->buddies[i];
+		if (buddy) {
+			mlx5dr_buddy_cleanup(buddy);
+			simple_free(buddy);
+			pool->db.buddy_manager->buddies[i] = NULL;
+		}
+	}
+
+	simple_free(pool->db.buddy_manager);
+}
+
+/* Set up the buddy-based db: allocate the manager, optionally
+ * pre-create buddy 0 (ALLOC_MEM_ON_CREATE), and install the buddy
+ * callbacks.  Returns 0 or rte_errno on failure.
+ */
+static int mlx5dr_pool_buddy_db_init(struct mlx5dr_pool *pool, uint32_t log_range)
+{
+	pool->db.buddy_manager = simple_calloc(1, sizeof(*pool->db.buddy_manager));
+	if (!pool->db.buddy_manager) {
+		DR_LOG(ERR, "No mem for buddy_manager with log_range: %d", log_range);
+		rte_errno = ENOMEM;
+		return rte_errno;
+	}
+
+	if (pool->flags & MLX5DR_POOL_FLAGS_ALLOC_MEM_ON_CREATE) {
+		bool new_buddy;
+
+		if (!mlx5dr_pool_buddy_get_next_buddy(pool, 0, log_range, &new_buddy)) {
+			DR_LOG(ERR, "Failed allocating memory on create log_sz: %d", log_range);
+			simple_free(pool->db.buddy_manager);
+			return rte_errno;
+		}
+	}
+
+	pool->p_db_uninit = &mlx5dr_pool_buddy_db_uninit;
+	pool->p_get_chunk = &mlx5dr_pool_buddy_db_get_chunk;
+	pool->p_put_chunk = &mlx5dr_pool_buddy_db_put_chunk;
+
+	return 0;
+}
+
+/* Thin wrapper over mlx5dr_pool_resource_alloc that logs on failure.
+ * Returns 0 or rte_errno.
+ */
+static int mlx5dr_pool_create_resource_on_index(struct mlx5dr_pool *pool,
+						uint32_t alloc_size, int idx)
+{
+	if (mlx5dr_pool_resource_alloc(pool, alloc_size, idx) != 0) {
+		DR_LOG(ERR, "Failed to create resource type: %d: size %d index: %d",
+			pool->type, alloc_size, idx);
+		return rte_errno;
+	}
+
+	return 0;
+}
+
+/* Create element 'idx': the tracking struct, a free-slot bitmap when
+ * the pool shares one fixed-size resource, and the FW resource itself.
+ * Returns the element (also stored in the manager), or NULL on failure.
+ */
+static struct mlx5dr_pool_elements *
+mlx5dr_pool_element_create_new_elem(struct mlx5dr_pool *pool, uint32_t order, int idx)
+{
+	struct mlx5dr_pool_elements *elem;
+	uint32_t alloc_size;
+
+	alloc_size = pool->alloc_log_sz;
+
+	elem = simple_calloc(1, sizeof(*elem));
+	if (!elem) {
+		DR_LOG(ERR, "Failed to create elem order: %d index: %d",
+		       order, idx);
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	/*sharing the same resource, also means that all the elements are with size 1*/
+	if ((pool->flags & MLX5DR_POOL_FLAGS_FIXED_SIZE_OBJECTS) &&
+	    !(pool->flags & MLX5DR_POOL_FLAGS_RESOURCE_PER_CHUNK)) {
+		 /* Currently all chunks in size 1 */
+		elem->bitmap =  mlx5dr_pool_create_and_init_bitmap(alloc_size - order);
+		if (!elem->bitmap) {
+			DR_LOG(ERR, "Failed to create bitmap type: %d: size %d index: %d",
+			       pool->type, alloc_size, idx);
+			goto free_elem;
+		}
+	}
+
+	if (mlx5dr_pool_create_resource_on_index(pool, alloc_size, idx)) {
+		DR_LOG(ERR, "Failed to create resource type: %d: size %d index: %d",
+			pool->type, alloc_size, idx);
+		goto free_db;
+	}
+
+	pool->db.element_manager->elements[idx] = elem;
+
+	return elem;
+
+free_db:
+	/* elem->bitmap may be NULL here; rte_free(NULL) is a no-op */
+	rte_free(elem->bitmap);
+free_elem:
+	simple_free(elem);
+	return NULL;
+}
+
+/* Take a free segment from the element's bitmap into *seg.
+ * Returns 0, or ENOMEM (and marks the element full) when exhausted.
+ */
+static int mlx5dr_pool_element_find_seg(struct mlx5dr_pool_elements *elem, int *seg)
+{
+	if (mlx5dr_pool_bitmap_get_free_slot(elem->bitmap, (uint32_t *)seg)) {
+		elem->is_full = true;
+		return ENOMEM;
+	}
+	return 0;
+}
+
+/* One-size db: allocate a slot from the single element (index 0),
+ * creating it lazily on first use.  *idx is always 0; *seg receives the
+ * slot.  Returns 0 or ENOMEM.
+ */
+static int
+mlx5dr_pool_onesize_element_get_mem_chunk(struct mlx5dr_pool *pool, uint32_t order,
+					  uint32_t *idx, int *seg)
+{
+	struct mlx5dr_pool_elements *elem;
+
+	elem = pool->db.element_manager->elements[0];
+	if (!elem)
+		elem = mlx5dr_pool_element_create_new_elem(pool, order, 0);
+	if (!elem)
+		goto err_no_elem;
+
+	*idx = 0;
+
+	if (mlx5dr_pool_element_find_seg(elem, seg) != 0) {
+		DR_LOG(ERR, "No more resources (last request order: %d)", order);
+		rte_errno = ENOMEM;
+		return ENOMEM;
+	}
+
+	elem->num_of_elements++;
+	return 0;
+
+err_no_elem:
+	DR_LOG(ERR, "Failed to allocate element for order: %d", order);
+	return ENOMEM;
+}
+
+/* General db: each chunk gets its own resource.  Find the first empty
+ * resource slot, create a resource of 2^order there, and return its
+ * index with *seg = 0.  Returns 0 or ENOMEM.
+ */
+static int
+mlx5dr_pool_general_element_get_mem_chunk(struct mlx5dr_pool *pool, uint32_t order,
+					  uint32_t *idx, int *seg)
+{
+	int ret;
+	int i;
+
+	for (i = 0; i < MLX5DR_POOL_RESOURCE_ARR_SZ; i++) {
+		if (!pool->resource[i]) {
+			ret = mlx5dr_pool_create_resource_on_index(pool, order, i);
+			if (ret)
+				goto err_no_res;
+			*idx = i;
+			*seg = 0; /* One memory slot in that element */
+			return 0;
+		}
+	}
+
+	rte_errno = ENOMEM;
+	DR_LOG(ERR, "No more resources (last request order: %d)", order);
+	return ENOMEM;
+
+err_no_res:
+	DR_LOG(ERR, "Failed to allocate element for order: %d", order);
+	return ENOMEM;
+}
+
+/* General-db callback: allocate a dedicated resource for 'chunk'. */
+static int mlx5dr_pool_general_element_db_get_chunk(struct mlx5dr_pool *pool,
+						    struct mlx5dr_pool_chunk *chunk)
+{
+	int ret;
+
+	/* Go over all memory elements and find/allocate free slot */
+	ret = mlx5dr_pool_general_element_get_mem_chunk(pool, chunk->order,
+							&chunk->resource_idx,
+							&chunk->offset);
+	if (ret)
+		DR_LOG(ERR, "Failed to get free slot for chunk with order: %d",
+			chunk->order);
+
+	return ret;
+}
+
+/* General-db callback: each chunk owns its resource, so freeing the
+ * chunk frees the resource (when the pool releases free resources).
+ */
+static void mlx5dr_pool_general_element_db_put_chunk(struct mlx5dr_pool *pool,
+						     struct mlx5dr_pool_chunk *chunk)
+{
+	assert(pool->resource[chunk->resource_idx]);
+
+	if (pool->flags & MLX5DR_POOL_FLAGS_RELEASE_FREE_RESOURCE)
+		mlx5dr_pool_resource_free(pool, chunk->resource_idx);
+}
+
+/* General db keeps no per-db state beyond the resources themselves,
+ * which mlx5dr_pool_destroy() frees; nothing to do here.
+ */
+static void mlx5dr_pool_general_element_db_uninit(struct mlx5dr_pool *pool)
+{
+	(void)pool;
+}
+
+/* This memory management works as follows:
+ * - At start, no memory is allocated at all.
+ * - When a new chunk request arrives:
+ *	allocate a resource and hand it out.
+ * - When that chunk is freed:
+ *	the resource is freed.
+ */
+static int mlx5dr_pool_general_element_db_init(struct mlx5dr_pool *pool)
+{
+	pool->db.element_manager = simple_calloc(1, sizeof(*pool->db.element_manager));
+	if (!pool->db.element_manager) {
+		/* Fixed log typo: "elemnt_manager" -> "element_manager" */
+		DR_LOG(ERR, "No mem for general element_manager");
+		rte_errno = ENOMEM;
+		return rte_errno;
+	}
+
+	pool->p_db_uninit = &mlx5dr_pool_general_element_db_uninit;
+	pool->p_get_chunk = &mlx5dr_pool_general_element_db_get_chunk;
+	pool->p_put_chunk = &mlx5dr_pool_general_element_db_put_chunk;
+
+	return 0;
+}
+
+/* Tear down an element whose last chunk was freed: release the FW
+ * resource, free the element struct, and clear the manager slot.
+ * NOTE(review): elem->bitmap is not freed here — presumably intentional
+ * only if callers guarantee it; looks like a potential leak to confirm.
+ */
+static void mlx5dr_onesize_element_db_destroy_element(struct mlx5dr_pool *pool,
+						      struct mlx5dr_pool_elements *elem,
+						      struct mlx5dr_pool_chunk *chunk)
+{
+	assert(pool->resource[chunk->resource_idx]);
+
+	mlx5dr_pool_resource_free(pool, chunk->resource_idx);
+
+	simple_free(elem);
+	pool->db.element_manager->elements[chunk->resource_idx] = NULL;
+}
+
+/* One-size-db callback: return chunk->offset to element 0's bitmap.
+ * When the last chunk goes and the pool releases free resources, the
+ * whole element is destroyed.
+ */
+static void mlx5dr_onesize_element_db_put_chunk(struct mlx5dr_pool *pool,
+						struct mlx5dr_pool_chunk *chunk)
+{
+	struct mlx5dr_pool_elements *elem;
+
+	/* One-size db always uses resource slot 0 */
+	assert(chunk->resource_idx == 0);
+
+	elem = pool->db.element_manager->elements[chunk->resource_idx];
+	if (!elem) {
+		assert(false);
+		DR_LOG(ERR, "No such element (%d)", chunk->resource_idx);
+		return;
+	}
+
+	rte_bitmap_set(elem->bitmap, chunk->offset);
+	elem->is_full = false;
+	elem->num_of_elements--;
+
+	if (pool->flags & MLX5DR_POOL_FLAGS_RELEASE_FREE_RESOURCE &&
+	   !elem->num_of_elements)
+		mlx5dr_onesize_element_db_destroy_element(pool, elem, chunk);
+}
+
+/* One-size-db callback: take a free slot from the single element. */
+static int mlx5dr_onesize_element_db_get_chunk(struct mlx5dr_pool *pool,
+					       struct mlx5dr_pool_chunk *chunk)
+{
+	int ret = 0;
+
+	/* Go over all memory elements and find/allocate free slot */
+	ret = mlx5dr_pool_onesize_element_get_mem_chunk(pool, chunk->order,
+							&chunk->resource_idx,
+							&chunk->offset);
+	if (ret)
+		DR_LOG(ERR, "Failed to get free slot for chunk with order: %d",
+			chunk->order);
+
+	return ret;
+}
+
+/* One-size-db callback: free every remaining element (and its bitmap)
+ * plus the manager itself.
+ */
+static void mlx5dr_onesize_element_db_uninit(struct mlx5dr_pool *pool)
+{
+	struct mlx5dr_pool_elements *elem;
+	int i;
+
+	for (i = 0; i < MLX5DR_POOL_RESOURCE_ARR_SZ; i++) {
+		elem = pool->db.element_manager->elements[i];
+		if (elem) {
+			if (elem->bitmap)
+				rte_free(elem->bitmap);
+			simple_free(elem);
+			pool->db.element_manager->elements[i] = NULL;
+		}
+	}
+	simple_free(pool->db.element_manager);
+}
+
+/* This memory management works as follows:
+ * - At start, no memory is allocated at all.
+ * - When a new chunk request arrives:
+ *  allocate a slot from the first and only memory resource;
+ *  when it is exhausted, return an error.
+ */
+static int mlx5dr_pool_onesize_element_db_init(struct mlx5dr_pool *pool)
+{
+	pool->db.element_manager = simple_calloc(1, sizeof(*pool->db.element_manager));
+	if (!pool->db.element_manager) {
+		/* Fixed copy-pasted log: this is the one-size db, not the
+		 * general one, and "elemnt" was a typo.
+		 */
+		DR_LOG(ERR, "No mem for one size element_manager");
+		rte_errno = ENOMEM;
+		return rte_errno;
+	}
+
+	pool->p_db_uninit = &mlx5dr_onesize_element_db_uninit;
+	pool->p_get_chunk = &mlx5dr_onesize_element_db_get_chunk;
+	pool->p_put_chunk = &mlx5dr_onesize_element_db_put_chunk;
+
+	return 0;
+}
+
+/* Dispatch db initialization by type (general / one-size / buddy) and
+ * install the matching callback set.  Returns 0 or the init's error.
+ */
+static int mlx5dr_pool_db_init(struct mlx5dr_pool *pool,
+			       enum mlx5dr_db_type db_type)
+{
+	int ret;
+
+	if (db_type == MLX5DR_POOL_DB_TYPE_GENERAL_SIZE)
+		ret = mlx5dr_pool_general_element_db_init(pool);
+	else if (db_type == MLX5DR_POOL_DB_TYPE_ONE_SIZE_RESOURCE)
+		ret = mlx5dr_pool_onesize_element_db_init(pool);
+	else
+		ret = mlx5dr_pool_buddy_db_init(pool, pool->alloc_log_sz);
+
+	if (ret) {
+		DR_LOG(ERR, "Failed to init general db : %d (ret: %d)", db_type, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Run the db-type-specific uninit callback.
+ * (Name keeps the original "unint" spelling; it is referenced by
+ * mlx5dr_pool_destroy.)
+ */
+static void mlx5dr_pool_db_unint(struct mlx5dr_pool *pool)
+{
+	pool->p_db_uninit(pool);
+}
+
+/* Allocate a chunk (chunk->order is the input; resource_idx/offset are
+ * the output) under the pool lock.  Returns 0 or an errno value.
+ */
+int
+mlx5dr_pool_chunk_alloc(struct mlx5dr_pool *pool,
+			struct mlx5dr_pool_chunk *chunk)
+{
+	int ret;
+
+	pthread_spin_lock(&pool->lock);
+	ret = pool->p_get_chunk(pool, chunk);
+	pthread_spin_unlock(&pool->lock);
+
+	return ret;
+}
+
+/* Return a previously allocated chunk to the pool under the pool lock. */
+void mlx5dr_pool_chunk_free(struct mlx5dr_pool *pool,
+			    struct mlx5dr_pool_chunk *chunk)
+{
+	pthread_spin_lock(&pool->lock);
+	pool->p_put_chunk(pool, chunk);
+	pthread_spin_unlock(&pool->lock);
+}
+
+/* Create a pool and select its db strategy from the flag combination:
+ * matcher-STE pools get the general db, STC pools the one-size db, and
+ * everything else the buddy db.
+ * Returns NULL with rte_errno set on failure.
+ */
+struct mlx5dr_pool *
+mlx5dr_pool_create(struct mlx5dr_context *ctx, struct mlx5dr_pool_attr *pool_attr)
+{
+	enum mlx5dr_db_type res_db_type;
+	struct mlx5dr_pool *pool;
+
+	pool = simple_calloc(1, sizeof(*pool));
+	if (!pool) {
+		/* Set rte_errno like every other allocation site here */
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+
+	pool->ctx = ctx;
+	pool->type = pool_attr->pool_type;
+	pool->alloc_log_sz = pool_attr->alloc_log_sz;
+	pool->flags = pool_attr->flags;
+	pool->tbl_type = pool_attr->table_type;
+	pool->opt_type = pool_attr->opt_type;
+
+	pthread_spin_init(&pool->lock, PTHREAD_PROCESS_PRIVATE);
+
+	/* Support general db */
+	if (pool->flags == (MLX5DR_POOL_FLAGS_RELEASE_FREE_RESOURCE |
+			    MLX5DR_POOL_FLAGS_RESOURCE_PER_CHUNK))
+		res_db_type = MLX5DR_POOL_DB_TYPE_GENERAL_SIZE;
+	else if (pool->flags == (MLX5DR_POOL_FLAGS_ONE_RESOURCE |
+				 MLX5DR_POOL_FLAGS_FIXED_SIZE_OBJECTS))
+		res_db_type = MLX5DR_POOL_DB_TYPE_ONE_SIZE_RESOURCE;
+	else
+		res_db_type = MLX5DR_POOL_DB_TYPE_BUDDY;
+
+	/* (Removed a duplicate alloc_log_sz assignment here) */
+
+	if (mlx5dr_pool_db_init(pool, res_db_type))
+		goto free_pool;
+
+	return pool;
+
+free_pool:
+	pthread_spin_destroy(&pool->lock);
+	simple_free(pool);
+	return NULL;
+}
+
+/* Destroy a pool: free every live resource, run the db uninit, then
+ * release the lock and the pool struct.  Always returns 0.
+ */
+int mlx5dr_pool_destroy(struct mlx5dr_pool *pool)
+{
+	int i;
+
+	for (i = 0; i < MLX5DR_POOL_RESOURCE_ARR_SZ; i++)
+		if (pool->resource[i])
+			mlx5dr_pool_resource_free(pool, i);
+
+	mlx5dr_pool_db_unint(pool);
+
+	pthread_spin_destroy(&pool->lock);
+	simple_free(pool);
+	return 0;
+}
diff --git a/drivers/net/mlx5/hws/mlx5dr_pool.h b/drivers/net/mlx5/hws/mlx5dr_pool.h
new file mode 100644
index 0000000000..cd12c3ab9a
--- /dev/null
+++ b/drivers/net/mlx5/hws/mlx5dr_pool.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates
+ */
+
+#ifndef MLX5DR_POOL_H_
+#define MLX5DR_POOL_H_
+
+/* Kind of FW object the pool hands out */
+enum mlx5dr_pool_type {
+	MLX5DR_POOL_TYPE_STE,
+	MLX5DR_POOL_TYPE_STC,
+};
+
+#define MLX5DR_POOL_STC_LOG_SZ 14
+
+#define MLX5DR_POOL_RESOURCE_ARR_SZ 100
+
+/* Handle to one allocation from a pool */
+struct mlx5dr_pool_chunk {
+	uint32_t resource_idx;
+	/* Internal offset, relative to base index */
+	int      offset;
+	/* log2 of the chunk size in base segments */
+	int      order;
+};
+
+/* One FW-backed object (STE/STC range) owned by a pool slot */
+struct mlx5dr_pool_resource {
+	struct mlx5dr_pool *pool;
+	struct mlx5dr_devx_obj *devx_obj;
+	uint32_t base_id;
+	/* Number of entries: 1 << log_range used at creation */
+	uint32_t range;
+};
+
+enum mlx5dr_pool_flags {
+	/* Only one resource in this pool */
+	MLX5DR_POOL_FLAGS_ONE_RESOURCE = 1 << 0,
+	MLX5DR_POOL_FLAGS_RELEASE_FREE_RESOURCE = 1 << 1,
+	/* No sharing of resources between chunks */
+	MLX5DR_POOL_FLAGS_RESOURCE_PER_CHUNK = 1 << 2,
+	/* All objects are of the same size */
+	MLX5DR_POOL_FLAGS_FIXED_SIZE_OBJECTS = 1 << 3,
+	/* Managed by the buddy allocator */
+	MLX5DR_POOL_FLAGS_BUDDY_MANAGED = 1 << 4,
+	/* Allocate pool_type memory on pool creation */
+	MLX5DR_POOL_FLAGS_ALLOC_MEM_ON_CREATE = 1 << 5,
+
+	/* These values should be used by the caller */
+	MLX5DR_POOL_FLAGS_FOR_STC_POOL =
+		MLX5DR_POOL_FLAGS_ONE_RESOURCE |
+		MLX5DR_POOL_FLAGS_FIXED_SIZE_OBJECTS,
+	MLX5DR_POOL_FLAGS_FOR_MATCHER_STE_POOL =
+		MLX5DR_POOL_FLAGS_RELEASE_FREE_RESOURCE |
+		MLX5DR_POOL_FLAGS_RESOURCE_PER_CHUNK,
+	MLX5DR_POOL_FLAGS_FOR_STE_ACTION_POOL =
+		MLX5DR_POOL_FLAGS_ONE_RESOURCE |
+		MLX5DR_POOL_FLAGS_BUDDY_MANAGED |
+		MLX5DR_POOL_FLAGS_ALLOC_MEM_ON_CREATE,
+};
+
+/* For FDB pools: which side (original/mirror) gets a full-size resource */
+enum mlx5dr_pool_optimize {
+	MLX5DR_POOL_OPTIMIZE_NONE = 0x0,
+	MLX5DR_POOL_OPTIMIZE_ORIG = 0x1,
+	MLX5DR_POOL_OPTIMIZE_MIRROR = 0x2,
+};
+
+struct mlx5dr_pool_attr {
+	enum mlx5dr_pool_type pool_type;
+	enum mlx5dr_table_type table_type;
+	enum mlx5dr_pool_flags flags;
+	enum mlx5dr_pool_optimize opt_type;
+	/* Allocation size once memory is depleted (log2 of entries) */
+	size_t alloc_log_sz;
+};
+
+enum mlx5dr_db_type {
+	/* Used for allocating big chunks of memory; each element has its own FW resource */
+	MLX5DR_POOL_DB_TYPE_GENERAL_SIZE,
+	/* One resource only; all elements share the same single size */
+	MLX5DR_POOL_DB_TYPE_ONE_SIZE_RESOURCE,
+	/* Many resources; memory allocated with the buddy mechanism */
+	MLX5DR_POOL_DB_TYPE_BUDDY,
+};
+
+/* Per-slot buddy allocators for the buddy db */
+struct mlx5dr_buddy_manager {
+	struct mlx5dr_buddy_mem *buddies[MLX5DR_POOL_RESOURCE_ARR_SZ];
+};
+
+/* Per-slot bookkeeping for the element-based dbs */
+struct mlx5dr_pool_elements {
+	/* Live chunks allocated from this element */
+	uint32_t num_of_elements;
+	/* Free-slot bitmap; NULL when slots aren't shared */
+	struct rte_bitmap *bitmap;
+	bool is_full;
+};
+
+struct mlx5dr_element_manager {
+	struct mlx5dr_pool_elements *elements[MLX5DR_POOL_RESOURCE_ARR_SZ];
+};
+
+/* db state; which union member is active depends on 'type' */
+struct mlx5dr_pool_db {
+	enum mlx5dr_db_type type;
+	union {
+		struct mlx5dr_element_manager *element_manager;
+		struct mlx5dr_buddy_manager *buddy_manager;
+	};
+};
+
+typedef int (*mlx5dr_pool_db_get_chunk)(struct mlx5dr_pool *pool,
+					struct mlx5dr_pool_chunk *chunk);
+typedef void (*mlx5dr_pool_db_put_chunk)(struct mlx5dr_pool *pool,
+					 struct mlx5dr_pool_chunk *chunk);
+typedef void (*mlx5dr_pool_unint_db)(struct mlx5dr_pool *pool);
+
+struct mlx5dr_pool {
+	struct mlx5dr_context *ctx;
+	enum mlx5dr_pool_type type;
+	enum mlx5dr_pool_flags flags;
+	/* Serializes chunk alloc/free */
+	pthread_spinlock_t lock;
+	size_t alloc_log_sz;
+	enum mlx5dr_table_type tbl_type;
+	enum mlx5dr_pool_optimize opt_type;
+	struct mlx5dr_pool_resource *resource[MLX5DR_POOL_RESOURCE_ARR_SZ];
+	/* FDB tables only: the mirror-side resources */
+	struct mlx5dr_pool_resource *mirror_resource[MLX5DR_POOL_RESOURCE_ARR_SZ];
+	/* DB */
+	struct mlx5dr_pool_db db;
+	/* Functions, set per db type at init */
+	mlx5dr_pool_unint_db p_db_uninit;
+	mlx5dr_pool_db_get_chunk p_get_chunk;
+	mlx5dr_pool_db_put_chunk p_put_chunk;
+};
+
+struct mlx5dr_pool *
+mlx5dr_pool_create(struct mlx5dr_context *ctx,
+		   struct mlx5dr_pool_attr *pool_attr);
+
+int mlx5dr_pool_destroy(struct mlx5dr_pool *pool);
+
+int mlx5dr_pool_chunk_alloc(struct mlx5dr_pool *pool,
+			    struct mlx5dr_pool_chunk *chunk);
+
+void mlx5dr_pool_chunk_free(struct mlx5dr_pool *pool,
+			    struct mlx5dr_pool_chunk *chunk);
+
+/* Return the devx object of the resource the chunk was carved from. */
+static inline struct mlx5dr_devx_obj *
+mlx5dr_pool_chunk_get_base_devx_obj(struct mlx5dr_pool *pool,
+				    struct mlx5dr_pool_chunk *chunk)
+{
+	return pool->resource[chunk->resource_idx]->devx_obj;
+}
+
+/* Mirror-side variant; valid only for FDB-table pools. */
+static inline struct mlx5dr_devx_obj *
+mlx5dr_pool_chunk_get_base_devx_obj_mirror(struct mlx5dr_pool *pool,
+					   struct mlx5dr_pool_chunk *chunk)
+{
+	return pool->mirror_resource[chunk->resource_idx]->devx_obj;
+}
+#endif /* MLX5DR_POOL_H_ */
-- 
2.18.1


  parent reply	other threads:[~2022-10-19 20:59 UTC|newest]

Thread overview: 134+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-09-22 19:03 [v1 00/19] net/mlx5: Add HW steering low level support Alex Vesker
2022-09-22 19:03 ` [v1 01/19] net/mlx5: split flow item translation Alex Vesker
2022-09-22 19:03 ` [v1 02/19] net/mlx5: split flow item matcher and value translation Alex Vesker
2022-09-22 19:03 ` [v1 03/19] net/mlx5: add hardware steering item translation function Alex Vesker
2022-09-22 19:03 ` [v1 04/19] net/mlx5: add port to metadata conversion Alex Vesker
2022-09-22 19:03 ` [v1 05/19] common/mlx5: query set capability of registers Alex Vesker
2022-09-22 19:03 ` [v1 06/19] net/mlx5: provide the available tag registers Alex Vesker
2022-09-22 19:03 ` [v1 07/19] net/mlx5: Add additional glue functions for HWS Alex Vesker
2022-09-22 19:03 ` [v1 08/19] net/mlx5: Remove stub HWS support Alex Vesker
2022-09-22 19:03 ` [v1 09/19] net/mlx5/hws: Add HWS command layer Alex Vesker
2022-09-22 19:03 ` [v1 10/19] net/mlx5/hws: Add HWS pool and buddy Alex Vesker
2022-09-22 19:03 ` [v1 11/19] net/mlx5/hws: Add HWS send layer Alex Vesker
2022-09-22 19:03 ` [v1 12/19] net/mlx5/hws: Add HWS definer layer Alex Vesker
2022-09-22 19:03 ` [v1 13/19] net/mlx5/hws: Add HWS context object Alex Vesker
2022-09-22 19:03 ` [v1 14/19] net/mlx5/hws: Add HWS table object Alex Vesker
2022-09-22 19:03 ` [v1 15/19] net/mlx5/hws: Add HWS matcher object Alex Vesker
2022-09-22 19:03 ` [v1 16/19] net/mlx5/hws: Add HWS rule object Alex Vesker
2022-09-22 19:03 ` [v1 17/19] net/mlx5/hws: Add HWS action object Alex Vesker
2022-09-22 19:03 ` [v1 18/19] net/mlx5/hws: Add HWS debug layer Alex Vesker
2022-09-22 19:03 ` [v1 19/19] net/mlx5/hws: Enable HWS Alex Vesker
2022-10-06 15:03 ` [v2 00/19] net/mlx5: Add HW steering low level support Alex Vesker
2022-10-06 15:03   ` [v2 01/19] net/mlx5: split flow item translation Alex Vesker
2022-10-06 15:03   ` [v2 02/19] net/mlx5: split flow item matcher and value translation Alex Vesker
2022-10-06 15:03   ` [v2 03/19] net/mlx5: add hardware steering item translation function Alex Vesker
2022-10-06 15:03   ` [v2 04/19] net/mlx5: add port to metadata conversion Alex Vesker
2022-10-06 15:03   ` [v2 05/19] common/mlx5: query set capability of registers Alex Vesker
2022-10-06 15:03   ` [v2 06/19] net/mlx5: provide the available tag registers Alex Vesker
2022-10-06 15:03   ` [v2 07/19] net/mlx5: Add additional glue functions for HWS Alex Vesker
2022-10-06 15:03   ` [v2 08/19] net/mlx5: Remove stub HWS support Alex Vesker
2022-10-06 15:03   ` [v2 09/19] net/mlx5/hws: Add HWS command layer Alex Vesker
2022-10-06 15:03   ` [v2 10/19] net/mlx5/hws: Add HWS pool and buddy Alex Vesker
2022-10-06 15:03   ` [v2 11/19] net/mlx5/hws: Add HWS send layer Alex Vesker
2022-10-06 15:03   ` [v2 12/19] net/mlx5/hws: Add HWS definer layer Alex Vesker
2022-10-06 15:03   ` [v2 13/19] net/mlx5/hws: Add HWS context object Alex Vesker
2022-10-06 15:03   ` [v2 14/19] net/mlx5/hws: Add HWS table object Alex Vesker
2022-10-06 15:03   ` [v2 15/19] net/mlx5/hws: Add HWS matcher object Alex Vesker
2022-10-06 15:03   ` [v2 16/19] net/mlx5/hws: Add HWS rule object Alex Vesker
2022-10-06 15:03   ` [v2 17/19] net/mlx5/hws: Add HWS action object Alex Vesker
2022-10-06 15:03   ` [v2 18/19] net/mlx5/hws: Add HWS debug layer Alex Vesker
2022-10-06 15:03   ` [v2 19/19] net/mlx5/hws: Enable HWS Alex Vesker
2022-10-14 11:48 ` [v3 00/18] net/mlx5: Add HW steering low level support Alex Vesker
2022-10-14 11:48   ` [v3 01/18] net/mlx5: split flow item translation Alex Vesker
2022-10-14 11:48   ` [v3 02/18] net/mlx5: split flow item matcher and value translation Alex Vesker
2022-10-14 11:48   ` [v3 03/18] net/mlx5: add hardware steering item translation function Alex Vesker
2022-10-14 11:48   ` [v3 04/18] net/mlx5: add port to metadata conversion Alex Vesker
2022-10-14 11:48   ` [v3 05/18] common/mlx5: query set capability of registers Alex Vesker
2022-10-14 11:48   ` [v3 06/18] net/mlx5: provide the available tag registers Alex Vesker
2022-10-14 11:48   ` [v3 07/18] net/mlx5: Add additional glue functions for HWS Alex Vesker
2022-10-14 11:48   ` [v3 08/18] net/mlx5/hws: Add HWS command layer Alex Vesker
2022-10-14 11:48   ` [v3 09/18] net/mlx5/hws: Add HWS pool and buddy Alex Vesker
2022-10-14 11:48   ` [v3 10/18] net/mlx5/hws: Add HWS send layer Alex Vesker
2022-10-14 11:48   ` [v3 11/18] net/mlx5/hws: Add HWS definer layer Alex Vesker
2022-10-14 11:48   ` [v3 12/18] net/mlx5/hws: Add HWS context object Alex Vesker
2022-10-14 11:48   ` [v3 13/18] net/mlx5/hws: Add HWS table object Alex Vesker
2022-10-14 11:48   ` [v3 14/18] net/mlx5/hws: Add HWS matcher object Alex Vesker
2022-10-14 11:48   ` [v3 15/18] net/mlx5/hws: Add HWS rule object Alex Vesker
2022-10-14 11:48   ` [v3 16/18] net/mlx5/hws: Add HWS action object Alex Vesker
2022-10-14 11:48   ` [v3 17/18] net/mlx5/hws: Add HWS debug layer Alex Vesker
2022-10-14 11:48   ` [v3 18/18] net/mlx5/hws: Enable HWS Alex Vesker
2022-10-19 14:42 ` [v4 00/18] net/mlx5: Add HW steering low level support Alex Vesker
2022-10-19 14:42   ` [v4 01/18] net/mlx5: split flow item translation Alex Vesker
2022-10-19 14:42   ` [v4 02/18] net/mlx5: split flow item matcher and value translation Alex Vesker
2022-10-19 14:42   ` [v4 03/18] net/mlx5: add hardware steering item translation function Alex Vesker
2022-10-19 14:42   ` [v4 04/18] net/mlx5: add port to metadata conversion Alex Vesker
2022-10-19 14:42   ` [v4 05/18] common/mlx5: query set capability of registers Alex Vesker
2022-10-19 14:42   ` [v4 06/18] net/mlx5: provide the available tag registers Alex Vesker
2022-10-19 14:42   ` [v4 07/18] net/mlx5: Add additional glue functions for HWS Alex Vesker
2022-10-19 14:42   ` [v4 08/18] net/mlx5/hws: Add HWS command layer Alex Vesker
2022-10-19 14:42   ` [v4 09/18] net/mlx5/hws: Add HWS pool and buddy Alex Vesker
2022-10-19 14:42   ` [v4 10/18] net/mlx5/hws: Add HWS send layer Alex Vesker
2022-10-19 14:42   ` [v4 11/18] net/mlx5/hws: Add HWS definer layer Alex Vesker
2022-10-19 14:42   ` [v4 12/18] net/mlx5/hws: Add HWS context object Alex Vesker
2022-10-19 14:42   ` [v4 13/18] net/mlx5/hws: Add HWS table object Alex Vesker
2022-10-19 14:42   ` [v4 14/18] net/mlx5/hws: Add HWS matcher object Alex Vesker
2022-10-19 14:42   ` [v4 15/18] net/mlx5/hws: Add HWS rule object Alex Vesker
2022-10-19 14:42   ` [v4 16/18] net/mlx5/hws: Add HWS action object Alex Vesker
2022-10-19 14:42   ` [v4 17/18] net/mlx5/hws: Add HWS debug layer Alex Vesker
2022-10-19 14:42   ` [v4 18/18] net/mlx5/hws: Enable HWS Alex Vesker
2022-10-19 20:57 ` [v5 00/18] net/mlx5: Add HW steering low level support Alex Vesker
2022-10-19 20:57   ` [v5 01/18] net/mlx5: split flow item translation Alex Vesker
2022-10-19 20:57   ` [v5 02/18] net/mlx5: split flow item matcher and value translation Alex Vesker
2022-10-19 20:57   ` [v5 03/18] net/mlx5: add hardware steering item translation function Alex Vesker
2022-10-19 20:57   ` [v5 04/18] net/mlx5: add port to metadata conversion Alex Vesker
2022-10-19 20:57   ` [v5 05/18] common/mlx5: query set capability of registers Alex Vesker
2022-10-19 20:57   ` [v5 06/18] net/mlx5: provide the available tag registers Alex Vesker
2022-10-19 20:57   ` [v5 07/18] net/mlx5: Add additional glue functions for HWS Alex Vesker
2022-10-19 20:57   ` [v5 08/18] net/mlx5/hws: Add HWS command layer Alex Vesker
2022-10-19 20:57   ` Alex Vesker [this message]
2022-10-19 20:57   ` [v5 10/18] net/mlx5/hws: Add HWS send layer Alex Vesker
2022-10-19 20:57   ` [v5 11/18] net/mlx5/hws: Add HWS definer layer Alex Vesker
2022-10-19 20:57   ` [v5 12/18] net/mlx5/hws: Add HWS context object Alex Vesker
2022-10-19 20:57   ` [v5 13/18] net/mlx5/hws: Add HWS table object Alex Vesker
2022-10-19 20:57   ` [v5 14/18] net/mlx5/hws: Add HWS matcher object Alex Vesker
2022-10-19 20:57   ` [v5 15/18] net/mlx5/hws: Add HWS rule object Alex Vesker
2022-10-19 20:57   ` [v5 16/18] net/mlx5/hws: Add HWS action object Alex Vesker
2022-10-19 20:57   ` [v5 17/18] net/mlx5/hws: Add HWS debug layer Alex Vesker
2022-10-19 20:57   ` [v5 18/18] net/mlx5/hws: Enable HWS Alex Vesker
2022-10-20 15:57 ` [v6 00/18] net/mlx5: Add HW steering low level support Alex Vesker
2022-10-20 15:57   ` [v6 01/18] net/mlx5: split flow item translation Alex Vesker
2022-10-24  6:47     ` Slava Ovsiienko
2022-10-20 15:57   ` [v6 02/18] net/mlx5: split flow item matcher and value translation Alex Vesker
2022-10-24  6:49     ` Slava Ovsiienko
2022-10-20 15:57   ` [v6 03/18] net/mlx5: add hardware steering item translation function Alex Vesker
2022-10-24  6:50     ` Slava Ovsiienko
2022-10-20 15:57   ` [v6 04/18] net/mlx5: add port to metadata conversion Alex Vesker
2022-10-24  6:50     ` Slava Ovsiienko
2022-10-20 15:57   ` [v6 05/18] common/mlx5: query set capability of registers Alex Vesker
2022-10-24  6:50     ` Slava Ovsiienko
2022-10-20 15:57   ` [v6 06/18] net/mlx5: provide the available tag registers Alex Vesker
2022-10-24  6:51     ` Slava Ovsiienko
2022-10-20 15:57   ` [v6 07/18] net/mlx5: Add additional glue functions for HWS Alex Vesker
2022-10-24  6:52     ` Slava Ovsiienko
2022-10-20 15:57   ` [v6 08/18] net/mlx5/hws: Add HWS command layer Alex Vesker
2022-10-24  6:52     ` Slava Ovsiienko
2022-10-20 15:57   ` [v6 09/18] net/mlx5/hws: Add HWS pool and buddy Alex Vesker
2022-10-24  6:52     ` Slava Ovsiienko
2022-10-20 15:57   ` [v6 10/18] net/mlx5/hws: Add HWS send layer Alex Vesker
2022-10-24  6:53     ` Slava Ovsiienko
2022-10-20 15:57   ` [v6 11/18] net/mlx5/hws: Add HWS definer layer Alex Vesker
2022-10-24  6:53     ` Slava Ovsiienko
2022-10-20 15:57   ` [v6 12/18] net/mlx5/hws: Add HWS context object Alex Vesker
2022-10-24  6:53     ` Slava Ovsiienko
2022-10-20 15:57   ` [v6 13/18] net/mlx5/hws: Add HWS table object Alex Vesker
2022-10-24  6:54     ` Slava Ovsiienko
2022-10-20 15:57   ` [v6 14/18] net/mlx5/hws: Add HWS matcher object Alex Vesker
2022-10-24  6:54     ` Slava Ovsiienko
2022-10-20 15:57   ` [v6 15/18] net/mlx5/hws: Add HWS rule object Alex Vesker
2022-10-24  6:54     ` Slava Ovsiienko
2022-10-20 15:57   ` [v6 16/18] net/mlx5/hws: Add HWS action object Alex Vesker
2022-10-20 15:57   ` [v6 17/18] net/mlx5/hws: Add HWS debug layer Alex Vesker
2022-10-24  6:54     ` Slava Ovsiienko
2022-10-20 15:57   ` [v6 18/18] net/mlx5/hws: Enable HWS Alex Vesker
2022-10-24  6:54     ` Slava Ovsiienko
2022-10-24 10:56   ` [v6 00/18] net/mlx5: Add HW steering low level support Raslan Darawsheh

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20221019205721.8077-10-valex@nvidia.com \
    --to=valex@nvidia.com \
    --cc=dev@dpdk.org \
    --cc=erezsh@nvidia.com \
    --cc=matan@nvidia.com \
    --cc=orika@nvidia.com \
    --cc=suanmingm@nvidia.com \
    --cc=thomas@monjalon.net \
    --cc=viacheslavo@nvidia.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox; see mirroring instructions
for how to clone and mirror all data and code used for this inbox,
as well as URLs for NNTP newsgroup(s).