From: Alex Vesker <valex@nvidia.com>
To: <valex@nvidia.com>, <viacheslavo@nvidia.com>,
<thomas@monjalon.net>, "Matan Azrad" <matan@nvidia.com>
Cc: <dev@dpdk.org>, <orika@nvidia.com>
Subject: [v1 16/16] net/mlx5/hws: cache definer for reuse
Date: Tue, 31 Jan 2023 11:33:45 +0200 [thread overview]
Message-ID: <20230131093346.1261066-17-valex@nvidia.com> (raw)
In-Reply-To: <20230131093346.1261066-1-valex@nvidia.com>
Definers are a limited resource in the system per GVMI. To
avoid failure we try to improve this by checking if it is possible
to reuse the definers in some cases. Added a cache on the context
for this purpose.
Signed-off-by: Alex Vesker <valex@nvidia.com>
---
drivers/net/mlx5/hws/mlx5dr_context.c | 12 ++-
drivers/net/mlx5/hws/mlx5dr_context.h | 1 +
drivers/net/mlx5/hws/mlx5dr_definer.c | 122 ++++++++++++++++++++++----
drivers/net/mlx5/hws/mlx5dr_definer.h | 14 +++
4 files changed, 130 insertions(+), 19 deletions(-)
diff --git a/drivers/net/mlx5/hws/mlx5dr_context.c b/drivers/net/mlx5/hws/mlx5dr_context.c
index 6627337d9e..08a5ee92a5 100644
--- a/drivers/net/mlx5/hws/mlx5dr_context.c
+++ b/drivers/net/mlx5/hws/mlx5dr_context.c
@@ -13,6 +13,9 @@ static int mlx5dr_context_pools_init(struct mlx5dr_context *ctx)
if (mlx5dr_pat_init_pattern_cache(&ctx->pattern_cache))
return rte_errno;
+ if (mlx5dr_definer_init_cache(&ctx->definer_cache))
+ goto uninit_pat_cache;
+
/* Create an STC pool per FT type */
pool_attr.pool_type = MLX5DR_POOL_TYPE_STC;
pool_attr.flags = MLX5DR_POOL_FLAGS_FOR_STC_POOL;
@@ -35,8 +38,10 @@ static int mlx5dr_context_pools_init(struct mlx5dr_context *ctx)
if (ctx->stc_pool[i])
mlx5dr_pool_destroy(ctx->stc_pool[i]);
- mlx5dr_pat_uninit_pattern_cache(ctx->pattern_cache);
+ mlx5dr_definer_uninit_cache(ctx->definer_cache);
+uninit_pat_cache:
+ mlx5dr_pat_uninit_pattern_cache(ctx->pattern_cache);
return rte_errno;
}
@@ -44,12 +49,13 @@ static void mlx5dr_context_pools_uninit(struct mlx5dr_context *ctx)
{
int i;
- mlx5dr_pat_uninit_pattern_cache(ctx->pattern_cache);
-
for (i = 0; i < MLX5DR_TABLE_TYPE_MAX; i++) {
if (ctx->stc_pool[i])
mlx5dr_pool_destroy(ctx->stc_pool[i]);
}
+
+ mlx5dr_definer_uninit_cache(ctx->definer_cache);
+ mlx5dr_pat_uninit_pattern_cache(ctx->pattern_cache);
}
static int mlx5dr_context_init_pd(struct mlx5dr_context *ctx,
diff --git a/drivers/net/mlx5/hws/mlx5dr_context.h b/drivers/net/mlx5/hws/mlx5dr_context.h
index a38d9484b3..0ba8d0c92e 100644
--- a/drivers/net/mlx5/hws/mlx5dr_context.h
+++ b/drivers/net/mlx5/hws/mlx5dr_context.h
@@ -39,6 +39,7 @@ struct mlx5dr_context {
struct mlx5dr_context_common_res common_res[MLX5DR_TABLE_TYPE_MAX];
struct mlx5dr_context_shared_gvmi_res gvmi_res[MLX5DR_TABLE_TYPE_MAX];
struct mlx5dr_pattern_cache *pattern_cache;
+ struct mlx5dr_definer_cache *definer_cache;
pthread_spinlock_t ctrl_lock;
enum mlx5dr_context_flags flags;
struct mlx5dr_send_engine *send_queue;
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index cf84fbea71..b91f98ee8f 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -2061,6 +2061,7 @@ mlx5dr_definer_compare(struct mlx5dr_definer *definer_a,
{
int i;
+ /* Future: Optimize by comparing selectors with valid mask only */
for (i = 0; i < BYTE_SELECTORS; i++)
if (definer_a->byte_selector[i] != definer_b->byte_selector[i])
return 1;
@@ -2133,15 +2134,106 @@ mlx5dr_definer_calc_layout(struct mlx5dr_matcher *matcher,
return rte_errno;
}
+int mlx5dr_definer_init_cache(struct mlx5dr_definer_cache **cache)
+{
+ struct mlx5dr_definer_cache *new_cache;
+
+ new_cache = simple_calloc(1, sizeof(*new_cache));
+ if (!new_cache) {
+ rte_errno = ENOMEM;
+ return rte_errno;
+ }
+ LIST_INIT(&new_cache->head);
+ *cache = new_cache;
+
+ return 0;
+}
+
+void mlx5dr_definer_uninit_cache(struct mlx5dr_definer_cache *cache)
+{
+ simple_free(cache);
+}
+
+static struct mlx5dr_devx_obj *
+mlx5dr_definer_get_obj(struct mlx5dr_context *ctx,
+ struct mlx5dr_definer *definer)
+{
+ struct mlx5dr_definer_cache *cache = ctx->definer_cache;
+ struct mlx5dr_cmd_definer_create_attr def_attr = {0};
+ struct mlx5dr_definer_cache_item *cached_definer;
+ struct mlx5dr_devx_obj *obj;
+
+ /* Search definer cache for requested definer */
+ LIST_FOREACH(cached_definer, &cache->head, next) {
+ if (mlx5dr_definer_compare(&cached_definer->definer, definer))
+ continue;
+
+ /* Reuse definer and set LRU (move to be first in the list) */
+ LIST_REMOVE(cached_definer, next);
+ LIST_INSERT_HEAD(&cache->head, cached_definer, next);
+ cached_definer->refcount++;
+ return cached_definer->definer.obj;
+ }
+
+ /* Allocate and create definer based on the bitmask tag */
+ def_attr.match_mask = definer->mask.jumbo;
+ def_attr.dw_selector = definer->dw_selector;
+ def_attr.byte_selector = definer->byte_selector;
+
+ obj = mlx5dr_cmd_definer_create(ctx->ibv_ctx, &def_attr);
+ if (!obj)
+ return NULL;
+
+ cached_definer = simple_calloc(1, sizeof(*cached_definer));
+ if (!cached_definer) {
+ rte_errno = ENOMEM;
+ goto free_definer_obj;
+ }
+
+ memcpy(&cached_definer->definer, definer, sizeof(*definer));
+ cached_definer->definer.obj = obj;
+ cached_definer->refcount = 1;
+ LIST_INSERT_HEAD(&cache->head, cached_definer, next);
+
+ return obj;
+
+free_definer_obj:
+ mlx5dr_cmd_destroy_obj(obj);
+ return NULL;
+}
+
+static void
+mlx5dr_definer_put_obj(struct mlx5dr_context *ctx,
+ struct mlx5dr_devx_obj *obj)
+{
+ struct mlx5dr_definer_cache_item *cached_definer;
+
+ LIST_FOREACH(cached_definer, &ctx->definer_cache->head, next) {
+ if (cached_definer->definer.obj != obj)
+ continue;
+
+ /* Object found */
+ if (--cached_definer->refcount)
+ return;
+
+ LIST_REMOVE(cached_definer, next);
+ mlx5dr_cmd_destroy_obj(cached_definer->definer.obj);
+ simple_free(cached_definer);
+ return;
+ }
+
+ /* Programming error, object must be part of cache */
+ assert(false);
+}
+
static struct mlx5dr_definer *
-mlx5dr_definer_alloc(struct ibv_context *ibv_ctx,
+mlx5dr_definer_alloc(struct mlx5dr_context *ctx,
struct mlx5dr_definer_fc *fc,
int fc_sz,
struct rte_flow_item *items,
struct mlx5dr_definer *layout,
bool bind_fc)
{
- struct mlx5dr_cmd_definer_create_attr def_attr = {0};
struct mlx5dr_definer *definer;
int ret;
@@ -2166,12 +2258,7 @@ mlx5dr_definer_alloc(struct ibv_context *ibv_ctx,
/* Create the tag mask used for definer creation */
mlx5dr_definer_create_tag_mask(items, fc, fc_sz, definer->mask.jumbo);
- /* Create definer based on the bitmask tag */
- def_attr.match_mask = definer->mask.jumbo;
- def_attr.dw_selector = layout->dw_selector;
- def_attr.byte_selector = layout->byte_selector;
-
- definer->obj = mlx5dr_cmd_definer_create(ibv_ctx, &def_attr);
+ definer->obj = mlx5dr_definer_get_obj(ctx, definer);
if (!definer->obj)
goto free_definer;
@@ -2183,9 +2270,10 @@ mlx5dr_definer_alloc(struct ibv_context *ibv_ctx,
}
static void
-mlx5dr_definer_free(struct mlx5dr_definer *definer)
+mlx5dr_definer_free(struct mlx5dr_context *ctx,
+ struct mlx5dr_definer *definer)
{
- mlx5dr_cmd_destroy_obj(definer->obj);
+ mlx5dr_definer_put_obj(ctx, definer->obj);
simple_free(definer);
}
@@ -2199,7 +2287,7 @@ mlx5dr_definer_matcher_match_init(struct mlx5dr_context *ctx,
/* Create mendatory match definer */
for (i = 0; i < matcher->num_of_mt; i++) {
- mt[i].definer = mlx5dr_definer_alloc(ctx->ibv_ctx,
+ mt[i].definer = mlx5dr_definer_alloc(ctx,
mt[i].fc,
mt[i].fc_sz,
mt[i].items,
@@ -2214,7 +2302,7 @@ mlx5dr_definer_matcher_match_init(struct mlx5dr_context *ctx,
free_definers:
while (i--)
- mlx5dr_definer_free(mt[i].definer);
+ mlx5dr_definer_free(ctx, mt[i].definer);
return rte_errno;
}
@@ -2222,10 +2310,11 @@ mlx5dr_definer_matcher_match_init(struct mlx5dr_context *ctx,
static void
mlx5dr_definer_matcher_match_uninit(struct mlx5dr_matcher *matcher)
{
+ struct mlx5dr_context *ctx = matcher->tbl->ctx;
int i;
for (i = 0; i < matcher->num_of_mt; i++)
- mlx5dr_definer_free(matcher->mt[i].definer);
+ mlx5dr_definer_free(ctx, matcher->mt[i].definer);
}
static int
@@ -2249,7 +2338,7 @@ mlx5dr_definer_matcher_range_init(struct mlx5dr_context *ctx,
matcher->flags |= MLX5DR_MATCHER_FLAGS_RANGE_DEFINER;
/* Create definer without fcr binding, already binded */
- mt[i].range_definer = mlx5dr_definer_alloc(ctx->ibv_ctx,
+ mt[i].range_definer = mlx5dr_definer_alloc(ctx,
mt[i].fcr,
mt[i].fcr_sz,
mt[i].items,
@@ -2265,7 +2354,7 @@ mlx5dr_definer_matcher_range_init(struct mlx5dr_context *ctx,
free_definers:
while (i--)
if (mt[i].range_definer)
- mlx5dr_definer_free(mt[i].range_definer);
+ mlx5dr_definer_free(ctx, mt[i].range_definer);
return rte_errno;
}
@@ -2273,11 +2362,12 @@ mlx5dr_definer_matcher_range_init(struct mlx5dr_context *ctx,
static void
mlx5dr_definer_matcher_range_uninit(struct mlx5dr_matcher *matcher)
{
+ struct mlx5dr_context *ctx = matcher->tbl->ctx;
int i;
for (i = 0; i < matcher->num_of_mt; i++)
if (matcher->mt[i].range_definer)
- mlx5dr_definer_free(matcher->mt[i].range_definer);
+ mlx5dr_definer_free(ctx, matcher->mt[i].range_definer);
}
static int
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.h b/drivers/net/mlx5/hws/mlx5dr_definer.h
index dd9a297007..464872acd6 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.h
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.h
@@ -569,6 +569,16 @@ struct mlx5dr_definer {
struct mlx5dr_devx_obj *obj;
};
+struct mlx5dr_definer_cache {
+ LIST_HEAD(definer_head, mlx5dr_definer_cache_item) head;
+};
+
+struct mlx5dr_definer_cache_item {
+ struct mlx5dr_definer definer;
+ uint32_t refcount;
+ LIST_ENTRY(mlx5dr_definer_cache_item) next;
+};
+
static inline bool
mlx5dr_definer_is_jumbo(struct mlx5dr_definer *definer)
{
@@ -592,4 +602,8 @@ int mlx5dr_definer_matcher_init(struct mlx5dr_context *ctx,
void mlx5dr_definer_matcher_uninit(struct mlx5dr_matcher *matcher);
+int mlx5dr_definer_init_cache(struct mlx5dr_definer_cache **cache);
+
+void mlx5dr_definer_uninit_cache(struct mlx5dr_definer_cache *cache);
+
#endif
--
2.18.1
next prev parent reply other threads:[~2023-01-31 9:36 UTC|newest]
Thread overview: 36+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-01-31 9:33 [v1 00/16] net/mlx5/hws: support range and partial hash matching Alex Vesker
2023-01-31 9:33 ` [v1 01/16] net/mlx5/hws: support synchronous drain Alex Vesker
2023-01-31 9:33 ` [v1 02/16] net/mlx5/hws: matcher remove AT and MT limitation Alex Vesker
2023-01-31 9:33 ` [v1 03/16] net/mlx5/hws: support GTA WQE write using FW command Alex Vesker
2023-01-31 9:33 ` [v1 04/16] net/mlx5/hws: add capability query for gen wqe command Alex Vesker
2023-01-31 9:33 ` [v1 05/16] net/mlx5/hws: align RTC create command with PRM format Alex Vesker
2023-01-31 9:33 ` [v1 06/16] net/mlx5/hws: add send FW match STE using gen WQE Alex Vesker
2023-01-31 9:33 ` [v1 07/16] net/mlx5/hws: add send FW range STE WQE Alex Vesker
2023-01-31 9:33 ` [v1 08/16] net/mlx5/hws: move matcher size check to function Alex Vesker
2023-01-31 9:33 ` [v1 09/16] net/mlx5/hws: support range match Alex Vesker
2023-01-31 9:33 ` [v1 10/16] net/mlx5/hws: redesign definer create Alex Vesker
2023-01-31 9:33 ` [v1 11/16] net/mlx5/hws: support partial hash Alex Vesker
2023-01-31 9:33 ` [v1 12/16] net/mlx5/hws: add range definer creation support Alex Vesker
2023-01-31 9:33 ` [v1 13/16] net/mlx5/hws: add FW WQE rule creation logic Alex Vesker
2023-01-31 9:33 ` [v1 14/16] net/mlx5/hws: add debug dump support for range and hash Alex Vesker
2023-01-31 9:33 ` [v1 15/16] net/mlx5/hws: rename pattern cache object Alex Vesker
2023-01-31 9:33 ` Alex Vesker [this message]
2023-02-01 7:27 ` [v2 00/16] net/mlx5/hws: support range and partial hash matching Alex Vesker
2023-02-01 7:28 ` [v2 01/16] net/mlx5/hws: support synchronous drain Alex Vesker
2023-02-01 7:28 ` [v2 02/16] net/mlx5/hws: matcher remove AT and MT limitation Alex Vesker
2023-02-01 7:28 ` [v2 03/16] net/mlx5/hws: support GTA WQE write using FW command Alex Vesker
2023-02-01 7:28 ` [v2 04/16] net/mlx5/hws: add capability query for gen wqe command Alex Vesker
2023-02-01 7:28 ` [v2 05/16] net/mlx5/hws: align RTC create command with PRM format Alex Vesker
2023-02-01 7:28 ` [v2 06/16] net/mlx5/hws: add send FW match STE using gen WQE Alex Vesker
2023-02-01 7:28 ` [v2 07/16] net/mlx5/hws: add send FW range STE WQE Alex Vesker
2023-02-01 7:28 ` [v2 08/16] net/mlx5/hws: move matcher size check to function Alex Vesker
2023-02-01 7:28 ` [v2 09/16] net/mlx5/hws: support range match Alex Vesker
2023-02-01 7:28 ` [v2 10/16] net/mlx5/hws: redesign definer create Alex Vesker
2023-02-01 7:28 ` [v2 11/16] net/mlx5/hws: support partial hash Alex Vesker
2023-02-01 7:28 ` [v2 12/16] net/mlx5/hws: add range definer creation support Alex Vesker
2023-02-01 7:28 ` [v2 13/16] net/mlx5/hws: add FW WQE rule creation logic Alex Vesker
2023-02-01 7:28 ` [v2 14/16] net/mlx5/hws: add debug dump support for range and hash Alex Vesker
2023-02-01 7:28 ` [v2 15/16] net/mlx5/hws: rename pattern cache object Alex Vesker
2023-02-01 7:28 ` [v2 16/16] net/mlx5/hws: cache definer for reuse Alex Vesker
2023-02-06 15:07 ` [v2 00/16] net/mlx5/hws: support range and partial hash matching Matan Azrad
2023-02-13 8:27 ` Raslan Darawsheh
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230131093346.1261066-17-valex@nvidia.com \
--to=valex@nvidia.com \
--cc=dev@dpdk.org \
--cc=matan@nvidia.com \
--cc=orika@nvidia.com \
--cc=thomas@monjalon.net \
--cc=viacheslavo@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).