From: Hamdan Igbaria <hamdani@nvidia.com>
To: <hamdani@nvidia.com>, <viacheslavo@nvidia.com>,
<thomas@monjalon.net>, <suanmingm@nvidia.com>,
Matan Azrad <matan@nvidia.com>
Cc: <dev@dpdk.org>, <orika@nvidia.com>, <valex@nvidia.com>
Subject: [v1 3/3] net/mlx5/hws: Enhance forward table and FTE creation
Date: Mon, 20 Mar 2023 16:12:28 +0200
Message-ID: <20230320141229.104748-3-hamdani@nvidia.com>
In-Reply-To: <20230320141229.104748-1-hamdani@nvidia.com>
Change FW flow table (FT) and flow table entry (FTE) creation to allow
dynamic creation; until now only an FTE with a vport destination action
was supported.
Also make forward table creation generic: the caller now passes the FTE
attributes instead of a fixed vport destination.
Signed-off-by: Hamdan Igbaria <hamdani@nvidia.com>
Reviewed-by: Alex Vesker <valex@nvidia.com>
---
drivers/common/mlx5/mlx5_prm.h | 20 +++++++--
drivers/net/mlx5/hws/mlx5dr_cmd.c | 66 +++++++++++++++++------------
drivers/net/mlx5/hws/mlx5dr_cmd.h | 28 +++++++++---
drivers/net/mlx5/hws/mlx5dr_table.c | 12 ++++--
4 files changed, 83 insertions(+), 43 deletions(-)
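
Note for reviewers: a minimal sketch of the new call pattern, mirroring the
mlx5dr_table.c hunk below. It assumes the same setup as in
mlx5dr_table_up_default_fdb_miss_tbl() (ctx, ft_attr and the e-switch caps
already initialized); it is illustration only, not an additional code path.

    struct mlx5dr_cmd_set_fte_attr fte_attr = {0};
    struct mlx5dr_cmd_forward_tbl *default_miss;

    /* Describe the single FTE the forward table should carry:
     * forward everything to the e-switch manager vport.
     */
    fte_attr.action_flags = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
    fte_attr.destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
    fte_attr.destination_id = ctx->caps->eswitch_manager_vport_number;

    /* One call creates the FT, its flow group and the FTE. */
    default_miss = mlx5dr_cmd_forward_tbl_create(mlx5dr_context_get_local_ibv(ctx),
                                                 &ft_attr, &fte_attr);
    if (!default_miss)
        return rte_errno;

    /* Teardown releases the FTE, FG, FT and the table struct itself. */
    mlx5dr_cmd_forward_tbl_destroy(default_miss);
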
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index 6b72039bdd..d6af069fae 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -4951,10 +4951,17 @@ struct mlx5_ifc_query_flow_table_out_bits {
enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0,
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
};
-enum {
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4,
+enum mlx5_flow_context_action {
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 1 << 2,
+};
+
+enum mlx5_flow_context_flow_source {
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT = 0x0,
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK = 0x1,
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT = 0x2,
};
struct mlx5_ifc_set_fte_out_bits {
@@ -4992,11 +4999,16 @@ struct mlx5_ifc_flow_context_bits {
u8 reserved_at_60[0x10];
u8 action[0x10];
u8 extended_destination[0x1];
- u8 reserved_at_81[0x7];
+ u8 reserved_at_81[0x1];
+ u8 flow_source[0x2];
+ u8 encrypt_decrypt_type[0x4];
u8 destination_list_size[0x18];
u8 reserved_at_a0[0x8];
u8 flow_counter_list_size[0x18];
- u8 reserved_at_c0[0x1740];
+ u8 packet_reformat_id[0x20];
+ u8 reserved_at_e0[0x40];
+ u8 encrypt_decrypt_obj_id[0x20];
+ u8 reserved_at_140[0x16c0];
/* Currently only one destination */
union mlx5_ifc_dest_format_flow_counter_list_auto_bits destination[1];
};
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c
index 6e7d6eb1ac..369bc8bf55 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.c
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c
@@ -140,17 +140,18 @@ mlx5dr_cmd_flow_group_create(struct ibv_context *ctx,
return devx_obj;
}
-static struct mlx5dr_devx_obj *
-mlx5dr_cmd_set_vport_fte(struct ibv_context *ctx,
- uint32_t table_type,
- uint32_t table_id,
- uint32_t group_id,
- uint32_t vport_id)
+struct mlx5dr_devx_obj *
+mlx5dr_cmd_set_fte(struct ibv_context *ctx,
+ uint32_t table_type,
+ uint32_t table_id,
+ uint32_t group_id,
+ struct mlx5dr_cmd_set_fte_attr *fte_attr)
{
uint32_t in[MLX5_ST_SZ_DW(set_fte_in) + MLX5_ST_SZ_DW(dest_format)] = {0};
uint32_t out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
struct mlx5dr_devx_obj *devx_obj;
void *in_flow_context;
+ uint32_t action_flags;
void *in_dests;
devx_obj = simple_malloc(sizeof(*devx_obj));
@@ -166,50 +167,51 @@ mlx5dr_cmd_set_vport_fte(struct ibv_context *ctx,
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
- MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
- MLX5_SET(flow_context, in_flow_context, action, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+ MLX5_SET(flow_context, in_flow_context, flow_source, fte_attr->flow_source);
+
+ action_flags = fte_attr->action_flags;
+ MLX5_SET(flow_context, in_flow_context, action, action_flags);
- in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
- MLX5_SET(dest_format, in_dests, destination_type,
- MLX5_FLOW_DESTINATION_TYPE_VPORT);
- MLX5_SET(dest_format, in_dests, destination_id, vport_id);
+ if (action_flags & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ /* Only destination_list_size of size 1 is supported */
+ MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
+ in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+ MLX5_SET(dest_format, in_dests, destination_type, fte_attr->destination_type);
+ MLX5_SET(dest_format, in_dests, destination_id, fte_attr->destination_id);
+ }
devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
if (!devx_obj->obj) {
DR_LOG(ERR, "Failed to create FTE");
- simple_free(devx_obj);
rte_errno = errno;
- return NULL;
+ goto free_devx;
}
return devx_obj;
-}
-void mlx5dr_cmd_miss_ft_destroy(struct mlx5dr_cmd_forward_tbl *tbl)
-{
- mlx5dr_cmd_destroy_obj(tbl->fte);
- mlx5dr_cmd_destroy_obj(tbl->fg);
- mlx5dr_cmd_destroy_obj(tbl->ft);
+free_devx:
+ simple_free(devx_obj);
+ return NULL;
}
struct mlx5dr_cmd_forward_tbl *
-mlx5dr_cmd_miss_ft_create(struct ibv_context *ctx,
- struct mlx5dr_cmd_ft_create_attr *ft_attr,
- uint32_t vport)
+mlx5dr_cmd_forward_tbl_create(struct ibv_context *ctx,
+ struct mlx5dr_cmd_ft_create_attr *ft_attr,
+ struct mlx5dr_cmd_set_fte_attr *fte_attr)
{
struct mlx5dr_cmd_fg_attr fg_attr = {0};
struct mlx5dr_cmd_forward_tbl *tbl;
tbl = simple_calloc(1, sizeof(*tbl));
if (!tbl) {
- DR_LOG(ERR, "Failed to allocate memory for forward default");
+ DR_LOG(ERR, "Failed to allocate memory");
rte_errno = ENOMEM;
return NULL;
}
tbl->ft = mlx5dr_cmd_flow_table_create(ctx, ft_attr);
if (!tbl->ft) {
- DR_LOG(ERR, "Failed to create FT for miss-table");
+ DR_LOG(ERR, "Failed to create FT");
goto free_tbl;
}
@@ -218,13 +220,13 @@ mlx5dr_cmd_miss_ft_create(struct ibv_context *ctx,
tbl->fg = mlx5dr_cmd_flow_group_create(ctx, &fg_attr);
if (!tbl->fg) {
- DR_LOG(ERR, "Failed to create FG for miss-table");
+ DR_LOG(ERR, "Failed to create FG");
goto free_ft;
}
- tbl->fte = mlx5dr_cmd_set_vport_fte(ctx, ft_attr->type, tbl->ft->id, tbl->fg->id, vport);
+ tbl->fte = mlx5dr_cmd_set_fte(ctx, ft_attr->type, tbl->ft->id, tbl->fg->id, fte_attr);
if (!tbl->fte) {
- DR_LOG(ERR, "Failed to create FTE for miss-table");
+ DR_LOG(ERR, "Failed to create FTE");
goto free_fg;
}
return tbl;
@@ -238,6 +240,14 @@ mlx5dr_cmd_miss_ft_create(struct ibv_context *ctx,
return NULL;
}
+void mlx5dr_cmd_forward_tbl_destroy(struct mlx5dr_cmd_forward_tbl *tbl)
+{
+ mlx5dr_cmd_destroy_obj(tbl->fte);
+ mlx5dr_cmd_destroy_obj(tbl->fg);
+ mlx5dr_cmd_destroy_obj(tbl->ft);
+ simple_free(tbl);
+}
+
void mlx5dr_cmd_set_attr_connect_miss_tbl(struct mlx5dr_context *ctx,
uint32_t fw_ft_type,
enum mlx5dr_table_type type,
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h
index 7d03f3d169..e57013c309 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.h
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h
@@ -5,6 +5,13 @@
#ifndef MLX5DR_CMD_H_
#define MLX5DR_CMD_H_
+struct mlx5dr_cmd_set_fte_attr {
+ uint32_t action_flags;
+ uint8_t destination_type;
+ uint32_t destination_id;
+ uint8_t flow_source;
+};
+
struct mlx5dr_cmd_ft_create_attr {
uint8_t type;
uint8_t level;
@@ -263,6 +270,20 @@ mlx5dr_cmd_header_modify_pattern_create(struct ibv_context *ctx,
uint32_t pattern_length,
uint8_t *actions);
+struct mlx5dr_devx_obj *
+mlx5dr_cmd_set_fte(struct ibv_context *ctx,
+ uint32_t table_type,
+ uint32_t table_id,
+ uint32_t group_id,
+ struct mlx5dr_cmd_set_fte_attr *fte_attr);
+
+struct mlx5dr_cmd_forward_tbl *
+mlx5dr_cmd_forward_tbl_create(struct ibv_context *ctx,
+ struct mlx5dr_cmd_ft_create_attr *ft_attr,
+ struct mlx5dr_cmd_set_fte_attr *fte_attr);
+
+void mlx5dr_cmd_forward_tbl_destroy(struct mlx5dr_cmd_forward_tbl *tbl);
+
struct mlx5dr_devx_obj *
mlx5dr_cmd_alias_obj_create(struct ibv_context *ctx,
struct mlx5dr_cmd_alias_obj_create_attr *alias_attr);
@@ -275,13 +296,6 @@ int mlx5dr_cmd_query_ib_port(struct ibv_context *ctx,
int mlx5dr_cmd_query_caps(struct ibv_context *ctx,
struct mlx5dr_cmd_query_caps *caps);
-void mlx5dr_cmd_miss_ft_destroy(struct mlx5dr_cmd_forward_tbl *tbl);
-
-struct mlx5dr_cmd_forward_tbl *
-mlx5dr_cmd_miss_ft_create(struct ibv_context *ctx,
- struct mlx5dr_cmd_ft_create_attr *ft_attr,
- uint32_t vport);
-
void mlx5dr_cmd_set_attr_connect_miss_tbl(struct mlx5dr_context *ctx,
uint32_t fw_ft_type,
enum mlx5dr_table_type type,
diff --git a/drivers/net/mlx5/hws/mlx5dr_table.c b/drivers/net/mlx5/hws/mlx5dr_table.c
index 327e2ec710..8474a9cf61 100644
--- a/drivers/net/mlx5/hws/mlx5dr_table.c
+++ b/drivers/net/mlx5/hws/mlx5dr_table.c
@@ -20,6 +20,7 @@ static int
mlx5dr_table_up_default_fdb_miss_tbl(struct mlx5dr_table *tbl)
{
struct mlx5dr_cmd_ft_create_attr ft_attr = {0};
+ struct mlx5dr_cmd_set_fte_attr fte_attr = {0};
struct mlx5dr_cmd_forward_tbl *default_miss;
struct mlx5dr_context *ctx = tbl->ctx;
uint8_t tbl_type = tbl->type;
@@ -40,8 +41,12 @@ mlx5dr_table_up_default_fdb_miss_tbl(struct mlx5dr_table *tbl)
assert(ctx->caps->eswitch_manager);
vport = ctx->caps->eswitch_manager_vport_number;
- default_miss = mlx5dr_cmd_miss_ft_create(mlx5dr_context_get_local_ibv(ctx),
- &ft_attr, vport);
+ fte_attr.action_flags = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ fte_attr.destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ fte_attr.destination_id = vport;
+
+ default_miss = mlx5dr_cmd_forward_tbl_create(mlx5dr_context_get_local_ibv(ctx),
+ &ft_attr, &fte_attr);
if (!default_miss) {
DR_LOG(ERR, "Failed to default miss table type: 0x%x", tbl_type);
return rte_errno;
@@ -66,9 +71,8 @@ static void mlx5dr_table_down_default_fdb_miss_tbl(struct mlx5dr_table *tbl)
if (--default_miss->refcount)
return;
- mlx5dr_cmd_miss_ft_destroy(default_miss);
+ mlx5dr_cmd_forward_tbl_destroy(default_miss);
- simple_free(default_miss);
ctx->common_res[tbl_type].default_miss = NULL;
}
--
2.26.3
Thread overview: 5+ messages
2023-03-20 14:12 [v1 1/3] net/mlx5/hws: support dest root table action Hamdan Igbaria
2023-03-20 14:12 ` [v1 2/3] net/mlx5/hws: dump FT icm addresses Hamdan Igbaria
2023-03-20 14:12 ` Hamdan Igbaria [this message]
2023-05-15 8:24 ` [v1 1/3] net/mlx5/hws: support dest root table action Matan Azrad
2023-05-24 9:01 ` Raslan Darawsheh