From: Suanming Mou <suanmingm@nvidia.com>
To: Dariusz Sosnowski <dsosnowski@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>,
Bing Zhao <bingz@nvidia.com>, Ori Kam <orika@nvidia.com>,
Matan Azrad <matan@nvidia.com>
Cc: <dev@dpdk.org>, <rasland@nvidia.com>, Alex Vesker <valex@nvidia.com>
Subject: [PATCH 1/3] net/mlx5/hws: support jump FDB Rx
Date: Tue, 25 Feb 2025 08:45:25 +0800
Message-ID: <20250225004527.2066812-2-suanmingm@nvidia.com>
In-Reply-To: <20250225004527.2066812-1-suanmingm@nvidia.com>

Before FW introduced the JUMP_FDB_RX action feature, jumping from
FDB Tx to FDB Rx was not allowed. The JUMP_FDB_RX feature introduces
an internal loopback for the Tx case and allows REG C0, C1 and B to
be preserved as well.

This commit adds the JUMP_FDB_RX capability bit check and uses
JUMP_FDB_RX instead of FT as the destination table action type for
the FDB Rx case.

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Signed-off-by: Alex Vesker <valex@nvidia.com>
---
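
Note: the following is a minimal, self-contained C sketch (not part of the
patch) illustrating the capability test implemented by
mlx5dr_context_cap_stc(): the 128-bit STC action-type capability mask is
split into two 64-bit words (stc_action_type_63_0 and stc_action_type_127_64),
and a bit index of 64 or above is rebased into the high word before testing.
The struct and helper names below are placeholders for illustration only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define STC_ACTION_TYPE_BIT_64_INDEX                      64
#define STC_ACTION_TYPE_JUMP_FLOW_TABLE_FDB_RX_BIT_INDEX  71

/* Placeholder for the two 64-bit capability words queried from FW. */
struct caps_sketch {
	uint64_t stc_action_type_63_0;   /* capability bits 0..63 */
	uint64_t stc_action_type_127_64; /* capability bits 64..127 */
};

/* Return true when the given STC action-type capability bit is set. */
static bool cap_stc_bit_set(const struct caps_sketch *caps, uint32_t bit)
{
	if (bit >= STC_ACTION_TYPE_BIT_64_INDEX)
		return caps->stc_action_type_127_64 &
		       (UINT64_C(1) << (bit - STC_ACTION_TYPE_BIT_64_INDEX));
	return caps->stc_action_type_63_0 & (UINT64_C(1) << bit);
}

int main(void)
{
	/* Pretend FW reported bit 71 (bit 7 of the high word) as set. */
	struct caps_sketch caps = {
		.stc_action_type_127_64 = UINT64_C(1) << 7,
	};

	printf("JUMP_FDB_RX supported: %d\n",
	       cap_stc_bit_set(&caps, STC_ACTION_TYPE_JUMP_FLOW_TABLE_FDB_RX_BIT_INDEX));
	return 0;
}

When the bit is reported and the destination table type is FDB Rx, the patch
selects MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FLOW_TABLE_FDB_RX; otherwise it falls
back to the plain JUMP_TO_FT action type.
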
drivers/common/mlx5/mlx5_prm.h | 9 ++++++++-
drivers/net/mlx5/hws/mlx5dr_action.c | 26 +++++++++++++++++++++-----
drivers/net/mlx5/hws/mlx5dr_action.h | 4 ++++
drivers/net/mlx5/hws/mlx5dr_cmd.c | 9 +++++++++
drivers/net/mlx5/hws/mlx5dr_cmd.h | 2 ++
drivers/net/mlx5/hws/mlx5dr_context.c | 17 +++++++++++++++++
drivers/net/mlx5/hws/mlx5dr_context.h | 2 ++
7 files changed, 63 insertions(+), 6 deletions(-)
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index 3fc3b0cd2a..84e3347794 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -2466,7 +2466,8 @@ struct mlx5_ifc_wqe_based_flow_table_cap_bits {
u8 reserved_at_60[0x8];
u8 max_header_modify_pattern_length[0x8];
u8 ste_format[0x10];
- u8 stc_action_type[0x80];
+ u8 stc_action_type_63_0[0x40];
+ u8 stc_action_type_127_64[0x40];
u8 header_insert_type[0x10];
u8 header_remove_type[0x10];
u8 trivial_match_definer[0x20];
@@ -3543,6 +3544,11 @@ enum mlx5_ifc_rtc_reparse_mode {
MLX5_IFC_RTC_REPARSE_BY_STC = 0x2,
};
+enum mlx5_ifc_stc_action_type_bit_index {
+ MLX5_IFC_STC_ACTION_TYPE_BIT_64_INDEX = 64,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_FLOW_TABLE_FDB_RX_BIT_INDEX = 71,
+};
+
#define MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX 16
struct mlx5_ifc_rtc_bits {
@@ -3621,6 +3627,7 @@ enum mlx5_ifc_stc_action_type {
MLX5_IFC_STC_ACTION_TYPE_ALLOW = 0x84,
MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT = 0x85,
MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK = 0x86,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FLOW_TABLE_FDB_RX = 0x87,
};
enum mlx5_ifc_stc_reparse_mode {
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c
index b9452a3ebc..e21db5b327 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.c
+++ b/drivers/net/mlx5/hws/mlx5dr_action.c
@@ -803,6 +803,9 @@ int mlx5dr_action_root_build_attr(struct mlx5dr_rule_action rule_actions[],
switch (action->type) {
case MLX5DR_ACTION_TYP_TBL:
+ attr[i].type = MLX5DV_FLOW_ACTION_DEST_DEVX;
+ attr[i].obj = action->dest_tbl.devx_obj->obj;
+ break;
case MLX5DR_ACTION_TYP_TIR:
attr[i].type = MLX5DV_FLOW_ACTION_DEST_DEVX;
attr[i].obj = action->devx_obj;
@@ -1097,6 +1100,17 @@ static void mlx5dr_action_fill_stc_attr(struct mlx5dr_action *action,
}
break;
case MLX5DR_ACTION_TYP_TBL:
+ attr->action_offset = MLX5DR_ACTION_OFFSET_HIT;
+ attr->dest_table_id = obj->id;
+ /* Only for unified FDB Rx case */
+ if (mlx5dr_context_cap_stc(action->ctx,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_FLOW_TABLE_FDB_RX_BIT_INDEX) &&
+ action->dest_tbl.type == MLX5DR_TABLE_TYPE_FDB_RX)
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FLOW_TABLE_FDB_RX;
+ else
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT;
+
+ break;
case MLX5DR_ACTION_TYP_DEST_ARRAY:
attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT;
attr->action_offset = MLX5DR_ACTION_OFFSET_HIT;
@@ -1419,17 +1433,19 @@ mlx5dr_action_create_dest_table(struct mlx5dr_context *ctx,
if (!action)
return NULL;
+ action->dest_tbl.type = tbl->type;
+
if (mlx5dr_action_is_root_flags(flags)) {
if (mlx5dr_context_shared_gvmi_used(ctx))
- action->devx_obj = tbl->local_ft->obj;
+ action->dest_tbl.devx_obj = tbl->local_ft;
else
- action->devx_obj = tbl->ft->obj;
+ action->dest_tbl.devx_obj = tbl->ft;
} else {
+ action->dest_tbl.devx_obj = tbl->ft;
+
ret = mlx5dr_action_create_stcs(action, tbl->ft);
if (ret)
goto free_action;
-
- action->devx_dest.devx_obj = tbl->ft;
}
return action;
@@ -2526,7 +2542,7 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
case MLX5DR_ACTION_TYP_TBL:
dest_list[i].destination_type =
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest_list[i].destination_id = dests[i].dest->devx_dest.devx_obj->id;
+ dest_list[i].destination_id = dests[i].dest->dest_tbl.devx_obj->id;
fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
fte_attr.ignore_flow_level = 1;
break;
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.h b/drivers/net/mlx5/hws/mlx5dr_action.h
index b779e578fd..a582e2abbe 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.h
+++ b/drivers/net/mlx5/hws/mlx5dr_action.h
@@ -226,6 +226,10 @@ struct mlx5dr_action {
struct {
struct mlx5dr_matcher *matcher;
} jump_to_matcher;
+ struct {
+ struct mlx5dr_devx_obj *devx_obj;
+ enum mlx5dr_table_type type;
+ } dest_tbl;
};
};
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c
index f609135ccb..5d1138099d 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.c
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c
@@ -466,6 +466,7 @@ mlx5dr_cmd_stc_modify_set_stc_param(struct mlx5dr_cmd_stc_modify_attr *stc_attr,
MLX5_SET(stc_ste_param_tir, stc_param, tirn, stc_attr->dest_tir_num);
break;
case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT:
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FLOW_TABLE_FDB_RX:
MLX5_SET(stc_ste_param_table, stc_param, table_id, stc_attr->dest_table_id);
break;
case MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST:
@@ -1280,6 +1281,14 @@ int mlx5dr_cmd_query_caps(struct ibv_context *ctx,
caps->fdb_unified_en = MLX5_GET(query_hca_cap_out, out,
capability.wqe_based_flow_table_cap.
fdb_unified_en);
+
+ caps->stc_action_type_63_0 = MLX5_GET64(query_hca_cap_out,
+ out,
+ capability.wqe_based_flow_table_cap.stc_action_type_63_0);
+
+ caps->stc_action_type_127_64 = MLX5_GET64(query_hca_cap_out,
+ out,
+ capability.wqe_based_flow_table_cap.stc_action_type_127_64);
}
if (caps->eswitch_manager) {
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h
index 3c615b8925..eb9643c555 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.h
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h
@@ -250,6 +250,8 @@ struct mlx5dr_cmd_query_caps {
bool roce;
uint16_t roce_max_src_udp_port;
uint16_t roce_min_src_udp_port;
+ uint64_t stc_action_type_63_0;
+ uint64_t stc_action_type_127_64;
bool fdb_unified_en;
};
diff --git a/drivers/net/mlx5/hws/mlx5dr_context.c b/drivers/net/mlx5/hws/mlx5dr_context.c
index 91d05f1f86..a854b83ad8 100644
--- a/drivers/net/mlx5/hws/mlx5dr_context.c
+++ b/drivers/net/mlx5/hws/mlx5dr_context.c
@@ -140,6 +140,23 @@ static int mlx5dr_context_uninit_pd(struct mlx5dr_context *ctx)
return 0;
}
+bool mlx5dr_context_cap_stc(struct mlx5dr_context *ctx, uint32_t bit)
+{
+ uint32_t test_bit = bit;
+
+ if (bit >= MLX5_IFC_STC_ACTION_TYPE_BIT_64_INDEX)
+ test_bit -= MLX5_IFC_STC_ACTION_TYPE_BIT_64_INDEX;
+
+ switch (bit) {
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_FLOW_TABLE_FDB_RX_BIT_INDEX:
+ return ctx->caps->stc_action_type_127_64 & (0x1ull << test_bit);
+ default:
+ break;
+ }
+
+ return false;
+}
+
static void mlx5dr_context_check_hws_supp(struct mlx5dr_context *ctx)
{
struct mlx5dr_cmd_query_caps *caps = ctx->caps;
diff --git a/drivers/net/mlx5/hws/mlx5dr_context.h b/drivers/net/mlx5/hws/mlx5dr_context.h
index e89a093c77..deb7196e39 100644
--- a/drivers/net/mlx5/hws/mlx5dr_context.h
+++ b/drivers/net/mlx5/hws/mlx5dr_context.h
@@ -75,4 +75,6 @@ uint8_t mlx5dr_context_get_reparse_mode(struct mlx5dr_context *ctx);
void mlx5dr_context_set_pool_tbl_attr(struct mlx5dr_pool_attr *attr,
enum mlx5dr_table_type table_type);
+bool mlx5dr_context_cap_stc(struct mlx5dr_context *ctx, uint32_t bit);
+
#endif /* MLX5DR_CONTEXT_H_ */
--
2.34.1