* [PATCH 1/3] net/mlx5/hws: support jump FDB Rx
2025-02-25 0:45 [PATCH 0/3] net/mlx5: support jump FDB Rx Suanming Mou
@ 2025-02-25 0:45 ` Suanming Mou
2025-02-25 0:45 ` [PATCH 2/3] net/mlx5: add jump FDB Rx flag Suanming Mou
2025-02-25 0:45 ` [PATCH 3/3] net/mlx5: allow FDB RSS Suanming Mou
2 siblings, 0 replies; 4+ messages in thread
From: Suanming Mou @ 2025-02-25 0:45 UTC (permalink / raw)
To: Dariusz Sosnowski, Viacheslav Ovsiienko, Bing Zhao, Ori Kam, Matan Azrad
Cc: dev, rasland, Alex Vesker
Before FW introduced the JUMP_FDB_RX action feature, jumping from
FDB Tx to FDB Rx was not allowed. The JUMP_FDB_RX feature introduces
an internal loopback for the Tx case and allows REG C0, C1 and B to
be preserved as well.

This commit adds the JUMP_FDB_RX cap bit check and uses JUMP_FDB_RX
instead of FT for the destination table FDB Rx case.
Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Signed-off-by: Alex Vesker <valex@nvidia.com>
---
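A minimal standalone sketch of the capability check that
mlx5dr_context_cap_stc() implements in this patch: the 128-bit
stc_action_type capability is reported as two 64-bit words, and the new
JUMP_FLOW_TABLE_FDB_RX bit (index 71) is tested in the upper word. The
struct, macro names and main() below are simplified illustrations, not
the driver's real definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define STC_ACTION_TYPE_BIT_64_INDEX 64
#define STC_ACTION_TYPE_JUMP_FLOW_TABLE_FDB_RX_BIT_INDEX 71

/* The 128-bit stc_action_type mask is reported as two 64-bit words. */
struct stc_caps {
        uint64_t stc_action_type_63_0;
        uint64_t stc_action_type_127_64;
};

static bool cap_stc(const struct stc_caps *caps, uint32_t bit)
{
        uint32_t test_bit = bit;

        /* Bits 127..64 live in the upper word; rebase the index. */
        if (bit >= STC_ACTION_TYPE_BIT_64_INDEX)
                test_bit -= STC_ACTION_TYPE_BIT_64_INDEX;

        switch (bit) {
        case STC_ACTION_TYPE_JUMP_FLOW_TABLE_FDB_RX_BIT_INDEX:
                return caps->stc_action_type_127_64 & (1ULL << test_bit);
        default:
                return false;
        }
}

int main(void)
{
        /* Pretend FW reported bit 71: bit (71 - 64) of the upper word. */
        struct stc_caps caps = { .stc_action_type_127_64 = 1ULL << 7 };

        printf("JUMP_FDB_RX supported: %d\n",
               cap_stc(&caps, STC_ACTION_TYPE_JUMP_FLOW_TABLE_FDB_RX_BIT_INDEX));
        return 0;
}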
drivers/common/mlx5/mlx5_prm.h | 9 ++++++++-
drivers/net/mlx5/hws/mlx5dr_action.c | 26 +++++++++++++++++++++-----
drivers/net/mlx5/hws/mlx5dr_action.h | 4 ++++
drivers/net/mlx5/hws/mlx5dr_cmd.c | 9 +++++++++
drivers/net/mlx5/hws/mlx5dr_cmd.h | 2 ++
drivers/net/mlx5/hws/mlx5dr_context.c | 17 +++++++++++++++++
drivers/net/mlx5/hws/mlx5dr_context.h | 2 ++
7 files changed, 63 insertions(+), 6 deletions(-)
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index 3fc3b0cd2a..84e3347794 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -2466,7 +2466,8 @@ struct mlx5_ifc_wqe_based_flow_table_cap_bits {
u8 reserved_at_60[0x8];
u8 max_header_modify_pattern_length[0x8];
u8 ste_format[0x10];
- u8 stc_action_type[0x80];
+ u8 stc_action_type_63_0[0x40];
+ u8 stc_action_type_127_64[0x40];
u8 header_insert_type[0x10];
u8 header_remove_type[0x10];
u8 trivial_match_definer[0x20];
@@ -3543,6 +3544,11 @@ enum mlx5_ifc_rtc_reparse_mode {
MLX5_IFC_RTC_REPARSE_BY_STC = 0x2,
};
+enum mlx5_ifc_stc_action_type_bit_index {
+ MLX5_IFC_STC_ACTION_TYPE_BIT_64_INDEX = 64,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_FLOW_TABLE_FDB_RX_BIT_INDEX = 71,
+};
+
#define MLX5_IFC_RTC_LINEAR_LOOKUP_TBL_LOG_MAX 16
struct mlx5_ifc_rtc_bits {
@@ -3621,6 +3627,7 @@ enum mlx5_ifc_stc_action_type {
MLX5_IFC_STC_ACTION_TYPE_ALLOW = 0x84,
MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_VPORT = 0x85,
MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_UPLINK = 0x86,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FLOW_TABLE_FDB_RX = 0x87,
};
enum mlx5_ifc_stc_reparse_mode {
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c
index b9452a3ebc..e21db5b327 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.c
+++ b/drivers/net/mlx5/hws/mlx5dr_action.c
@@ -803,6 +803,9 @@ int mlx5dr_action_root_build_attr(struct mlx5dr_rule_action rule_actions[],
switch (action->type) {
case MLX5DR_ACTION_TYP_TBL:
+ attr[i].type = MLX5DV_FLOW_ACTION_DEST_DEVX;
+ attr[i].obj = action->dest_tbl.devx_obj->obj;
+ break;
case MLX5DR_ACTION_TYP_TIR:
attr[i].type = MLX5DV_FLOW_ACTION_DEST_DEVX;
attr[i].obj = action->devx_obj;
@@ -1097,6 +1100,17 @@ static void mlx5dr_action_fill_stc_attr(struct mlx5dr_action *action,
}
break;
case MLX5DR_ACTION_TYP_TBL:
+ attr->action_offset = MLX5DR_ACTION_OFFSET_HIT;
+ attr->dest_table_id = obj->id;
+ /* Only for unified FDB Rx case */
+ if (mlx5dr_context_cap_stc(action->ctx,
+ MLX5_IFC_STC_ACTION_TYPE_JUMP_FLOW_TABLE_FDB_RX_BIT_INDEX) &&
+ action->dest_tbl.type == MLX5DR_TABLE_TYPE_FDB_RX)
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FLOW_TABLE_FDB_RX;
+ else
+ attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT;
+
+ break;
case MLX5DR_ACTION_TYP_DEST_ARRAY:
attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT;
attr->action_offset = MLX5DR_ACTION_OFFSET_HIT;
@@ -1419,17 +1433,19 @@ mlx5dr_action_create_dest_table(struct mlx5dr_context *ctx,
if (!action)
return NULL;
+ action->dest_tbl.type = tbl->type;
+
if (mlx5dr_action_is_root_flags(flags)) {
if (mlx5dr_context_shared_gvmi_used(ctx))
- action->devx_obj = tbl->local_ft->obj;
+ action->dest_tbl.devx_obj = tbl->local_ft;
else
- action->devx_obj = tbl->ft->obj;
+ action->dest_tbl.devx_obj = tbl->ft;
} else {
+ action->dest_tbl.devx_obj = tbl->ft;
+
ret = mlx5dr_action_create_stcs(action, tbl->ft);
if (ret)
goto free_action;
-
- action->devx_dest.devx_obj = tbl->ft;
}
return action;
@@ -2526,7 +2542,7 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
case MLX5DR_ACTION_TYP_TBL:
dest_list[i].destination_type =
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest_list[i].destination_id = dests[i].dest->devx_dest.devx_obj->id;
+ dest_list[i].destination_id = dests[i].dest->dest_tbl.devx_obj->id;
fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
fte_attr.ignore_flow_level = 1;
break;
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.h b/drivers/net/mlx5/hws/mlx5dr_action.h
index b779e578fd..a582e2abbe 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.h
+++ b/drivers/net/mlx5/hws/mlx5dr_action.h
@@ -226,6 +226,10 @@ struct mlx5dr_action {
struct {
struct mlx5dr_matcher *matcher;
} jump_to_matcher;
+ struct {
+ struct mlx5dr_devx_obj *devx_obj;
+ enum mlx5dr_table_type type;
+ } dest_tbl;
};
};
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c
index f609135ccb..5d1138099d 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.c
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c
@@ -466,6 +466,7 @@ mlx5dr_cmd_stc_modify_set_stc_param(struct mlx5dr_cmd_stc_modify_attr *stc_attr,
MLX5_SET(stc_ste_param_tir, stc_param, tirn, stc_attr->dest_tir_num);
break;
case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT:
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FLOW_TABLE_FDB_RX:
MLX5_SET(stc_ste_param_table, stc_param, table_id, stc_attr->dest_table_id);
break;
case MLX5_IFC_STC_ACTION_TYPE_ACC_MODIFY_LIST:
@@ -1280,6 +1281,14 @@ int mlx5dr_cmd_query_caps(struct ibv_context *ctx,
caps->fdb_unified_en = MLX5_GET(query_hca_cap_out, out,
capability.wqe_based_flow_table_cap.
fdb_unified_en);
+
+ caps->stc_action_type_63_0 = MLX5_GET64(query_hca_cap_out,
+ out,
+ capability.wqe_based_flow_table_cap.stc_action_type_63_0);
+
+ caps->stc_action_type_127_64 = MLX5_GET64(query_hca_cap_out,
+ out,
+ capability.wqe_based_flow_table_cap.stc_action_type_127_64);
}
if (caps->eswitch_manager) {
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h
index 3c615b8925..eb9643c555 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.h
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h
@@ -250,6 +250,8 @@ struct mlx5dr_cmd_query_caps {
bool roce;
uint16_t roce_max_src_udp_port;
uint16_t roce_min_src_udp_port;
+ uint64_t stc_action_type_63_0;
+ uint64_t stc_action_type_127_64;
bool fdb_unified_en;
};
diff --git a/drivers/net/mlx5/hws/mlx5dr_context.c b/drivers/net/mlx5/hws/mlx5dr_context.c
index 91d05f1f86..a854b83ad8 100644
--- a/drivers/net/mlx5/hws/mlx5dr_context.c
+++ b/drivers/net/mlx5/hws/mlx5dr_context.c
@@ -140,6 +140,23 @@ static int mlx5dr_context_uninit_pd(struct mlx5dr_context *ctx)
return 0;
}
+bool mlx5dr_context_cap_stc(struct mlx5dr_context *ctx, uint32_t bit)
+{
+ uint32_t test_bit = bit;
+
+ if (bit >= MLX5_IFC_STC_ACTION_TYPE_BIT_64_INDEX)
+ test_bit -= MLX5_IFC_STC_ACTION_TYPE_BIT_64_INDEX;
+
+ switch (bit) {
+ case MLX5_IFC_STC_ACTION_TYPE_JUMP_FLOW_TABLE_FDB_RX_BIT_INDEX:
+ return ctx->caps->stc_action_type_127_64 & (0x1ull << test_bit);
+ default:
+ break;
+ }
+
+ return false;
+}
+
static void mlx5dr_context_check_hws_supp(struct mlx5dr_context *ctx)
{
struct mlx5dr_cmd_query_caps *caps = ctx->caps;
diff --git a/drivers/net/mlx5/hws/mlx5dr_context.h b/drivers/net/mlx5/hws/mlx5dr_context.h
index e89a093c77..deb7196e39 100644
--- a/drivers/net/mlx5/hws/mlx5dr_context.h
+++ b/drivers/net/mlx5/hws/mlx5dr_context.h
@@ -75,4 +75,6 @@ uint8_t mlx5dr_context_get_reparse_mode(struct mlx5dr_context *ctx);
void mlx5dr_context_set_pool_tbl_attr(struct mlx5dr_pool_attr *attr,
enum mlx5dr_table_type table_type);
+bool mlx5dr_context_cap_stc(struct mlx5dr_context *ctx, uint32_t bit);
+
#endif /* MLX5DR_CONTEXT_H_ */
--
2.34.1
* [PATCH 2/3] net/mlx5: add jump FDB Rx flag
2025-02-25 0:45 [PATCH 0/3] net/mlx5: support jump FDB Rx Suanming Mou
2025-02-25 0:45 ` [PATCH 1/3] net/mlx5/hws: " Suanming Mou
@ 2025-02-25 0:45 ` Suanming Mou
2025-02-25 0:45 ` [PATCH 3/3] net/mlx5: allow FDB RSS Suanming Mou
2 siblings, 0 replies; 4+ messages in thread
From: Suanming Mou @ 2025-02-25 0:45 UTC (permalink / raw)
To: Dariusz Sosnowski, Viacheslav Ovsiienko, Bing Zhao, Ori Kam, Matan Azrad
Cc: dev, rasland
When jump FDB Rx is supported, a flow will be able to jump from
FDB Tx to FDB Rx. In that case, the destination action in the FDB
Rx table should support FDB Tx as well.
Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
---
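A small standalone sketch of the flag handling that
flow_hw_grp_create_cb() gains in this patch: when the jump-FDB-Rx
capability is present, the destination-table action created for an FDB
Rx table is also flagged as usable from FDB Tx. Flag values and names
here are illustrative placeholders, not the real mlx5dr definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative placeholder values; the real flags come from mlx5dr. */
#define HWS_FDB_RX 0x1u
#define HWS_FDB_TX 0x2u

enum table_type { TABLE_TYPE_FDB_RX, TABLE_TYPE_FDB_TX };

static uint32_t dest_table_flags(enum table_type type, bool jump_fdb_rx_en)
{
        uint32_t flags = (type == TABLE_TYPE_FDB_RX) ? HWS_FDB_RX : HWS_FDB_TX;

        /* An FDB Rx destination must also be reachable from FDB Tx rules
         * once jumping from FDB Tx to FDB Rx is allowed.
         */
        if (jump_fdb_rx_en && type == TABLE_TYPE_FDB_RX)
                flags |= HWS_FDB_TX;
        return flags;
}

int main(void)
{
        printf("FDB Rx dest flags: 0x%x\n",
               dest_table_flags(TABLE_TYPE_FDB_RX, true));
        return 0;
}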
drivers/common/mlx5/mlx5_devx_cmds.c | 8 ++++++++
drivers/common/mlx5/mlx5_devx_cmds.h | 1 +
drivers/net/mlx5/linux/mlx5_os.c | 1 +
drivers/net/mlx5/mlx5.h | 1 +
drivers/net/mlx5/mlx5_flow_hw.c | 8 +++++++-
5 files changed, 18 insertions(+), 1 deletion(-)
diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c
index f504b29f31..eb8553e8ad 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.c
+++ b/drivers/common/mlx5/mlx5_devx_cmds.c
@@ -924,6 +924,7 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
bool hca_cap_2_sup;
uint64_t general_obj_types_supported = 0;
+ uint64_t stc_action_type_127_64;
void *hcattr;
int rc, i;
@@ -1352,6 +1353,13 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
attr->fdb_unified_en = MLX5_GET(wqe_based_flow_table_cap,
hcattr,
fdb_unified_en);
+ stc_action_type_127_64 = MLX5_GET64(wqe_based_flow_table_cap,
+ hcattr,
+ stc_action_type_127_64);
+ if (stc_action_type_127_64 &
+ (1 << (MLX5_IFC_STC_ACTION_TYPE_JUMP_FLOW_TABLE_FDB_RX_BIT_INDEX -
+ MLX5_IFC_STC_ACTION_TYPE_BIT_64_INDEX)))
+ attr->jump_fdb_rx_en = true;
}
/* Query HCA attribute for ROCE. */
if (attr->roce) {
diff --git a/drivers/common/mlx5/mlx5_devx_cmds.h b/drivers/common/mlx5/mlx5_devx_cmds.h
index 8de4210fb2..6c726a0d46 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.h
+++ b/drivers/common/mlx5/mlx5_devx_cmds.h
@@ -326,6 +326,7 @@ struct mlx5_hca_attr {
uint32_t lag_rx_port_affinity:1;
uint32_t wqe_based_flow_table_sup:1;
uint32_t fdb_unified_en:1;
+ uint32_t jump_fdb_rx_en:1;
uint8_t max_header_modify_pattern_length;
uint64_t system_image_guid;
uint32_t log_max_conn_track_offload:5;
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 9410211e3b..4e64026137 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1718,6 +1718,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
priv->unified_fdb_en = !!priv->master;
DRV_LOG(DEBUG, "port %u: unified FDB %s enabled.",
eth_dev->data->port_id, priv->unified_fdb_en ? "is" : "isn't");
+ priv->jump_fdb_rx_en = sh->cdev->config.hca_attr.jump_fdb_rx_en;
if (priv->sh->config.dv_esw_en) {
uint32_t usable_bits;
uint32_t required_bits;
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index f73f6e63ff..545ba48b3c 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1987,6 +1987,7 @@ struct mlx5_priv {
uint32_t num_lag_ports:4; /* Number of ports can be bonded. */
uint32_t tunnel_enabled:1; /* If tunnel offloading is enabled on rxqs. */
uint32_t unified_fdb_en:1; /* Unified FDB flag per port. */
+ uint32_t jump_fdb_rx_en:1; /* Jump from FDB Tx to FDB Rx flag per port. */
uint16_t domain_id; /* Switch domain identifier. */
uint16_t vport_id; /* Associated VF vport index (if any). */
uint32_t vport_meta_tag; /* Used for vport index match ove VF LAG. */
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index f0888dbf0e..83f55ed3e8 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -9322,6 +9322,7 @@ flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx)
struct mlx5_flow_group *grp_data;
struct mlx5dr_table *tbl = NULL;
struct mlx5dr_action *jump;
+ uint32_t hws_flags;
uint32_t idx = 0;
MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
attr->transfer ? "FDB" : "NIC", attr->egress ? "egress" : "ingress",
@@ -9342,10 +9343,15 @@ flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx)
goto error;
grp_data->tbl = tbl;
if (attr->group) {
+ hws_flags = mlx5_hw_act_dest_table_flag[dr_tbl_attr.type];
+ /* For case of jump from FDB Tx to FDB Rx as it is supported now. */
+ if (priv->jump_fdb_rx_en &&
+ dr_tbl_attr.type == MLX5DR_TABLE_TYPE_FDB_RX)
+ hws_flags |= MLX5DR_ACTION_FLAG_HWS_FDB_TX;
/* Jump action be used by non-root table. */
jump = mlx5dr_action_create_dest_table
(priv->dr_ctx, tbl,
- mlx5_hw_act_dest_table_flag[dr_tbl_attr.type]);
+ hws_flags);
if (!jump)
goto error;
grp_data->jump.hws_action = jump;
--
2.34.1
* [PATCH 3/3] net/mlx5: allow FDB RSS
2025-02-25 0:45 [PATCH 0/3] net/mlx5: support jump FDB Rx Suanming Mou
2025-02-25 0:45 ` [PATCH 1/3] net/mlx5/hws: " Suanming Mou
2025-02-25 0:45 ` [PATCH 2/3] net/mlx5: add jump FDB Rx flag Suanming Mou
@ 2025-02-25 0:45 ` Suanming Mou
2 siblings, 0 replies; 4+ messages in thread
From: Suanming Mou @ 2025-02-25 0:45 UTC (permalink / raw)
To: Dariusz Sosnowski, Viacheslav Ovsiienko, Bing Zhao, Ori Kam, Matan Azrad
Cc: dev, rasland
RSS can be used in FDB Rx rules when the JUMP_FDB_RX action is
available to allow jumping from the FDB Tx to the FDB Rx table.

Unlike NIC RSS, FDB RSS will not do the internal implicit metadata
copy.

This commit enables FDB RSS if JUMP_FDB_RX is supported.
Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
---
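A reduced standalone sketch of the validation relaxation in this patch:
QUEUE/RSS remain ingress-only unless the jump-FDB-Rx capability is set,
in which case transfer (FDB) templates may carry them too. The struct
and helper below are simplified illustrations of the checks in
mlx5_hw_validate_action_queue()/mlx5_hw_validate_action_rss(), not the
actual driver code:

#include <stdbool.h>
#include <stdio.h>

/* Simplified view of the relevant template attributes. */
struct tmpl_attr {
        bool egress;
        bool transfer;
};

static bool rss_or_queue_allowed(const struct tmpl_attr *attr,
                                 bool jump_fdb_rx_en)
{
        /* Ingress-only, except that FDB (transfer) is allowed when the
         * device can jump from FDB Tx to FDB Rx.
         */
        if (attr->egress || (attr->transfer && !jump_fdb_rx_en))
                return false;
        return true;
}

int main(void)
{
        struct tmpl_attr fdb = { .egress = false, .transfer = true };

        printf("FDB RSS without jump FDB Rx: %d\n",
               rss_or_queue_allowed(&fdb, false));
        printf("FDB RSS with jump FDB Rx:    %d\n",
               rss_or_queue_allowed(&fdb, true));
        return 0;
}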
drivers/net/mlx5/mlx5_flow.c | 4 ++--
drivers/net/mlx5/mlx5_flow_dv.c | 32 +++++++++++++++++---------------
drivers/net/mlx5/mlx5_flow_hw.c | 19 ++++++++++++++-----
3 files changed, 33 insertions(+), 22 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 3fbe89a9d4..9c6a4f39fb 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1977,9 +1977,9 @@ mlx5_flow_validate_action_mark(struct rte_eth_dev *dev,
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
"mark action not supported for "
"egress");
- if (attr->transfer && mlx5_hws_active(dev))
+ if (attr->transfer && !mlx5_hws_active(dev))
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
"non-template mark action not supported for transfer");
return 0;
}
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 633c41e358..61d3101ce8 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -8939,21 +8939,23 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"unsupported action MARK");
- if (action_flags & MLX5_FLOW_ACTION_QUEUE)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
- "unsupported action QUEUE");
- if (action_flags & MLX5_FLOW_ACTION_RSS)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
- "unsupported action RSS");
- if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "no fate action is found");
+ if (!priv->jump_fdb_rx_en) {
+ if (action_flags & MLX5_FLOW_ACTION_QUEUE)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "unsupported action QUEUE");
+ if (action_flags & MLX5_FLOW_ACTION_RSS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "unsupported action RSS");
+ if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "no fate action is found");
+ }
} else {
if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
return rte_flow_error_set(error, EINVAL,
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 83f55ed3e8..a063e5ac9c 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -7026,6 +7026,7 @@ mlx5_hw_validate_action_queue(struct rte_eth_dev *dev,
uint64_t action_flags,
struct rte_flow_error *error)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_action_queue *queue_mask = template_mask->conf;
const struct rte_flow_attr attr = {
.ingress = template_attr->ingress,
@@ -7034,7 +7035,7 @@ mlx5_hw_validate_action_queue(struct rte_eth_dev *dev,
};
bool masked = queue_mask != NULL && queue_mask->index;
- if (template_attr->egress || template_attr->transfer)
+ if (template_attr->egress || (template_attr->transfer && !priv->jump_fdb_rx_en))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR, NULL,
"QUEUE action supported for ingress only");
@@ -7053,9 +7054,10 @@ mlx5_hw_validate_action_rss(struct rte_eth_dev *dev,
__rte_unused uint64_t action_flags,
struct rte_flow_error *error)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_action_rss *mask = template_mask->conf;
- if (template_attr->egress || template_attr->transfer)
+ if (template_attr->egress || (template_attr->transfer && !priv->jump_fdb_rx_en))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR, NULL,
"RSS action supported for ingress only");
@@ -8104,6 +8106,7 @@ __flow_hw_actions_template_create(struct rte_eth_dev *dev,
}
if (priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS &&
priv->sh->config.dv_esw_en &&
+ !attr->transfer &&
(action_flags & (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS))) {
/* Insert META copy */
mf_actions[expand_mf_num] = rx_meta_copy_action;
@@ -12112,23 +12115,29 @@ __flow_hw_configure(struct rte_eth_dev *dev,
/* Add global actions. */
for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
uint32_t act_flags = 0;
+ uint32_t tag_flags = mlx5_hw_act_flag[i][0];
act_flags = mlx5_hw_act_flag[i][MLX5DR_TABLE_TYPE_NIC_RX] |
mlx5_hw_act_flag[i][MLX5DR_TABLE_TYPE_NIC_TX];
if (is_proxy) {
- if (unified_fdb)
+ if (unified_fdb) {
act_flags |=
(mlx5_hw_act_flag[i][MLX5DR_TABLE_TYPE_FDB_RX] |
mlx5_hw_act_flag[i][MLX5DR_TABLE_TYPE_FDB_TX] |
mlx5_hw_act_flag[i][MLX5DR_TABLE_TYPE_FDB_UNIFIED]);
- else
+ if (i == MLX5_HW_ACTION_FLAG_NONE_ROOT)
+ tag_flags |= mlx5_hw_act_flag[i][MLX5DR_TABLE_TYPE_FDB_RX];
+ } else {
act_flags |= mlx5_hw_act_flag[i][MLX5DR_TABLE_TYPE_FDB];
+ if (i == MLX5_HW_ACTION_FLAG_NONE_ROOT)
+ tag_flags |= mlx5_hw_act_flag[i][MLX5DR_TABLE_TYPE_FDB];
+ }
}
priv->hw_drop[i] = mlx5dr_action_create_dest_drop(priv->dr_ctx, act_flags);
if (!priv->hw_drop[i])
goto err;
priv->hw_tag[i] = mlx5dr_action_create_tag
- (priv->dr_ctx, mlx5_hw_act_flag[i][0]);
+ (priv->dr_ctx, tag_flags);
if (!priv->hw_tag[i])
goto err;
}
--
2.34.1