* [PATCH 0/3] net/mlx5: support indirect list actions
@ 2023-09-27 19:10 Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 01/16] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
` (2 more replies)
0 siblings, 3 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-09-27 19:10 UTC (permalink / raw)
To: dev; +Cc: getelson,
- Add support for indirect list actions acting as HWS mirror.
- Add support for METER_MARK as indirect list action.
Gregory Etelson (3):
net/mlx5: reformat HWS code
net/mlx5: support HWS mirror action
net/mlx5: support indirect list METER_MARK action
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v2 01/16] net/mlx5/hws: add support for reformat DevX object
2023-09-27 19:10 [PATCH 0/3] net/mlx5: support indirect list actions Gregory Etelson
@ 2023-10-16 18:42 ` Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 02/16] net/mlx5/hws: support creating of dynamic forward table and FTE Gregory Etelson
` (15 more replies)
2023-10-17 7:31 ` [PATCH v2 00/16] net/mlx5: support indirect actions list Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 " Gregory Etelson
2 siblings, 16 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-16 18:42 UTC (permalink / raw)
To: dev
Cc: getelson, ,
. .
/patches/upstream-pmd-indirect-actions-list/v2/v2-0000-cover-letter
. patch, Hamdan Igbaria, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam, Suanming Mou
From: Hamdan Igbaria <hamdani@nvidia.com>
Add support for creation of packet reformat object,
via the ALLOC_PACKET_REFORMAT_CONTEXT command.
Signed-off-by: Hamdan Igbaria <hamdani@nvidia.com>
---
drivers/common/mlx5/mlx5_prm.h | 39 +++++++++++++++++
drivers/net/mlx5/hws/mlx5dr_cmd.c | 60 ++++++++++++++++++++++++++
drivers/net/mlx5/hws/mlx5dr_cmd.h | 11 +++++
drivers/net/mlx5/hws/mlx5dr_internal.h | 5 +++
drivers/net/mlx5/hws/mlx5dr_send.c | 5 ---
5 files changed, 115 insertions(+), 5 deletions(-)
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index 6e181a0eca..4192fff55b 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -1218,6 +1218,8 @@ enum {
MLX5_CMD_OP_CREATE_FLOW_GROUP = 0x933,
MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936,
MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
+ MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT = 0x93d,
+ MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT = 0x93e,
MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939,
MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
MLX5_CMD_OP_CREATE_GENERAL_OBJECT = 0xa00,
@@ -5191,6 +5193,43 @@ struct mlx5_ifc_modify_flow_table_out_bits {
u8 reserved_at_40[0x60];
};
+struct mlx5_ifc_packet_reformat_context_in_bits {
+ u8 reformat_type[0x8];
+ u8 reserved_at_8[0x4];
+ u8 reformat_param_0[0x4];
+ u8 reserved_at_16[0x6];
+ u8 reformat_data_size[0xa];
+
+ u8 reformat_param_1[0x8];
+ u8 reserved_at_40[0x8];
+ u8 reformat_data[6][0x8];
+
+ u8 more_reformat_data[][0x8];
+};
+
+struct mlx5_ifc_alloc_packet_reformat_context_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0xa0];
+
+ u8 packet_reformat_context[];
+};
+
+struct mlx5_ifc_alloc_packet_reformat_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 packet_reformat_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
/* CQE format mask. */
#define MLX5E_CQE_FORMAT_MASK 0xc
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c
index 594c59aee3..0ccbaee961 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.c
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c
@@ -780,6 +780,66 @@ mlx5dr_cmd_sq_create(struct ibv_context *ctx,
return devx_obj;
}
+struct mlx5dr_devx_obj *
+mlx5dr_cmd_packet_reformat_create(struct ibv_context *ctx,
+ struct mlx5dr_cmd_packet_reformat_create_attr *attr)
+{
+ uint32_t out[MLX5_ST_SZ_DW(alloc_packet_reformat_out)] = {0};
+ size_t insz, cmd_data_sz, cmd_total_sz;
+ struct mlx5dr_devx_obj *devx_obj;
+ void *prctx;
+ void *pdata;
+ void *in;
+
+ cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
+ cmd_total_sz += MLX5_ST_SZ_BYTES(packet_reformat_context_in);
+ cmd_data_sz = MLX5_FLD_SZ_BYTES(packet_reformat_context_in, reformat_data);
+ insz = align(cmd_total_sz + attr->data_sz - cmd_data_sz, DW_SIZE);
+ in = simple_calloc(1, insz);
+ if (!in) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
+
+ prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in,
+ packet_reformat_context);
+ pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);
+
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_type, attr->type);
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, attr->reformat_param_0);
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, attr->data_sz);
+ memcpy(pdata, attr->data, attr->data_sz);
+
+ devx_obj = simple_malloc(sizeof(*devx_obj));
+ if (!devx_obj) {
+ DR_LOG(ERR, "Failed to allocate memory for packet reformat object");
+ rte_errno = ENOMEM;
+ goto out_free_in;
+ }
+
+ devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, insz, out, sizeof(out));
+ if (!devx_obj->obj) {
+ DR_LOG(ERR, "Failed to create packet reformat");
+ rte_errno = errno;
+ goto out_free_devx;
+ }
+
+ devx_obj->id = MLX5_GET(alloc_packet_reformat_out, out, packet_reformat_id);
+
+ simple_free(in);
+
+ return devx_obj;
+
+out_free_devx:
+ simple_free(devx_obj);
+out_free_in:
+ simple_free(in);
+ return NULL;
+}
+
int mlx5dr_cmd_sq_modify_rdy(struct mlx5dr_devx_obj *devx_obj)
{
uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h
index 8a495db9b3..f45b6c6b07 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.h
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h
@@ -155,6 +155,13 @@ struct mlx5dr_cmd_allow_other_vhca_access_attr {
uint8_t access_key[ACCESS_KEY_LEN];
};
+struct mlx5dr_cmd_packet_reformat_create_attr {
+ uint8_t type;
+ size_t data_sz;
+ void *data;
+ uint8_t reformat_param_0;
+};
+
struct mlx5dr_cmd_query_ft_caps {
uint8_t max_level;
uint8_t reparse;
@@ -285,6 +292,10 @@ mlx5dr_cmd_forward_tbl_create(struct ibv_context *ctx,
void mlx5dr_cmd_forward_tbl_destroy(struct mlx5dr_cmd_forward_tbl *tbl);
+struct mlx5dr_devx_obj *
+mlx5dr_cmd_packet_reformat_create(struct ibv_context *ctx,
+ struct mlx5dr_cmd_packet_reformat_create_attr *attr);
+
struct mlx5dr_devx_obj *
mlx5dr_cmd_alias_obj_create(struct ibv_context *ctx,
struct mlx5dr_cmd_alias_obj_create_attr *alias_attr);
diff --git a/drivers/net/mlx5/hws/mlx5dr_internal.h b/drivers/net/mlx5/hws/mlx5dr_internal.h
index c3c077667d..3770d28e62 100644
--- a/drivers/net/mlx5/hws/mlx5dr_internal.h
+++ b/drivers/net/mlx5/hws/mlx5dr_internal.h
@@ -91,4 +91,9 @@ static inline uint64_t roundup_pow_of_two(uint64_t n)
return n == 1 ? 1 : 1ULL << log2above(n);
}
+static inline unsigned long align(unsigned long val, unsigned long align)
+{
+ return (val + align - 1) & ~(align - 1);
+}
+
#endif /* MLX5DR_INTERNAL_H_ */
diff --git a/drivers/net/mlx5/hws/mlx5dr_send.c b/drivers/net/mlx5/hws/mlx5dr_send.c
index e58fdeb117..622d574bfa 100644
--- a/drivers/net/mlx5/hws/mlx5dr_send.c
+++ b/drivers/net/mlx5/hws/mlx5dr_send.c
@@ -668,11 +668,6 @@ static int mlx5dr_send_ring_create_sq_obj(struct mlx5dr_context *ctx,
return err;
}
-static inline unsigned long align(unsigned long val, unsigned long align)
-{
- return (val + align - 1) & ~(align - 1);
-}
-
static int mlx5dr_send_ring_open_sq(struct mlx5dr_context *ctx,
struct mlx5dr_send_engine *queue,
struct mlx5dr_send_ring_sq *sq,
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v2 02/16] net/mlx5/hws: support creating of dynamic forward table and FTE
2023-10-16 18:42 ` [PATCH v2 01/16] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
@ 2023-10-16 18:42 ` Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 03/16] net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action Gregory Etelson
` (14 subsequent siblings)
15 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-16 18:42 UTC (permalink / raw)
To: dev
Cc: getelson, ,
. .
/patches/upstream-pmd-indirect-actions-list/v2/v2-0000-cover-letter
. patch, Hamdan Igbaria, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam, Suanming Mou
From: Hamdan Igbaria <hamdani@nvidia.com>
Add the ability to create a forward table and FTE.
Signed-off-by: Hamdan Igbaria <hamdani@nvidia.com>
---
drivers/common/mlx5/mlx5_prm.h | 4 ++++
drivers/net/mlx5/hws/mlx5dr_cmd.c | 13 +++++++++++++
drivers/net/mlx5/hws/mlx5dr_cmd.h | 19 +++++++++++++++++++
3 files changed, 36 insertions(+)
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index 4192fff55b..df621b19af 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -5048,7 +5048,11 @@ enum mlx5_flow_destination_type {
};
enum mlx5_flow_context_action {
+ MLX5_FLOW_CONTEXT_ACTION_DROP = 1 << 1,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 1 << 2,
+ MLX5_FLOW_CONTEXT_ACTION_REFORMAT = 1 << 4,
+ MLX5_FLOW_CONTEXT_ACTION_DECRYPT = 1 << 12,
+ MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 13,
};
enum mlx5_flow_context_flow_source {
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c
index 0ccbaee961..8f407f9bce 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.c
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c
@@ -42,6 +42,7 @@ mlx5dr_cmd_flow_table_create(struct ibv_context *ctx,
ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level);
MLX5_SET(flow_table_context, ft_ctx, rtc_valid, ft_attr->rtc_valid);
+ MLX5_SET(flow_table_context, ft_ctx, reformat_en, ft_attr->reformat_en);
devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
if (!devx_obj->obj) {
@@ -182,12 +183,24 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
action_flags = fte_attr->action_flags;
MLX5_SET(flow_context, in_flow_context, action, action_flags);
+ if (action_flags & MLX5_FLOW_CONTEXT_ACTION_REFORMAT)
+ MLX5_SET(flow_context, in_flow_context,
+ packet_reformat_id, fte_attr->packet_reformat_id);
+
+ if (action_flags & (MLX5_FLOW_CONTEXT_ACTION_DECRYPT | MLX5_FLOW_CONTEXT_ACTION_ENCRYPT)) {
+ MLX5_SET(flow_context, in_flow_context,
+ encrypt_decrypt_type, fte_attr->encrypt_decrypt_type);
+ MLX5_SET(flow_context, in_flow_context,
+ encrypt_decrypt_obj_id, fte_attr->encrypt_decrypt_obj_id);
+ }
+
if (action_flags & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
/* Only destination_list_size of size 1 is supported */
MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
MLX5_SET(dest_format, in_dests, destination_type, fte_attr->destination_type);
MLX5_SET(dest_format, in_dests, destination_id, fte_attr->destination_id);
+ MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
}
devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h
index f45b6c6b07..bf3a362300 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.h
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h
@@ -7,8 +7,12 @@
struct mlx5dr_cmd_set_fte_attr {
uint32_t action_flags;
+ uint8_t encrypt_decrypt_type;
+ uint32_t encrypt_decrypt_obj_id;
+ uint32_t packet_reformat_id;
uint8_t destination_type;
uint32_t destination_id;
+ uint8_t ignore_flow_level;
uint8_t flow_source;
};
@@ -16,6 +20,7 @@ struct mlx5dr_cmd_ft_create_attr {
uint8_t type;
uint8_t level;
bool rtc_valid;
+ uint8_t reformat_en;
};
#define ACCESS_KEY_LEN 32
@@ -296,6 +301,20 @@ struct mlx5dr_devx_obj *
mlx5dr_cmd_packet_reformat_create(struct ibv_context *ctx,
struct mlx5dr_cmd_packet_reformat_create_attr *attr);
+struct mlx5dr_devx_obj *
+mlx5dr_cmd_set_fte(struct ibv_context *ctx,
+ uint32_t table_type,
+ uint32_t table_id,
+ uint32_t group_id,
+ struct mlx5dr_cmd_set_fte_attr *fte_attr);
+
+struct mlx5dr_cmd_forward_tbl *
+mlx5dr_cmd_forward_tbl_create(struct ibv_context *ctx,
+ struct mlx5dr_cmd_ft_create_attr *ft_attr,
+ struct mlx5dr_cmd_set_fte_attr *fte_attr);
+
+void mlx5dr_cmd_forward_tbl_destroy(struct mlx5dr_cmd_forward_tbl *tbl);
+
struct mlx5dr_devx_obj *
mlx5dr_cmd_alias_obj_create(struct ibv_context *ctx,
struct mlx5dr_cmd_alias_obj_create_attr *alias_attr);
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v2 03/16] net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action
2023-10-16 18:42 ` [PATCH v2 01/16] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 02/16] net/mlx5/hws: support creating of dynamic forward table and FTE Gregory Etelson
@ 2023-10-16 18:42 ` Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 04/16] net/mlx5/hws: add support for mirroring Gregory Etelson
` (13 subsequent siblings)
15 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-16 18:42 UTC (permalink / raw)
To: dev
Cc: getelson, ,
. .
/patches/upstream-pmd-indirect-actions-list/v2/v2-0000-cover-letter
. patch, Hamdan Igbaria, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam, Suanming Mou
From: Hamdan Igbaria <hamdani@nvidia.com>
Add mlx5dr_devx_obj struct to mlx5dr_action, so we could hold
the FT obj in dest table action.
Signed-off-by: Hamdan Igbaria <hamdani@nvidia.com>
---
drivers/net/mlx5/hws/mlx5dr_action.c | 4 ++++
drivers/net/mlx5/hws/mlx5dr_action.h | 3 +++
drivers/net/mlx5/hws/mlx5dr_table.c | 1 -
3 files changed, 7 insertions(+), 1 deletion(-)
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c
index ea9fc23732..55ec4f71c9 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.c
+++ b/drivers/net/mlx5/hws/mlx5dr_action.c
@@ -787,6 +787,8 @@ mlx5dr_action_create_dest_table(struct mlx5dr_context *ctx,
ret = mlx5dr_action_create_stcs(action, tbl->ft);
if (ret)
goto free_action;
+
+ action->devx_dest.devx_obj = tbl->ft;
}
return action;
@@ -864,6 +866,8 @@ mlx5dr_action_create_dest_tir(struct mlx5dr_context *ctx,
ret = mlx5dr_action_create_stcs(action, cur_obj);
if (ret)
goto clean_obj;
+
+ action->devx_dest.devx_obj = cur_obj;
}
return action;
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.h b/drivers/net/mlx5/hws/mlx5dr_action.h
index 314e289780..104c6880c1 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.h
+++ b/drivers/net/mlx5/hws/mlx5dr_action.h
@@ -148,6 +148,9 @@ struct mlx5dr_action {
struct {
struct mlx5dv_steering_anchor *sa;
} root_tbl;
+ struct {
+ struct mlx5dr_devx_obj *devx_obj;
+ } devx_dest;
};
};
diff --git a/drivers/net/mlx5/hws/mlx5dr_table.c b/drivers/net/mlx5/hws/mlx5dr_table.c
index e1150cd75d..91eb92db78 100644
--- a/drivers/net/mlx5/hws/mlx5dr_table.c
+++ b/drivers/net/mlx5/hws/mlx5dr_table.c
@@ -68,7 +68,6 @@ static void mlx5dr_table_down_default_fdb_miss_tbl(struct mlx5dr_table *tbl)
return;
mlx5dr_cmd_forward_tbl_destroy(default_miss);
-
ctx->common_res[tbl_type].default_miss = NULL;
}
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v2 04/16] net/mlx5/hws: add support for mirroring
2023-10-16 18:42 ` [PATCH v2 01/16] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 02/16] net/mlx5/hws: support creating of dynamic forward table and FTE Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 03/16] net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action Gregory Etelson
@ 2023-10-16 18:42 ` Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 05/16] net/mlx5/hws: allow destination into default miss FT Gregory Etelson
` (12 subsequent siblings)
15 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-16 18:42 UTC (permalink / raw)
To: dev
Cc: getelson, ,
. .
/patches/upstream-pmd-indirect-actions-list/v2/v2-0000-cover-letter
. patch, Shun Hao, Alex Vesker, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam, Suanming Mou
From: Shun Hao <shunh@nvidia.com>
This patch supports mirroring by adding a dest_array action. The action
accepts a list containing multiple destination actions, and can duplicate
packet and forward to each destination in the list.
Signed-off-by: Shun Hao <shunh@nvidia.com>
Acked-by: Alex Vesker <valex@nvidia.com>
---
drivers/common/mlx5/mlx5_prm.h | 23 ++++-
drivers/net/mlx5/hws/mlx5dr.h | 34 +++++++
drivers/net/mlx5/hws/mlx5dr_action.c | 130 ++++++++++++++++++++++++++-
drivers/net/mlx5/hws/mlx5dr_action.h | 3 +
drivers/net/mlx5/hws/mlx5dr_cmd.c | 64 ++++++++++---
drivers/net/mlx5/hws/mlx5dr_cmd.h | 21 ++++-
drivers/net/mlx5/hws/mlx5dr_debug.c | 1 +
drivers/net/mlx5/hws/mlx5dr_table.c | 7 +-
8 files changed, 262 insertions(+), 21 deletions(-)
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index df621b19af..aa0b622ca2 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -2320,7 +2320,11 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
};
struct mlx5_ifc_esw_cap_bits {
- u8 reserved_at_0[0x60];
+ u8 reserved_at_0[0x1d];
+ u8 merged_eswitch[0x1];
+ u8 reserved_at_1e[0x2];
+
+ u8 reserved_at_20[0x40];
u8 esw_manager_vport_number_valid[0x1];
u8 reserved_at_61[0xf];
@@ -5045,6 +5049,7 @@ struct mlx5_ifc_query_flow_table_out_bits {
enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0,
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
+ MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
};
enum mlx5_flow_context_action {
@@ -5088,6 +5093,19 @@ union mlx5_ifc_dest_format_flow_counter_list_auto_bits {
u8 reserved_at_0[0x40];
};
+struct mlx5_ifc_extended_dest_format_bits {
+ struct mlx5_ifc_dest_format_bits destination_entry;
+
+ u8 packet_reformat_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+#define MLX5_IFC_MULTI_PATH_FT_MAX_LEVEL 64
+
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
struct mlx5_ifc_flow_context_bits {
u8 reserved_at_00[0x20];
u8 group_id[0x20];
@@ -5106,8 +5124,7 @@ struct mlx5_ifc_flow_context_bits {
u8 reserved_at_e0[0x40];
u8 encrypt_decrypt_obj_id[0x20];
u8 reserved_at_140[0x16c0];
- /* Currently only one destnation */
- union mlx5_ifc_dest_format_flow_counter_list_auto_bits destination[1];
+ union mlx5_ifc_dest_format_flow_counter_list_auto_bits destination[0];
};
struct mlx5_ifc_set_fte_in_bits {
diff --git a/drivers/net/mlx5/hws/mlx5dr.h b/drivers/net/mlx5/hws/mlx5dr.h
index ea8bf683f3..1995c55132 100644
--- a/drivers/net/mlx5/hws/mlx5dr.h
+++ b/drivers/net/mlx5/hws/mlx5dr.h
@@ -46,6 +46,7 @@ enum mlx5dr_action_type {
MLX5DR_ACTION_TYP_ASO_METER,
MLX5DR_ACTION_TYP_ASO_CT,
MLX5DR_ACTION_TYP_DEST_ROOT,
+ MLX5DR_ACTION_TYP_DEST_ARRAY,
MLX5DR_ACTION_TYP_MAX,
};
@@ -213,6 +214,20 @@ struct mlx5dr_rule_action {
};
};
+struct mlx5dr_action_dest_attr {
+ /* Required action combination */
+ enum mlx5dr_action_type *action_type;
+
+ /* Required destination action to forward the packet */
+ struct mlx5dr_action *dest;
+
+ /* Optional reformat data */
+ struct {
+ size_t reformat_data_sz;
+ void *reformat_data;
+ } reformat;
+};
+
/* Open a context used for direct rule insertion using hardware steering.
* Each context can contain multiple tables of different types.
*
@@ -616,6 +631,25 @@ mlx5dr_action_create_pop_vlan(struct mlx5dr_context *ctx, uint32_t flags);
struct mlx5dr_action *
mlx5dr_action_create_push_vlan(struct mlx5dr_context *ctx, uint32_t flags);
+/* Create a dest array action, this action can duplicate packets and forward to
+ * multiple destinations in the destination list.
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] num_dest
+ * The number of dests attributes.
+ * @param[in] dests
+ * The destination array. Each contains a destination action and can have
+ * additional actions.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5dr_action_flags)
+ * @return pointer to mlx5dr_action on success NULL otherwise.
+ */
+struct mlx5dr_action *
+mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
+ size_t num_dest,
+ struct mlx5dr_action_dest_attr *dests,
+ uint32_t flags);
+
/* Create dest root table, this action will jump to root table according
* the given priority.
* @param[in] ctx
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c
index 55ec4f71c9..f068bc7e9c 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.c
+++ b/drivers/net/mlx5/hws/mlx5dr_action.c
@@ -34,7 +34,8 @@ static const uint32_t action_order_arr[MLX5DR_TABLE_TYPE_MAX][MLX5DR_ACTION_TYP_
BIT(MLX5DR_ACTION_TYP_MISS) |
BIT(MLX5DR_ACTION_TYP_TIR) |
BIT(MLX5DR_ACTION_TYP_DROP) |
- BIT(MLX5DR_ACTION_TYP_DEST_ROOT),
+ BIT(MLX5DR_ACTION_TYP_DEST_ROOT) |
+ BIT(MLX5DR_ACTION_TYP_DEST_ARRAY),
BIT(MLX5DR_ACTION_TYP_LAST),
},
[MLX5DR_TABLE_TYPE_NIC_TX] = {
@@ -71,7 +72,8 @@ static const uint32_t action_order_arr[MLX5DR_TABLE_TYPE_MAX][MLX5DR_ACTION_TYP_
BIT(MLX5DR_ACTION_TYP_MISS) |
BIT(MLX5DR_ACTION_TYP_VPORT) |
BIT(MLX5DR_ACTION_TYP_DROP) |
- BIT(MLX5DR_ACTION_TYP_DEST_ROOT),
+ BIT(MLX5DR_ACTION_TYP_DEST_ROOT) |
+ BIT(MLX5DR_ACTION_TYP_DEST_ARRAY),
BIT(MLX5DR_ACTION_TYP_LAST),
},
};
@@ -535,6 +537,7 @@ static void mlx5dr_action_fill_stc_attr(struct mlx5dr_action *action,
}
break;
case MLX5DR_ACTION_TYP_TBL:
+ case MLX5DR_ACTION_TYP_DEST_ARRAY:
attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT;
attr->action_offset = MLX5DR_ACTION_OFFSET_HIT;
attr->dest_table_id = obj->id;
@@ -1700,6 +1703,124 @@ mlx5dr_action_create_modify_header(struct mlx5dr_context *ctx,
return NULL;
}
+struct mlx5dr_action *
+mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
+ size_t num_dest,
+ struct mlx5dr_action_dest_attr *dests,
+ uint32_t flags)
+{
+ struct mlx5dr_cmd_set_fte_dest *dest_list = NULL;
+ struct mlx5dr_cmd_ft_create_attr ft_attr = {0};
+ struct mlx5dr_cmd_set_fte_attr fte_attr = {0};
+ struct mlx5dr_cmd_forward_tbl *fw_island;
+ enum mlx5dr_table_type table_type;
+ struct mlx5dr_action *action;
+ uint32_t i;
+ int ret;
+
+ if (num_dest <= 1) {
+ rte_errno = EINVAL;
+ DR_LOG(ERR, "Action must have multiple dests");
+ return NULL;
+ }
+
+ if (flags == (MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_SHARED)) {
+ ft_attr.type = FS_FT_NIC_RX;
+ ft_attr.level = MLX5_IFC_MULTI_PATH_FT_MAX_LEVEL - 1;
+ table_type = MLX5DR_TABLE_TYPE_NIC_RX;
+ } else if (flags == (MLX5DR_ACTION_FLAG_HWS_FDB | MLX5DR_ACTION_FLAG_SHARED)) {
+ ft_attr.type = FS_FT_FDB;
+ ft_attr.level = ctx->caps->fdb_ft.max_level - 1;
+ table_type = MLX5DR_TABLE_TYPE_FDB;
+ } else {
+ DR_LOG(ERR, "Action flags not supported");
+ rte_errno = ENOTSUP;
+ return NULL;
+ }
+
+ if (mlx5dr_context_shared_gvmi_used(ctx)) {
+ DR_LOG(ERR, "Cannot use this action in shared GVMI context");
+ rte_errno = ENOTSUP;
+ return NULL;
+ }
+
+ dest_list = simple_calloc(num_dest, sizeof(*dest_list));
+ if (!dest_list) {
+ DR_LOG(ERR, "Failed to allocate memory for destinations");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ for (i = 0; i < num_dest; i++) {
+ enum mlx5dr_action_type *action_type = dests[i].action_type;
+
+ if (!mlx5dr_action_check_combo(dests[i].action_type, table_type)) {
+ DR_LOG(ERR, "Invalid combination of actions");
+ rte_errno = EINVAL;
+ goto free_dest_list;
+ }
+
+ for (; *action_type != MLX5DR_ACTION_TYP_LAST; action_type++) {
+ switch (*action_type) {
+ case MLX5DR_ACTION_TYP_TBL:
+ dest_list[i].destination_type =
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest_list[i].destination_id = dests[i].dest->devx_dest.devx_obj->id;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ fte_attr.ignore_flow_level = 1;
+ break;
+ case MLX5DR_ACTION_TYP_VPORT:
+ dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest_list[i].destination_id = dests[i].dest->vport.vport_num;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ if (ctx->caps->merged_eswitch) {
+ dest_list[i].ext_flags |=
+ MLX5DR_CMD_EXT_DEST_ESW_OWNER_VHCA_ID;
+ dest_list[i].esw_owner_vhca_id =
+ dests[i].dest->vport.esw_owner_vhca_id;
+ }
+ break;
+ case MLX5DR_ACTION_TYP_TIR:
+ dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest_list[i].destination_id = dests[i].dest->devx_dest.devx_obj->id;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ break;
+ default:
+ DR_LOG(ERR, "Unsupported action in dest_array");
+ rte_errno = ENOTSUP;
+ goto free_dest_list;
+ }
+ }
+ }
+ fte_attr.dests_num = num_dest;
+ fte_attr.dests = dest_list;
+
+ fw_island = mlx5dr_cmd_forward_tbl_create(ctx->ibv_ctx, &ft_attr, &fte_attr);
+ if (!fw_island)
+ goto free_dest_list;
+
+ action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_DEST_ARRAY);
+ if (!action)
+ goto destroy_fw_island;
+
+ ret = mlx5dr_action_create_stcs(action, fw_island->ft);
+ if (ret)
+ goto free_action;
+
+ action->dest_array.fw_island = fw_island;
+
+ simple_free(dest_list);
+ return action;
+
+free_action:
+ simple_free(action);
+destroy_fw_island:
+ mlx5dr_cmd_forward_tbl_destroy(fw_island);
+free_dest_list:
+ simple_free(dest_list);
+ return NULL;
+}
+
struct mlx5dr_action *
mlx5dr_action_create_dest_root(struct mlx5dr_context *ctx,
uint16_t priority,
@@ -1782,6 +1903,10 @@ static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action)
mlx5dr_action_destroy_stcs(action);
mlx5dr_action_put_shared_stc(action, MLX5DR_CONTEXT_SHARED_STC_POP);
break;
+ case MLX5DR_ACTION_TYP_DEST_ARRAY:
+ mlx5dr_action_destroy_stcs(action);
+ mlx5dr_cmd_forward_tbl_destroy(action->dest_array.fw_island);
+ break;
case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
case MLX5DR_ACTION_TYP_MODIFY_HDR:
for (i = 0; i < action->modify_header.num_of_patterns; i++) {
@@ -2291,6 +2416,7 @@ int mlx5dr_action_template_process(struct mlx5dr_action_template *at)
case MLX5DR_ACTION_TYP_TIR:
case MLX5DR_ACTION_TYP_TBL:
case MLX5DR_ACTION_TYP_DEST_ROOT:
+ case MLX5DR_ACTION_TYP_DEST_ARRAY:
case MLX5DR_ACTION_TYP_VPORT:
case MLX5DR_ACTION_TYP_MISS:
/* Hit action */
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.h b/drivers/net/mlx5/hws/mlx5dr_action.h
index 104c6880c1..efe07c9d47 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.h
+++ b/drivers/net/mlx5/hws/mlx5dr_action.h
@@ -151,6 +151,9 @@ struct mlx5dr_action {
struct {
struct mlx5dr_devx_obj *devx_obj;
} devx_dest;
+ struct {
+ struct mlx5dr_cmd_forward_tbl *fw_island;
+ } dest_array;
};
};
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c
index 8f407f9bce..22f7c6b019 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.c
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c
@@ -158,18 +158,31 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
uint32_t group_id,
struct mlx5dr_cmd_set_fte_attr *fte_attr)
{
- uint32_t in[MLX5_ST_SZ_DW(set_fte_in) + MLX5_ST_SZ_DW(dest_format)] = {0};
uint32_t out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
struct mlx5dr_devx_obj *devx_obj;
+ uint32_t dest_entry_sz;
+ uint32_t total_dest_sz;
void *in_flow_context;
uint32_t action_flags;
- void *in_dests;
+ uint8_t *in_dests;
+ uint32_t inlen;
+ uint32_t *in;
+ uint32_t i;
+
+ dest_entry_sz = MLX5_ST_SZ_BYTES(dest_format);
+ total_dest_sz = dest_entry_sz * fte_attr->dests_num;
+ inlen = align((MLX5_ST_SZ_BYTES(set_fte_in) + total_dest_sz), DW_SIZE);
+ in = simple_calloc(1, inlen);
+ if (!in) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
devx_obj = simple_malloc(sizeof(*devx_obj));
if (!devx_obj) {
DR_LOG(ERR, "Failed to allocate memory for fte object");
rte_errno = ENOMEM;
- return NULL;
+ goto free_in;
}
MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
@@ -179,6 +192,7 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_source, fte_attr->flow_source);
+ MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
action_flags = fte_attr->action_flags;
MLX5_SET(flow_context, in_flow_context, action, action_flags);
@@ -195,15 +209,39 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
}
if (action_flags & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- /* Only destination_list_size of size 1 is supported */
- MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
- in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
- MLX5_SET(dest_format, in_dests, destination_type, fte_attr->destination_type);
- MLX5_SET(dest_format, in_dests, destination_id, fte_attr->destination_id);
- MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
+ in_dests = (uint8_t *)MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+
+ for (i = 0; i < fte_attr->dests_num; i++) {
+ struct mlx5dr_cmd_set_fte_dest *dest = &fte_attr->dests[i];
+
+ switch (dest->destination_type) {
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+ if (dest->ext_flags & MLX5DR_CMD_EXT_DEST_ESW_OWNER_VHCA_ID) {
+ MLX5_SET(dest_format, in_dests,
+ destination_eswitch_owner_vhca_id_valid, 1);
+ MLX5_SET(dest_format, in_dests,
+ destination_eswitch_owner_vhca_id,
+ dest->esw_owner_vhca_id);
+ }
+ /* Fall through */
+ case MLX5_FLOW_DESTINATION_TYPE_TIR:
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ MLX5_SET(dest_format, in_dests, destination_type,
+ dest->destination_type);
+ MLX5_SET(dest_format, in_dests, destination_id,
+ dest->destination_id);
+ break;
+ default:
+ rte_errno = EOPNOTSUPP;
+ goto free_devx;
+ }
+
+ in_dests = in_dests + dest_entry_sz;
+ }
+ MLX5_SET(flow_context, in_flow_context, destination_list_size, fte_attr->dests_num);
}
- devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
+ devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, inlen, out, sizeof(out));
if (!devx_obj->obj) {
DR_LOG(ERR, "Failed to create FTE (syndrome: %#x)",
mlx5dr_cmd_get_syndrome(out));
@@ -211,10 +249,13 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
goto free_devx;
}
+ simple_free(in);
return devx_obj;
free_devx:
simple_free(devx_obj);
+free_in:
+ simple_free(in);
return NULL;
}
@@ -1244,6 +1285,9 @@ int mlx5dr_cmd_query_caps(struct ibv_context *ctx,
caps->eswitch_manager_vport_number =
MLX5_GET(query_hca_cap_out, out,
capability.esw_cap.esw_manager_vport_number);
+
+ caps->merged_eswitch = MLX5_GET(query_hca_cap_out, out,
+ capability.esw_cap.merged_eswitch);
}
ret = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h
index bf3a362300..6f4aa3e320 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.h
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h
@@ -5,15 +5,27 @@
#ifndef MLX5DR_CMD_H_
#define MLX5DR_CMD_H_
+enum mlx5dr_cmd_ext_dest_flags {
+ MLX5DR_CMD_EXT_DEST_REFORMAT = 1 << 0,
+ MLX5DR_CMD_EXT_DEST_ESW_OWNER_VHCA_ID = 1 << 1,
+};
+
+struct mlx5dr_cmd_set_fte_dest {
+ uint8_t destination_type;
+ uint32_t destination_id;
+ enum mlx5dr_cmd_ext_dest_flags ext_flags;
+ uint16_t esw_owner_vhca_id;
+};
+
struct mlx5dr_cmd_set_fte_attr {
uint32_t action_flags;
+ uint8_t ignore_flow_level;
+ uint8_t flow_source;
uint8_t encrypt_decrypt_type;
uint32_t encrypt_decrypt_obj_id;
uint32_t packet_reformat_id;
- uint8_t destination_type;
- uint32_t destination_id;
- uint8_t ignore_flow_level;
- uint8_t flow_source;
+ uint32_t dests_num;
+ struct mlx5dr_cmd_set_fte_dest *dests;
};
struct mlx5dr_cmd_ft_create_attr {
@@ -216,6 +228,7 @@ struct mlx5dr_cmd_query_caps {
struct mlx5dr_cmd_query_ft_caps nic_ft;
struct mlx5dr_cmd_query_ft_caps fdb_ft;
bool eswitch_manager;
+ uint8_t merged_eswitch;
uint32_t eswitch_manager_vport_number;
uint8_t log_header_modify_argument_granularity;
uint8_t log_header_modify_argument_max_alloc;
diff --git a/drivers/net/mlx5/hws/mlx5dr_debug.c b/drivers/net/mlx5/hws/mlx5dr_debug.c
index 89529944a3..e7b1f2cc32 100644
--- a/drivers/net/mlx5/hws/mlx5dr_debug.c
+++ b/drivers/net/mlx5/hws/mlx5dr_debug.c
@@ -23,6 +23,7 @@ const char *mlx5dr_debug_action_type_str[] = {
[MLX5DR_ACTION_TYP_ASO_METER] = "ASO_METER",
[MLX5DR_ACTION_TYP_ASO_CT] = "ASO_CT",
[MLX5DR_ACTION_TYP_DEST_ROOT] = "DEST_ROOT",
+ [MLX5DR_ACTION_TYP_DEST_ARRAY] = "DEST_ARRAY",
};
static_assert(ARRAY_SIZE(mlx5dr_debug_action_type_str) == MLX5DR_ACTION_TYP_MAX,
diff --git a/drivers/net/mlx5/hws/mlx5dr_table.c b/drivers/net/mlx5/hws/mlx5dr_table.c
index 91eb92db78..55b9b20150 100644
--- a/drivers/net/mlx5/hws/mlx5dr_table.c
+++ b/drivers/net/mlx5/hws/mlx5dr_table.c
@@ -22,6 +22,7 @@ mlx5dr_table_up_default_fdb_miss_tbl(struct mlx5dr_table *tbl)
struct mlx5dr_cmd_ft_create_attr ft_attr = {0};
struct mlx5dr_cmd_set_fte_attr fte_attr = {0};
struct mlx5dr_cmd_forward_tbl *default_miss;
+ struct mlx5dr_cmd_set_fte_dest dest = {0};
struct mlx5dr_context *ctx = tbl->ctx;
uint8_t tbl_type = tbl->type;
@@ -37,9 +38,11 @@ mlx5dr_table_up_default_fdb_miss_tbl(struct mlx5dr_table *tbl)
ft_attr.level = tbl->ctx->caps->fdb_ft.max_level; /* The last level */
ft_attr.rtc_valid = false;
+ dest.destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.destination_id = ctx->caps->eswitch_manager_vport_number;
fte_attr.action_flags = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- fte_attr.destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- fte_attr.destination_id = ctx->caps->eswitch_manager_vport_number;
+ fte_attr.dests_num = 1;
+ fte_attr.dests = &dest;
default_miss = mlx5dr_cmd_forward_tbl_create(mlx5dr_context_get_local_ibv(ctx),
&ft_attr, &fte_attr);
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v2 05/16] net/mlx5/hws: allow destination into default miss FT
2023-10-16 18:42 ` [PATCH v2 01/16] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
` (2 preceding siblings ...)
2023-10-16 18:42 ` [PATCH v2 04/16] net/mlx5/hws: add support for mirroring Gregory Etelson
@ 2023-10-16 18:42 ` Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 06/16] net/mlx5/hws: support reformat for hws mirror Gregory Etelson
` (11 subsequent siblings)
15 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-16 18:42 UTC (permalink / raw)
To: dev
Cc: getelson, ,
. .
/patches/upstream-pmd-indirect-actions-list/v2/v2-0000-cover-letter
. patch, Erez Shitrit, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam, Suanming Mou
From: Erez Shitrit <erezsh@nvidia.com>
In FDB it will direct the packet into the hypervisor vport.
That allows the user to mirror packets into the default-miss vport.
Signed-off-by: Erez Shitrit <erezsh@nvidia.com>
---
drivers/net/mlx5/hws/mlx5dr_action.c | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c
index f068bc7e9c..6b62111593 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.c
+++ b/drivers/net/mlx5/hws/mlx5dr_action.c
@@ -1769,6 +1769,17 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
fte_attr.ignore_flow_level = 1;
break;
+ case MLX5DR_ACTION_TYP_MISS:
+ if (table_type != MLX5DR_TABLE_TYPE_FDB) {
+ DR_LOG(ERR, "Miss action supported for FDB only");
+ rte_errno = ENOTSUP;
+ goto free_dest_list;
+ }
+ dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest_list[i].destination_id =
+ ctx->caps->eswitch_manager_vport_number;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ break;
case MLX5DR_ACTION_TYP_VPORT:
dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest_list[i].destination_id = dests[i].dest->vport.vport_num;
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v2 06/16] net/mlx5/hws: support reformat for hws mirror
2023-10-16 18:42 ` [PATCH v2 01/16] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
` (3 preceding siblings ...)
2023-10-16 18:42 ` [PATCH v2 05/16] net/mlx5/hws: allow destination into default miss FT Gregory Etelson
@ 2023-10-16 18:42 ` Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 07/16] net/mlx5: reformat HWS code Gregory Etelson
` (10 subsequent siblings)
15 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-16 18:42 UTC (permalink / raw)
To: dev
Cc: getelson, ,
. .
/patches/upstream-pmd-indirect-actions-list/v2/v2-0000-cover-letter
. patch, Haifei Luo, Shun Hao, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam, Suanming Mou
From: Haifei Luo <haifeil@nvidia.com>
In dest_array action, an optional reformat action can be applied to each
destination. This patch supports this by using the extended destination
entry.
Signed-off-by: Haifei Luo <haifeil@nvidia.com>
Signed-off-by: Shun Hao <shunh@nvidia.com>
---
drivers/common/mlx5/mlx5_prm.h | 15 +++++++
drivers/net/mlx5/hws/mlx5dr_action.c | 67 +++++++++++++++++++++++++++-
drivers/net/mlx5/hws/mlx5dr_action.h | 2 +
drivers/net/mlx5/hws/mlx5dr_cmd.c | 10 ++++-
drivers/net/mlx5/hws/mlx5dr_cmd.h | 2 +
5 files changed, 94 insertions(+), 2 deletions(-)
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index aa0b622ca2..bced5a59dd 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -5352,6 +5352,21 @@ enum mlx5_parse_graph_arc_node_index {
MLX5_GRAPH_ARC_NODE_PROGRAMMABLE = 0x1f,
};
+enum mlx5_packet_reformat_context_reformat_type {
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4 = 0x5,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L3_ESP_TUNNEL = 0x6,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4 = 0x7,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_DEL_ESP_TRANSPORT = 0x8,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L3_ESP_TUNNEL_TO_L2 = 0x9,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP = 0xA,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6 = 0xB,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6 = 0xC,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_NISP_TNL = 0xD,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_REMOVE_NISP_TNL = 0xE,
+};
+
#define MLX5_PARSE_GRAPH_FLOW_SAMPLE_MAX 8
#define MLX5_PARSE_GRAPH_IN_ARC_MAX 8
#define MLX5_PARSE_GRAPH_OUT_ARC_MAX 8
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c
index 6b62111593..11a7c58925 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.c
+++ b/drivers/net/mlx5/hws/mlx5dr_action.c
@@ -1703,6 +1703,44 @@ mlx5dr_action_create_modify_header(struct mlx5dr_context *ctx,
return NULL;
}
+static struct mlx5dr_devx_obj *
+mlx5dr_action_dest_array_process_reformat(struct mlx5dr_context *ctx,
+ enum mlx5dr_action_type type,
+ void *reformat_data,
+ size_t reformat_data_sz)
+{
+ struct mlx5dr_cmd_packet_reformat_create_attr pr_attr = {0};
+ struct mlx5dr_devx_obj *reformat_devx_obj;
+
+ if (!reformat_data || !reformat_data_sz) {
+ DR_LOG(ERR, "Empty reformat action or data");
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ switch (type) {
+ case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ pr_attr.type = MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L2_TUNNEL;
+ break;
+ case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ pr_attr.type = MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
+ break;
+ default:
+ DR_LOG(ERR, "Invalid value for reformat type");
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ pr_attr.reformat_param_0 = 0;
+ pr_attr.data_sz = reformat_data_sz;
+ pr_attr.data = reformat_data;
+
+ reformat_devx_obj = mlx5dr_cmd_packet_reformat_create(ctx->ibv_ctx, &pr_attr);
+ if (!reformat_devx_obj)
+ return NULL;
+
+ return reformat_devx_obj;
+}
+
struct mlx5dr_action *
mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
size_t num_dest,
@@ -1710,6 +1748,7 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
uint32_t flags)
{
struct mlx5dr_cmd_set_fte_dest *dest_list = NULL;
+ struct mlx5dr_devx_obj *packet_reformat = NULL;
struct mlx5dr_cmd_ft_create_attr ft_attr = {0};
struct mlx5dr_cmd_set_fte_attr fte_attr = {0};
struct mlx5dr_cmd_forward_tbl *fw_island;
@@ -1796,6 +1835,21 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
dest_list[i].destination_id = dests[i].dest->devx_dest.devx_obj->id;
fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
break;
+ case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ packet_reformat = mlx5dr_action_dest_array_process_reformat
+ (ctx,
+ *action_type,
+ dests[i].reformat.reformat_data,
+ dests[i].reformat.reformat_data_sz);
+ if (!packet_reformat)
+ goto free_dest_list;
+
+ dest_list[i].ext_flags |= MLX5DR_CMD_EXT_DEST_REFORMAT;
+ dest_list[i].ext_reformat = packet_reformat;
+ ft_attr.reformat_en = true;
+ fte_attr.extended_dest = 1;
+ break;
default:
DR_LOG(ERR, "Unsupported action in dest_array");
rte_errno = ENOTSUP;
@@ -1819,8 +1873,9 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
goto free_action;
action->dest_array.fw_island = fw_island;
+ action->dest_array.num_dest = num_dest;
+ action->dest_array.dest_list = dest_list;
- simple_free(dest_list);
return action;
free_action:
@@ -1828,6 +1883,10 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
destroy_fw_island:
mlx5dr_cmd_forward_tbl_destroy(fw_island);
free_dest_list:
+ for (i = 0; i < num_dest; i++) {
+ if (dest_list[i].ext_reformat)
+ mlx5dr_cmd_destroy_obj(dest_list[i].ext_reformat);
+ }
simple_free(dest_list);
return NULL;
}
@@ -1917,6 +1976,12 @@ static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action)
case MLX5DR_ACTION_TYP_DEST_ARRAY:
mlx5dr_action_destroy_stcs(action);
mlx5dr_cmd_forward_tbl_destroy(action->dest_array.fw_island);
+ for (i = 0; i < action->dest_array.num_dest; i++) {
+ if (action->dest_array.dest_list[i].ext_reformat)
+ mlx5dr_cmd_destroy_obj
+ (action->dest_array.dest_list[i].ext_reformat);
+ }
+ simple_free(action->dest_array.dest_list);
break;
case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
case MLX5DR_ACTION_TYP_MODIFY_HDR:
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.h b/drivers/net/mlx5/hws/mlx5dr_action.h
index efe07c9d47..582a38bebc 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.h
+++ b/drivers/net/mlx5/hws/mlx5dr_action.h
@@ -153,6 +153,8 @@ struct mlx5dr_action {
} devx_dest;
struct {
struct mlx5dr_cmd_forward_tbl *fw_island;
+ size_t num_dest;
+ struct mlx5dr_cmd_set_fte_dest *dest_list;
} dest_array;
};
};
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c
index 22f7c6b019..781de40c02 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.c
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c
@@ -169,7 +169,9 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
uint32_t *in;
uint32_t i;
- dest_entry_sz = MLX5_ST_SZ_BYTES(dest_format);
+ dest_entry_sz = fte_attr->extended_dest ?
+ MLX5_ST_SZ_BYTES(extended_dest_format) :
+ MLX5_ST_SZ_BYTES(dest_format);
total_dest_sz = dest_entry_sz * fte_attr->dests_num;
inlen = align((MLX5_ST_SZ_BYTES(set_fte_in) + total_dest_sz), DW_SIZE);
in = simple_calloc(1, inlen);
@@ -192,6 +194,7 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_source, fte_attr->flow_source);
+ MLX5_SET(flow_context, in_flow_context, extended_destination, fte_attr->extended_dest);
MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
action_flags = fte_attr->action_flags;
@@ -230,6 +233,11 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
dest->destination_type);
MLX5_SET(dest_format, in_dests, destination_id,
dest->destination_id);
+ if (dest->ext_flags & MLX5DR_CMD_EXT_DEST_REFORMAT) {
+ MLX5_SET(dest_format, in_dests, packet_reformat, 1);
+ MLX5_SET(extended_dest_format, in_dests, packet_reformat_id,
+ dest->ext_reformat->id);
+ }
break;
default:
rte_errno = EOPNOTSUPP;
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h
index 6f4aa3e320..28e5ea4726 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.h
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h
@@ -14,6 +14,7 @@ struct mlx5dr_cmd_set_fte_dest {
uint8_t destination_type;
uint32_t destination_id;
enum mlx5dr_cmd_ext_dest_flags ext_flags;
+ struct mlx5dr_devx_obj *ext_reformat;
uint16_t esw_owner_vhca_id;
};
@@ -21,6 +22,7 @@ struct mlx5dr_cmd_set_fte_attr {
uint32_t action_flags;
uint8_t ignore_flow_level;
uint8_t flow_source;
+ uint8_t extended_dest;
uint8_t encrypt_decrypt_type;
uint32_t encrypt_decrypt_obj_id;
uint32_t packet_reformat_id;
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v2 07/16] net/mlx5: reformat HWS code
2023-10-16 18:42 ` [PATCH v2 01/16] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
` (4 preceding siblings ...)
2023-10-16 18:42 ` [PATCH v2 06/16] net/mlx5/hws: support reformat for hws mirror Gregory Etelson
@ 2023-10-16 18:42 ` Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 08/16] net/mlx5: support HWS mirror action Gregory Etelson
` (9 subsequent siblings)
15 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-16 18:42 UTC (permalink / raw)
To: dev
Cc: getelson, ,
. .
/patches/upstream-pmd-indirect-actions-list/v2/v2-0000-cover-letter
. patch, Matan Azrad, Viacheslav Ovsiienko, Ori Kam,
Suanming Mou
Replace if() with switch().
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
---
drivers/net/mlx5/mlx5_flow_hw.c | 70 ++++++++++++++++++---------------
1 file changed, 39 insertions(+), 31 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 6fcf654e4a..b2215fb5cf 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -4548,6 +4548,17 @@ static enum mlx5dr_action_type mlx5_hw_dr_action_types[] = {
[RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL] = MLX5DR_ACTION_TYP_DEST_ROOT,
};
+static inline void
+action_template_set_type(struct rte_flow_actions_template *at,
+ enum mlx5dr_action_type *action_types,
+ unsigned int action_src, uint16_t *curr_off,
+ enum mlx5dr_action_type type)
+{
+ at->actions_off[action_src] = *curr_off;
+ action_types[*curr_off] = type;
+ *curr_off = *curr_off + 1;
+}
+
static int
flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
unsigned int action_src,
@@ -4565,9 +4576,8 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
type = mask->type;
switch (type) {
case RTE_FLOW_ACTION_TYPE_RSS:
- at->actions_off[action_src] = *curr_off;
- action_types[*curr_off] = MLX5DR_ACTION_TYP_TIR;
- *curr_off = *curr_off + 1;
+ action_template_set_type(at, action_types, action_src, curr_off,
+ MLX5DR_ACTION_TYP_TIR);
break;
case RTE_FLOW_ACTION_TYPE_AGE:
case RTE_FLOW_ACTION_TYPE_COUNT:
@@ -4575,23 +4585,20 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
* Both AGE and COUNT action need counter, the first one fills
* the action_types array, and the second only saves the offset.
*/
- if (*cnt_off == UINT16_MAX) {
- *cnt_off = *curr_off;
- action_types[*cnt_off] = MLX5DR_ACTION_TYP_CTR;
- *curr_off = *curr_off + 1;
- }
+ if (*cnt_off == UINT16_MAX)
+ action_template_set_type(at, action_types,
+ action_src, curr_off,
+ MLX5DR_ACTION_TYP_CTR);
at->actions_off[action_src] = *cnt_off;
break;
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
- at->actions_off[action_src] = *curr_off;
- action_types[*curr_off] = MLX5DR_ACTION_TYP_ASO_CT;
- *curr_off = *curr_off + 1;
+ action_template_set_type(at, action_types, action_src, curr_off,
+ MLX5DR_ACTION_TYP_ASO_CT);
break;
case RTE_FLOW_ACTION_TYPE_QUOTA:
case RTE_FLOW_ACTION_TYPE_METER_MARK:
- at->actions_off[action_src] = *curr_off;
- action_types[*curr_off] = MLX5DR_ACTION_TYP_ASO_METER;
- *curr_off = *curr_off + 1;
+ action_template_set_type(at, action_types, action_src, curr_off,
+ MLX5DR_ACTION_TYP_ASO_METER);
break;
default:
DRV_LOG(WARNING, "Unsupported shared action type: %d", type);
@@ -5101,31 +5108,32 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
at->reformat_off = UINT16_MAX;
at->mhdr_off = UINT16_MAX;
at->rx_cpy_pos = pos;
- /*
- * mlx5 PMD hacks indirect action index directly to the action conf.
- * The rte_flow_conv() function copies the content from conf pointer.
- * Need to restore the indirect action index from action conf here.
- */
for (i = 0; actions->type != RTE_FLOW_ACTION_TYPE_END;
actions++, masks++, i++) {
- if (actions->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
+ const struct rte_flow_action_modify_field *info;
+
+ switch (actions->type) {
+ /*
+ * mlx5 PMD hacks indirect action index directly to the action conf.
+ * The rte_flow_conv() function copies the content from conf pointer.
+ * Need to restore the indirect action index from action conf here.
+ */
+ case RTE_FLOW_ACTION_TYPE_INDIRECT:
at->actions[i].conf = actions->conf;
at->masks[i].conf = masks->conf;
- }
- if (actions->type == RTE_FLOW_ACTION_TYPE_MODIFY_FIELD) {
- const struct rte_flow_action_modify_field *info = actions->conf;
-
+ break;
+ case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
+ info = actions->conf;
if ((info->dst.field == RTE_FLOW_FIELD_FLEX_ITEM &&
flow_hw_flex_item_acquire(dev, info->dst.flex_handle,
&at->flex_item)) ||
- (info->src.field == RTE_FLOW_FIELD_FLEX_ITEM &&
- flow_hw_flex_item_acquire(dev, info->src.flex_handle,
- &at->flex_item))) {
- rte_flow_error_set(error, rte_errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "Failed to acquire flex item");
+ (info->src.field == RTE_FLOW_FIELD_FLEX_ITEM &&
+ flow_hw_flex_item_acquire(dev, info->src.flex_handle,
+ &at->flex_item)))
goto error;
- }
+ break;
+ default:
+ break;
}
}
at->tmpl = flow_hw_dr_actions_template_create(at);
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v2 08/16] net/mlx5: support HWS mirror action
2023-10-16 18:42 ` [PATCH v2 01/16] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
` (5 preceding siblings ...)
2023-10-16 18:42 ` [PATCH v2 07/16] net/mlx5: reformat HWS code Gregory Etelson
@ 2023-10-16 18:42 ` Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 09/16] net/mlx5: fix mirror action validation Gregory Etelson
` (8 subsequent siblings)
15 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-16 18:42 UTC (permalink / raw)
To: dev
Cc: getelson, ,
. .
/patches/upstream-pmd-indirect-actions-list/v2/v2-0000-cover-letter
. patch, Matan Azrad, Viacheslav Ovsiienko, Ori Kam,
Suanming Mou
HWS mirror clones original packet to one or two destinations and
proceeds with the original packet path.
The mirror has no dedicated RTE flow action type.
Mirror object is referenced by INDIRECT_LIST action.
INDIRECT_LIST for a mirror built from actions list:
SAMPLE [/ SAMPLE] / <Orig. packet destination> / END
Mirror SAMPLE action defines packet clone. It specifies the clone
destination and optional clone reformat action.
Destination action for both clone and original packet depends on HCA
domain:
- for NIC RX, destination is either RSS or QUEUE
- for FDB, destination is PORT
HWS mirror was implemented with the INDIRECT_LIST flow action.
MLX5 PMD defines general `struct mlx5_indirect_list` type for all.
INDIRECT_LIST handler objects:
struct mlx5_indirect_list {
enum mlx5_indirect_list_type type;
LIST_ENTRY(mlx5_indirect_list) chain;
char data[];
};
Specific INDIRECT_LIST type must overload `mlx5_indirect_list::data`
and provide unique `type` value.
PMD returns a pointer to `mlx5_indirect_list` object.
Existing non-masked actions template API cannot identify flow actions
in INDIRECT_LIST handler because INDIRECT_LIST handler can represent
several flow actions.
For example:
A: SAMPLE / JUMP
B: SAMPLE / SAMPLE / RSS
Actions template command
template indirect_list / end mask indirect_list 0 / end
does not provide any information to differentiate between flow
actions in A and B.
MLX5 PMD requires INDIRECT_LIST configuration parameter in the
template section:
Non-masked INDIRECT_LIST API:
=============================
template indirect_list X / end mask indirect_list 0 / end
PMD identifies type of X handler and will use the same type in
template creation. Actual parameters for actions in the list will
be extracted from flow configuration.
Masked INDIRECT_LIST API:
=========================
template indirect_list X / end mask indirect_list -1UL / end
PMD creates action template from actions types and configurations
referenced by X.
INDIRECT_LIST action without configuration is invalid and will be
rejected by PMD.
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
---
drivers/net/mlx5/mlx5.c | 1 +
drivers/net/mlx5/mlx5.h | 2 +
drivers/net/mlx5/mlx5_flow.c | 134 +++++++
drivers/net/mlx5/mlx5_flow.h | 69 +++-
drivers/net/mlx5/mlx5_flow_hw.c | 616 +++++++++++++++++++++++++++++++-
5 files changed, 817 insertions(+), 5 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 997df595d0..08b7b03365 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -2168,6 +2168,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
/* Free the eCPRI flex parser resource. */
mlx5_flex_parser_ecpri_release(dev);
mlx5_flex_item_port_cleanup(dev);
+ mlx5_indirect_list_handles_release(dev);
#ifdef HAVE_MLX5_HWS_SUPPORT
flow_hw_destroy_vport_action(dev);
flow_hw_resource_release(dev);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 0b709a1bda..f3b872f59c 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1791,6 +1791,8 @@ struct mlx5_priv {
LIST_HEAD(ind_tables, mlx5_ind_table_obj) ind_tbls;
/* Standalone indirect tables. */
LIST_HEAD(stdl_ind_tables, mlx5_ind_table_obj) standalone_ind_tbls;
+ /* Objects created with indirect list action */
+ LIST_HEAD(indirect_list, mlx5_indirect_list) indirect_list_head;
/* Pointer to next element. */
rte_rwlock_t ind_tbls_lock;
uint32_t refcnt; /**< Reference counter. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 8ad85e6027..693d1320e1 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -62,6 +62,30 @@ struct tunnel_default_miss_ctx {
};
};
+void
+mlx5_indirect_list_handles_release(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ while (!LIST_EMPTY(&priv->indirect_list_head)) {
+ struct mlx5_indirect_list *e =
+ LIST_FIRST(&priv->indirect_list_head);
+
+ LIST_REMOVE(e, entry);
+ switch (e->type) {
+#ifdef HAVE_MLX5_HWS_SUPPORT
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
+ mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)e, true);
+ break;
+#endif
+ default:
+ DRV_LOG(ERR, "invalid indirect list type");
+ MLX5_ASSERT(false);
+ break;
+ }
+ }
+}
+
static int
flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
struct rte_flow *flow,
@@ -1120,6 +1144,32 @@ mlx5_flow_async_action_handle_query_update
enum rte_flow_query_update_mode qu_mode,
void *user_data, struct rte_flow_error *error);
+static struct rte_flow_action_list_handle *
+mlx5_action_list_handle_create(struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error);
+
+static int
+mlx5_action_list_handle_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_action_list_handle *handle,
+ struct rte_flow_error *error);
+
+static struct rte_flow_action_list_handle *
+mlx5_flow_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct
+ rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data,
+ struct rte_flow_error *error);
+static int
+mlx5_flow_async_action_list_handle_destroy
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow_action_list_handle *action_handle,
+ void *user_data, struct rte_flow_error *error);
+
static const struct rte_flow_ops mlx5_flow_ops = {
.validate = mlx5_flow_validate,
.create = mlx5_flow_create,
@@ -1135,6 +1185,8 @@ static const struct rte_flow_ops mlx5_flow_ops = {
.action_handle_update = mlx5_action_handle_update,
.action_handle_query = mlx5_action_handle_query,
.action_handle_query_update = mlx5_action_handle_query_update,
+ .action_list_handle_create = mlx5_action_list_handle_create,
+ .action_list_handle_destroy = mlx5_action_list_handle_destroy,
.tunnel_decap_set = mlx5_flow_tunnel_decap_set,
.tunnel_match = mlx5_flow_tunnel_match,
.tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
@@ -1163,6 +1215,10 @@ static const struct rte_flow_ops mlx5_flow_ops = {
.async_action_handle_query = mlx5_flow_async_action_handle_query,
.async_action_handle_destroy = mlx5_flow_async_action_handle_destroy,
.async_actions_update = mlx5_flow_async_flow_update,
+ .async_action_list_handle_create =
+ mlx5_flow_async_action_list_handle_create,
+ .async_action_list_handle_destroy =
+ mlx5_flow_async_action_list_handle_destroy,
};
/* Tunnel information. */
@@ -10869,6 +10925,84 @@ mlx5_action_handle_query_update(struct rte_eth_dev *dev,
query, qu_mode, error);
}
+
+#define MLX5_DRV_FOPS_OR_ERR(dev, fops, drv_cb, ret) \
+{ \
+ struct rte_flow_attr attr = { .transfer = 0 }; \
+ enum mlx5_flow_drv_type drv_type = flow_get_drv_type((dev), &attr); \
+ if (drv_type == MLX5_FLOW_TYPE_MIN || \
+ drv_type == MLX5_FLOW_TYPE_MAX) { \
+ rte_flow_error_set(error, ENOTSUP, \
+ RTE_FLOW_ERROR_TYPE_ACTION, \
+ NULL, "invalid driver type"); \
+ return ret; \
+ } \
+ (fops) = flow_get_drv_ops(drv_type); \
+ if (!(fops) || !(fops)->drv_cb) { \
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, \
+ NULL, "no action_list handler"); \
+ return ret; \
+ } \
+}
+
+static struct rte_flow_action_list_handle *
+mlx5_action_list_handle_create(struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops, action_list_handle_create, NULL);
+ return fops->action_list_handle_create(dev, conf, actions, error);
+}
+
+static int
+mlx5_action_list_handle_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_action_list_handle *handle,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops, action_list_handle_destroy, ENOTSUP);
+ return fops->action_list_handle_destroy(dev, handle, error);
+}
+
+static struct rte_flow_action_list_handle *
+mlx5_flow_async_action_list_handle_create(struct rte_eth_dev *dev,
+ uint32_t queue_id,
+ const struct
+ rte_flow_op_attr *op_attr,
+ const struct
+ rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops, async_action_list_handle_create, NULL);
+ return fops->async_action_list_handle_create(dev, queue_id, op_attr,
+ conf, actions, user_data,
+ error);
+}
+
+static int
+mlx5_flow_async_action_list_handle_destroy
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow_action_list_handle *action_handle,
+ void *user_data, struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops,
+ async_action_list_handle_destroy, ENOTSUP);
+ return fops->async_action_list_handle_destroy(dev, queue_id, op_attr,
+ action_handle, user_data,
+ error);
+}
+
/**
* Destroy all indirect actions (shared RSS).
*
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 903ff66d72..f6a752475d 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -65,7 +65,7 @@ enum mlx5_rte_flow_field_id {
(((uint32_t)(uintptr_t)(handle)) & \
((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1))
-enum {
+enum mlx5_indirect_type{
MLX5_INDIRECT_ACTION_TYPE_RSS,
MLX5_INDIRECT_ACTION_TYPE_AGE,
MLX5_INDIRECT_ACTION_TYPE_COUNT,
@@ -97,6 +97,28 @@ enum {
#define MLX5_ACTION_CTX_CT_GET_OWNER MLX5_INDIRECT_ACT_CT_GET_OWNER
#define MLX5_ACTION_CTX_CT_GEN_IDX MLX5_INDIRECT_ACT_CT_GEN_IDX
+enum mlx5_indirect_list_type {
+ MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 1,
+};
+
+/*
+ * Base type for indirect list type.
+ * Actual indirect list type MUST override that type and put type spec data
+ * after the `chain`.
+ */
+struct mlx5_indirect_list {
+ /* type field MUST be the first */
+ enum mlx5_indirect_list_type type;
+ LIST_ENTRY(mlx5_indirect_list) entry;
+ /* put type specific data after chain */
+};
+
+static __rte_always_inline enum mlx5_indirect_list_type
+mlx5_get_indirect_list_type(const struct mlx5_indirect_list *obj)
+{
+ return obj->type;
+}
+
/* Matches on selected register. */
struct mlx5_rte_flow_item_tag {
enum modify_reg id;
@@ -1218,6 +1240,10 @@ struct rte_flow_hw {
#pragma GCC diagnostic error "-Wpedantic"
#endif
+struct mlx5dr_action;
+typedef struct mlx5dr_action *
+(*indirect_list_callback_t)(const struct rte_flow_action *);
+
/* rte flow action translate to DR action struct. */
struct mlx5_action_construct_data {
LIST_ENTRY(mlx5_action_construct_data) next;
@@ -1266,6 +1292,9 @@ struct mlx5_action_construct_data {
struct {
uint32_t id;
} shared_meter;
+ struct {
+ indirect_list_callback_t cb;
+ } indirect_list;
};
};
@@ -1776,6 +1805,17 @@ typedef int (*mlx5_flow_action_query_update_t)
const void *update, void *data,
enum rte_flow_query_update_mode qu_mode,
struct rte_flow_error *error);
+typedef struct rte_flow_action_list_handle *
+(*mlx5_flow_action_list_handle_create_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error);
+typedef int
+(*mlx5_flow_action_list_handle_destroy_t)
+ (struct rte_eth_dev *dev,
+ struct rte_flow_action_list_handle *handle,
+ struct rte_flow_error *error);
typedef int (*mlx5_flow_sync_domain_t)
(struct rte_eth_dev *dev,
uint32_t domains,
@@ -1964,6 +2004,20 @@ typedef int (*mlx5_flow_async_action_handle_destroy_t)
struct rte_flow_action_handle *handle,
void *user_data,
struct rte_flow_error *error);
+typedef struct rte_flow_action_list_handle *
+(*mlx5_flow_async_action_list_handle_create_t)
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data, struct rte_flow_error *error);
+typedef int
+(*mlx5_flow_async_action_list_handle_destroy_t)
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow_action_list_handle *action_handle,
+ void *user_data, struct rte_flow_error *error);
+
struct mlx5_flow_driver_ops {
mlx5_flow_validate_t validate;
@@ -1999,6 +2053,8 @@ struct mlx5_flow_driver_ops {
mlx5_flow_action_update_t action_update;
mlx5_flow_action_query_t action_query;
mlx5_flow_action_query_update_t action_query_update;
+ mlx5_flow_action_list_handle_create_t action_list_handle_create;
+ mlx5_flow_action_list_handle_destroy_t action_list_handle_destroy;
mlx5_flow_sync_domain_t sync_domain;
mlx5_flow_discover_priorities_t discover_priorities;
mlx5_flow_item_create_t item_create;
@@ -2025,6 +2081,10 @@ struct mlx5_flow_driver_ops {
mlx5_flow_async_action_handle_query_update_t async_action_query_update;
mlx5_flow_async_action_handle_query_t async_action_query;
mlx5_flow_async_action_handle_destroy_t async_action_destroy;
+ mlx5_flow_async_action_list_handle_create_t
+ async_action_list_handle_create;
+ mlx5_flow_async_action_list_handle_destroy_t
+ async_action_list_handle_destroy;
};
/* mlx5_flow.c */
@@ -2755,4 +2815,11 @@ flow_hw_get_srh_flex_parser_byte_off_from_ctx(void *dr_ctx __rte_unused)
#endif
return UINT32_MAX;
}
+void
+mlx5_indirect_list_handles_release(struct rte_eth_dev *dev);
+#ifdef HAVE_MLX5_HWS_SUPPORT
+struct mlx5_mirror;
+void
+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release);
+#endif
#endif /* RTE_PMD_MLX5_FLOW_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index b2215fb5cf..44ed23b1fd 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -58,6 +58,24 @@
#define MLX5_HW_VLAN_PUSH_VID_IDX 1
#define MLX5_HW_VLAN_PUSH_PCP_IDX 2
+#define MLX5_MIRROR_MAX_CLONES_NUM 3
+#define MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN 4
+
+struct mlx5_mirror_clone {
+ enum rte_flow_action_type type;
+ void *action_ctx;
+};
+
+struct mlx5_mirror {
+ /* type field MUST be the first */
+ enum mlx5_indirect_list_type type;
+ LIST_ENTRY(mlx5_indirect_list) entry;
+
+ uint32_t clones_num;
+ struct mlx5dr_action *mirror_action;
+ struct mlx5_mirror_clone clone[MLX5_MIRROR_MAX_CLONES_NUM];
+};
+
static int flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev);
static int flow_hw_translate_group(struct rte_eth_dev *dev,
const struct mlx5_flow_template_table_cfg *cfg,
@@ -568,6 +586,22 @@ __flow_hw_act_data_general_append(struct mlx5_priv *priv,
return 0;
}
+static __rte_always_inline int
+flow_hw_act_data_indirect_list_append(struct mlx5_priv *priv,
+ struct mlx5_hw_actions *acts,
+ enum rte_flow_action_type type,
+ uint16_t action_src, uint16_t action_dst,
+ indirect_list_callback_t cb)
+{
+ struct mlx5_action_construct_data *act_data;
+
+ act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
+ if (!act_data)
+ return -1;
+ act_data->indirect_list.cb = cb;
+ LIST_INSERT_HEAD(&acts->act_list, act_data, next);
+ return 0;
+}
/**
* Append dynamic encap action to the dynamic action list.
*
@@ -1383,6 +1417,48 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev,
return 0;
}
+static struct mlx5dr_action *
+flow_hw_mirror_action(const struct rte_flow_action *action)
+{
+ struct mlx5_mirror *mirror = (void *)(uintptr_t)action->conf;
+
+ return mirror->mirror_action;
+}
+
+static int
+table_template_translate_indirect_list(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *mask,
+ struct mlx5_hw_actions *acts,
+ uint16_t action_src,
+ uint16_t action_dst)
+{
+ int ret;
+ bool is_masked = action->conf && mask->conf;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ enum mlx5_indirect_list_type type;
+
+ if (!action->conf)
+ return -EINVAL;
+ type = mlx5_get_indirect_list_type(action->conf);
+ switch (type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
+ if (is_masked) {
+ acts->rule_acts[action_dst].action = flow_hw_mirror_action(action);
+ } else {
+ ret = flow_hw_act_data_indirect_list_append
+ (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
+ action_src, action_dst, flow_hw_mirror_action);
+ if (ret)
+ return ret;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
/**
* Translate rte_flow actions to DR action.
*
@@ -1419,7 +1495,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
struct rte_flow_action *actions = at->actions;
struct rte_flow_action *action_start = actions;
struct rte_flow_action *masks = at->masks;
- enum mlx5dr_action_type refmt_type = 0;
+ enum mlx5dr_action_type refmt_type = MLX5DR_ACTION_TYP_LAST;
const struct rte_flow_action_raw_encap *raw_encap_data;
const struct rte_flow_item *enc_item = NULL, *enc_item_m = NULL;
uint16_t reformat_src = 0;
@@ -1433,7 +1509,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
uint16_t action_pos;
uint16_t jump_pos;
uint32_t ct_idx;
- int err;
+ int ret, err;
uint32_t target_grp = 0;
int table_type;
@@ -1445,7 +1521,20 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
else
type = MLX5DR_TABLE_TYPE_NIC_RX;
for (; !actions_end; actions++, masks++) {
- switch (actions->type) {
+ switch ((int)actions->type) {
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
+ action_pos = at->actions_off[actions - at->actions];
+ if (!attr->group) {
+ DRV_LOG(ERR, "Indirect action is not supported in root table.");
+ goto err;
+ }
+ ret = table_template_translate_indirect_list
+ (dev, actions, masks, acts,
+ actions - action_start,
+ action_pos);
+ if (ret)
+ goto err;
+ break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
action_pos = at->actions_off[actions - at->actions];
if (!attr->group) {
@@ -2301,7 +2390,11 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
MLX5_ASSERT(action->type ==
RTE_FLOW_ACTION_TYPE_INDIRECT ||
(int)action->type == act_data->type);
- switch (act_data->type) {
+ switch ((int)act_data->type) {
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
+ rule_acts[act_data->action_dst].action =
+ act_data->indirect_list.cb(action);
+ break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
if (flow_hw_shared_action_construct
(dev, queue, action, table, it_idx,
@@ -4366,6 +4459,8 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev,
switch (action->type) {
case RTE_FLOW_ACTION_TYPE_VOID:
break;
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
+ break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
ret = flow_hw_validate_action_indirect(dev, action,
mask,
@@ -4607,6 +4702,28 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
return 0;
}
+
+static int
+flow_hw_template_actions_list(struct rte_flow_actions_template *at,
+ unsigned int action_src,
+ enum mlx5dr_action_type *action_types,
+ uint16_t *curr_off)
+{
+ enum mlx5_indirect_list_type list_type;
+
+ list_type = mlx5_get_indirect_list_type(at->actions[action_src].conf);
+ switch (list_type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
+ action_template_set_type(at, action_types, action_src, curr_off,
+ MLX5DR_ACTION_TYP_DEST_ARRAY);
+ break;
+ default:
+ DRV_LOG(ERR, "Unsupported indirect list type");
+ return -EINVAL;
+ }
+ return 0;
+}
+
/**
* Create DR action template based on a provided sequence of flow actions.
*
@@ -4639,6 +4756,12 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
switch (at->actions[i].type) {
case RTE_FLOW_ACTION_TYPE_VOID:
break;
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
+ ret = flow_hw_template_actions_list(at, i, action_types,
+ &curr_off);
+ if (ret)
+ return NULL;
+ break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
ret = flow_hw_dr_actions_template_handle_shared
(&at->masks[i],
@@ -5119,6 +5242,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
* Need to restore the indirect action index from action conf here.
*/
case RTE_FLOW_ACTION_TYPE_INDIRECT:
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
at->actions[i].conf = actions->conf;
at->masks[i].conf = masks->conf;
break;
@@ -9354,6 +9478,484 @@ flow_hw_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
return flow_hw_get_q_aged_flows(dev, 0, contexts, nb_contexts, error);
}
+static void
+mlx5_mirror_destroy_clone(struct rte_eth_dev *dev,
+ struct mlx5_mirror_clone *clone)
+{
+ switch (clone->type) {
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ mlx5_hrxq_release(dev,
+ ((struct mlx5_hrxq *)(clone->action_ctx))->idx);
+ break;
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ flow_hw_jump_release(dev, clone->action_ctx);
+ break;
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ default:
+ break;
+ }
+}
+
+void
+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release)
+{
+ uint32_t i;
+
+ if (mirror->entry.le_prev)
+ LIST_REMOVE(mirror, entry);
+ for(i = 0; i < mirror->clones_num; i++)
+ mlx5_mirror_destroy_clone(dev, &mirror->clone[i]);
+ if (mirror->mirror_action)
+ mlx5dr_action_destroy(mirror->mirror_action);
+ if (release)
+ mlx5_free(mirror);
+}
+
+static inline enum mlx5dr_table_type
+get_mlx5dr_table_type(const struct rte_flow_attr *attr)
+{
+ enum mlx5dr_table_type type;
+
+ if (attr->transfer)
+ type = MLX5DR_TABLE_TYPE_FDB;
+ else if (attr->egress)
+ type = MLX5DR_TABLE_TYPE_NIC_TX;
+ else
+ type = MLX5DR_TABLE_TYPE_NIC_RX;
+ return type;
+}
+
+static __rte_always_inline bool
+mlx5_mirror_terminal_action(const struct rte_flow_action *action)
+{
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+static bool
+mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ switch(action->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ if (priv->sh->esw_mode)
+ return false;
+ break;
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ if (!priv->sh->esw_mode)
+ return false;
+ if (action[0].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP &&
+ action[1].type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
+ return false;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+/**
+ * Valid mirror actions list includes one or two SAMPLE actions
+ * followed by JUMP.
+ *
+ * @return
+ * Number of mirrors *action* list was valid.
+ * -EINVAL otherwise.
+ */
+static int
+mlx5_hw_mirror_actions_list_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions)
+{
+ if (actions[0].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
+ int i = 1;
+ bool valid;
+ const struct rte_flow_action_sample *sample = actions[0].conf;
+ valid = mlx5_mirror_validate_sample_action(dev, sample->actions);
+ if (!valid)
+ return -EINVAL;
+ if (actions[1].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
+ i = 2;
+ sample = actions[1].conf;
+ valid = mlx5_mirror_validate_sample_action(dev, sample->actions);
+ if (!valid)
+ return -EINVAL;
+ }
+ return mlx5_mirror_terminal_action(actions + i) ? i + 1 : -EINVAL;
+ }
+ return -EINVAL;
+}
+
+static int
+mirror_format_tir(struct rte_eth_dev *dev,
+ struct mlx5_mirror_clone *clone,
+ const struct mlx5_flow_template_table_cfg *table_cfg,
+ const struct rte_flow_action *action,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ struct rte_flow_error *error)
+{
+ uint32_t hws_flags;
+ enum mlx5dr_table_type table_type;
+ struct mlx5_hrxq* tir_ctx;
+
+ table_type = get_mlx5dr_table_type(&table_cfg->attr.flow_attr);
+ hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
+ tir_ctx = flow_hw_tir_action_register(dev, hws_flags, action);
+ if (!tir_ctx)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action, "failed to create QUEUE action for mirror clone");
+ dest_attr->dest = tir_ctx->action;
+ clone->action_ctx = tir_ctx;
+ return 0;
+}
+
+static int
+mirror_format_jump(struct rte_eth_dev *dev,
+ struct mlx5_mirror_clone *clone,
+ const struct mlx5_flow_template_table_cfg *table_cfg,
+ const struct rte_flow_action *action,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_jump *jump_conf = action->conf;
+ struct mlx5_hw_jump_action *jump = flow_hw_jump_action_register
+ (dev, table_cfg,
+ jump_conf->group, error);
+
+ if (!jump)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action, "failed to create JUMP action for mirror clone");
+ dest_attr->dest = jump->hws_action;
+ clone->action_ctx = jump;
+ return 0;
+}
+
+static int
+mirror_format_port(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ struct rte_flow_error __rte_unused *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_ethdev *port_action = action->conf;
+
+ dest_attr->dest =priv->hw_vport[port_action->port_id];
+ return 0;
+}
+
+#define MLX5_CONST_ENCAP_ITEM(encap_type, ptr) \
+(((const struct encap_type *)(ptr))->definition)
+
+static int
+hw_mirror_clone_reformat(const struct rte_flow_action *actions,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ enum mlx5dr_action_type *action_type, bool decap)
+{
+ int ret;
+ uint8_t encap_buf[MLX5_ENCAP_MAX_LEN];
+ const struct rte_flow_item *encap_item = NULL;
+ const struct rte_flow_action_raw_encap *encap_conf = NULL;
+ typeof(dest_attr->reformat) *reformat = &dest_attr->reformat;
+
+ switch (actions[0].type) {
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ encap_conf = actions[0].conf;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ encap_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_vxlan_encap,
+ actions);
+ break;
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ encap_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_nvgre_encap,
+ actions);
+ break;
+ default:
+ return -EINVAL;
+ }
+ *action_type = decap ?
+ MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3 :
+ MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
+ if (encap_item) {
+ ret = flow_dv_convert_encap_data(encap_item, encap_buf,
+ &reformat->reformat_data_sz, NULL);
+ if (ret)
+ return -EINVAL;
+ reformat->reformat_data = (void *)(uintptr_t)encap_buf;
+ } else {
+ reformat->reformat_data = (void *)(uintptr_t)encap_conf->data;
+ reformat->reformat_data_sz = encap_conf->size;
+ }
+ return 0;
+}
+
+static int
+hw_mirror_format_clone(struct rte_eth_dev *dev,
+ struct mlx5_mirror_clone *clone,
+ const struct mlx5_flow_template_table_cfg *table_cfg,
+ const struct rte_flow_action *actions,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ struct rte_flow_error *error)
+{
+ int ret;
+ uint32_t i;
+ bool decap_seen = false;
+
+ for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
+ dest_attr->action_type[i] = mlx5_hw_dr_action_types[actions[i].type];
+ switch(actions[i].type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ ret = mirror_format_tir(dev, clone, table_cfg,
+ &actions[i], dest_attr, error);
+ if (ret)
+ return ret;
+ break;
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ ret = mirror_format_port(dev, &actions[i],
+ dest_attr, error);
+ if (ret)
+ return ret;
+ break;
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ ret = mirror_format_jump(dev, clone, table_cfg,
+ &actions[i], dest_attr, error);
+ if (ret)
+ return ret;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ decap_seen = true;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ ret = hw_mirror_clone_reformat(&actions[i], dest_attr,
+ &dest_attr->action_type[i],
+ decap_seen);
+ if (ret < 0)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ &actions[i],
+ "failed to create reformat action");
+ break;
+ default:
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ &actions[i], "unsupported sample action");
+ }
+ clone->type = actions->type;
+ }
+ dest_attr->action_type[i] = MLX5DR_ACTION_TYP_LAST;
+ return 0;
+}
+
+static struct rte_flow_action_list_handle *
+mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
+ const struct mlx5_flow_template_table_cfg *table_cfg,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ uint32_t hws_flags;
+ int ret = 0, i, clones_num;
+ struct mlx5_mirror *mirror;
+ enum mlx5dr_table_type table_type;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5dr_action_dest_attr mirror_attr[MLX5_MIRROR_MAX_CLONES_NUM + 1];
+ enum mlx5dr_action_type array_action_types[MLX5_MIRROR_MAX_CLONES_NUM + 1]
+ [MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN + 1];
+
+ memset(mirror_attr, 0, sizeof(mirror_attr));
+ memset(array_action_types, 0, sizeof(array_action_types));
+ table_type = get_mlx5dr_table_type(&table_cfg->attr.flow_attr);
+ hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
+ clones_num = mlx5_hw_mirror_actions_list_validate(dev, actions);
+ if (clones_num < 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Invalid mirror list format");
+ return NULL;
+ }
+ mirror = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mirror),
+ 0, SOCKET_ID_ANY);
+ if (!mirror) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Failed to allocate mirror context");
+ return NULL;
+ }
+ mirror->type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
+ mirror->clones_num = clones_num;
+ for (i = 0; i < clones_num; i++) {
+ const struct rte_flow_action *clone_actions;
+
+ mirror_attr[i].action_type = array_action_types[i];
+ if (actions[i].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
+ const struct rte_flow_action_sample *sample = actions[i].conf;
+
+ clone_actions = sample->actions;
+ } else {
+ clone_actions = &actions[i];
+ }
+ ret = hw_mirror_format_clone(dev, &mirror->clone[i], table_cfg,
+ clone_actions, &mirror_attr[i],
+ error);
+
+ if (ret)
+ goto error;
+
+ }
+ hws_flags |= MLX5DR_ACTION_FLAG_SHARED;
+ mirror->mirror_action = mlx5dr_action_create_dest_array(priv->dr_ctx,
+ clones_num,
+ mirror_attr,
+ hws_flags);
+ if (!mirror->mirror_action) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Failed to create HWS mirror action");
+ goto error;
+ }
+
+ LIST_INSERT_HEAD(&priv->indirect_list_head,
+ (struct mlx5_indirect_list *)mirror, entry);
+ return (struct rte_flow_action_list_handle *)mirror;
+
+error:
+ mlx5_hw_mirror_destroy(dev, mirror, true);
+ return NULL;
+}
+
+static struct rte_flow_action_list_handle *
+flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data,
+ struct rte_flow_error *error)
+{
+ struct mlx5_hw_q_job *job = NULL;
+ bool push = flow_hw_action_push(attr);
+ struct rte_flow_action_list_handle *handle;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct mlx5_flow_template_table_cfg table_cfg = {
+ .external = true,
+ .attr = {
+ .flow_attr = {
+ .ingress = conf->ingress,
+ .egress = conf->egress,
+ .transfer = conf->transfer
+ }
+ }
+ };
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "No action list");
+ return NULL;
+ }
+ if (attr) {
+ job = flow_hw_action_job_init(priv, queue, NULL, user_data,
+ NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
+ error);
+ if (!job)
+ return NULL;
+ }
+ switch (actions[0].type) {
+ case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ handle = mlx5_hw_mirror_handle_create(dev, &table_cfg,
+ actions, error);
+ break;
+ default:
+ handle = NULL;
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Invalid list");
+ }
+ if (job) {
+ job->action = handle;
+ flow_hw_action_finalize(dev, queue, job, push, false,
+ handle != NULL);
+ }
+ return handle;
+}
+
+static struct rte_flow_action_list_handle *
+flow_hw_action_list_handle_create(struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ return flow_hw_async_action_list_handle_create(dev, MLX5_HW_INV_QUEUE,
+ NULL, conf, actions,
+ NULL, error);
+}
+
+static int
+flow_hw_async_action_list_handle_destroy
+ (struct rte_eth_dev *dev, uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ struct rte_flow_action_list_handle *handle,
+ void *user_data, struct rte_flow_error *error)
+{
+ int ret = 0;
+ struct mlx5_hw_q_job *job = NULL;
+ bool push = flow_hw_action_push(attr);
+ struct mlx5_priv *priv = dev->data->dev_private;
+ enum mlx5_indirect_list_type type =
+ mlx5_get_indirect_list_type((void *)handle);
+
+ if (attr) {
+ job = flow_hw_action_job_init(priv, queue, NULL, user_data,
+ NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
+ error);
+ if (!job)
+ return rte_errno;
+ }
+ switch(type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
+ mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle, false);
+ break;
+ default:
+ handle = NULL;
+ ret = rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "Invalid indirect list handle");
+ }
+ if (job) {
+ job->action = handle;
+ flow_hw_action_finalize(dev, queue, job, push, false,
+ handle != NULL);
+ }
+ mlx5_free(handle);
+ return ret;
+}
+
+static int
+flow_hw_action_list_handle_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_action_list_handle *handle,
+ struct rte_flow_error *error)
+{
+ return flow_hw_async_action_list_handle_destroy(dev, MLX5_HW_INV_QUEUE,
+ NULL, handle, NULL,
+ error);
+}
+
const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.info_get = flow_hw_info_get,
.configure = flow_hw_configure,
@@ -9382,6 +9984,12 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.action_update = flow_hw_action_update,
.action_query = flow_hw_action_query,
.action_query_update = flow_hw_action_query_update,
+ .action_list_handle_create = flow_hw_action_list_handle_create,
+ .action_list_handle_destroy = flow_hw_action_list_handle_destroy,
+ .async_action_list_handle_create =
+ flow_hw_async_action_list_handle_create,
+ .async_action_list_handle_destroy =
+ flow_hw_async_action_list_handle_destroy,
.query = flow_hw_query,
.get_aged_flows = flow_hw_get_aged_flows,
.get_q_aged_flows = flow_hw_get_q_aged_flows,
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v2 09/16] net/mlx5: fix mirror action validation
2023-10-16 18:42 ` [PATCH v2 01/16] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
` (6 preceding siblings ...)
2023-10-16 18:42 ` [PATCH v2 08/16] net/mlx5: support HWS mirror action Gregory Etelson
@ 2023-10-16 18:42 ` Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 10/16] net/mlx5: fix in shared counter and age template action create Gregory Etelson
` (7 subsequent siblings)
15 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-16 18:42 UTC (permalink / raw)
To: dev
Cc: getelson, ,
. .
/patches/upstream-pmd-indirect-actions-list/v2/v2-0000-cover-letter
. patch, Matan Azrad, Viacheslav Ovsiienko, Ori Kam,
Suanming Mou
HWS mirror flow action validation rejected flows in the NIC domain
if PMD FDB mode was active.
The patch allows NIC mirror action in FDB mode.
Fixes: 0284c9b82ee8 ("net/mlx5: support HWS mirror action")
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
---
drivers/net/mlx5/mlx5_flow_hw.c | 20 +++++++++++++-------
1 file changed, 13 insertions(+), 7 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 44ed23b1fd..910d42a5f5 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -9547,14 +9547,15 @@ mlx5_mirror_terminal_action(const struct rte_flow_action *action)
static bool
mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
- const struct rte_flow_action *action)
+ const struct rte_flow_attr *flow_attr,
+ const struct rte_flow_action *action)
{
struct mlx5_priv *priv = dev->data->dev_private;
switch(action->type) {
case RTE_FLOW_ACTION_TYPE_QUEUE:
case RTE_FLOW_ACTION_TYPE_RSS:
- if (priv->sh->esw_mode)
+ if (flow_attr->transfer)
return false;
break;
case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
@@ -9562,7 +9563,7 @@ mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
- if (!priv->sh->esw_mode)
+ if (!priv->sh->esw_mode && !flow_attr->transfer)
return false;
if (action[0].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP &&
action[1].type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
@@ -9584,19 +9585,22 @@ mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
*/
static int
mlx5_hw_mirror_actions_list_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *flow_attr,
const struct rte_flow_action *actions)
{
if (actions[0].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
int i = 1;
bool valid;
const struct rte_flow_action_sample *sample = actions[0].conf;
- valid = mlx5_mirror_validate_sample_action(dev, sample->actions);
+ valid = mlx5_mirror_validate_sample_action(dev, flow_attr,
+ sample->actions);
if (!valid)
return -EINVAL;
if (actions[1].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
i = 2;
sample = actions[1].conf;
- valid = mlx5_mirror_validate_sample_action(dev, sample->actions);
+ valid = mlx5_mirror_validate_sample_action(dev, flow_attr,
+ sample->actions);
if (!valid)
return -EINVAL;
}
@@ -9780,15 +9784,17 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
struct mlx5_mirror *mirror;
enum mlx5dr_table_type table_type;
struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_attr *flow_attr = &table_cfg->attr.flow_attr;
struct mlx5dr_action_dest_attr mirror_attr[MLX5_MIRROR_MAX_CLONES_NUM + 1];
enum mlx5dr_action_type array_action_types[MLX5_MIRROR_MAX_CLONES_NUM + 1]
[MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN + 1];
memset(mirror_attr, 0, sizeof(mirror_attr));
memset(array_action_types, 0, sizeof(array_action_types));
- table_type = get_mlx5dr_table_type(&table_cfg->attr.flow_attr);
+ table_type = get_mlx5dr_table_type(flow_attr);
hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
- clones_num = mlx5_hw_mirror_actions_list_validate(dev, actions);
+ clones_num = mlx5_hw_mirror_actions_list_validate(dev, flow_attr,
+ actions);
if (clones_num < 0) {
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
actions, "Invalid mirror list format");
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v2 10/16] net/mlx5: fix in shared counter and age template action create
2023-10-16 18:42 ` [PATCH v2 01/16] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
` (7 preceding siblings ...)
2023-10-16 18:42 ` [PATCH v2 09/16] net/mlx5: fix mirror action validation Gregory Etelson
@ 2023-10-16 18:42 ` Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 11/16] net/mlx5: fix modify field expansion for raw DECAP / ENCAP Gregory Etelson
` (6 subsequent siblings)
15 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-16 18:42 UTC (permalink / raw)
To: dev
Cc: getelson, ,
. .
/patches/upstream-pmd-indirect-actions-list/v2/v2-0000-cover-letter
. patch, stable, Matan Azrad, Viacheslav Ovsiienko, Ori Kam,
Suanming Mou
Count and age actions in an HWS template are translated into the same
DR5 action.
The PMD maintains a dedicated variable, `cnt_off`, that points to the
action's location in the DR5 array.
The current PMD did not initialize the `cnt_off` variable during
shared counter / age action initialization.
Fixes: feb1f2fe2b76 ("net/mlx5: reformat HWS code")
Cc: stable@dpdk.org
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
---
drivers/net/mlx5/mlx5_flow_hw.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 910d42a5f5..efb2d512b7 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -4680,10 +4680,12 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
* Both AGE and COUNT action need counter, the first one fills
* the action_types array, and the second only saves the offset.
*/
- if (*cnt_off == UINT16_MAX)
+ if (*cnt_off == UINT16_MAX) {
+ *cnt_off = *curr_off;
action_template_set_type(at, action_types,
action_src, curr_off,
MLX5DR_ACTION_TYP_CTR);
+ }
at->actions_off[action_src] = *cnt_off;
break;
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v2 11/16] net/mlx5: fix modify field expansion for raw DECAP / ENCAP
2023-10-16 18:42 ` [PATCH v2 01/16] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
` (8 preceding siblings ...)
2023-10-16 18:42 ` [PATCH v2 10/16] net/mlx5: fix in shared counter and age template action create Gregory Etelson
@ 2023-10-16 18:42 ` Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 12/16] net/mlx5: refactor HWS code Gregory Etelson
` (5 subsequent siblings)
15 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-16 18:42 UTC (permalink / raw)
To: dev
Cc: getelson, ,
. .
/patches/upstream-pmd-indirect-actions-list/v2/v2-0000-cover-letter
. patch, Matan Azrad, Viacheslav Ovsiienko, Ori Kam,
Suanming Mou
Flow actions in an HWS actions template must be arranged according to
a pre-defined order.
MLX5 PMD handles RAW_DECAP / RAW_ENCAP actions sequence as a single
RAW ENCAP or DECAP flow action.
When the PMD scanned flow actions to locate the position where a
MODIFY_FIELD action could be inserted, it processed the
RAW_DECAP / RAW_ENCAP sequence as 2 separate actions. As a result,
the location selection was wrong.
The patch fixes RAW_DECAP / RAW_ENCAP sequence processing for
MODIFY_FIELD expansion.
Fixes: cf7f458 ("net/mlx5: add indirect QUOTA create/query/modify")
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
---
drivers/net/mlx5/mlx5_flow_hw.c | 36 +++++++++++++++++++++++++++++++--
1 file changed, 34 insertions(+), 2 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index efb2d512b7..7b060442ac 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -4283,6 +4283,28 @@ flow_hw_validate_action_raw_encap(struct rte_eth_dev *dev __rte_unused,
return 0;
}
+/**
+ * Process `... / raw_decap / raw_encap / ...` actions sequence.
+ * The PMD handles the sequence as a single encap or decap reformat action,
+ * depending on the raw_encap configuration.
+ *
+ * The function assumes that the sequence location in actions template list
+ * complies with relative HWS actions order for the required reformat.
+ */
+static uint64_t
+mlx5_decap_encap_reformat_type(const struct rte_flow_action *actions,
+ uint32_t encap_ind, uint64_t flags)
+{
+ const struct rte_flow_action_raw_encap *encap = actions[encap_ind].conf;
+
+ if ((flags & MLX5_FLOW_ACTION_DECAP) == 0)
+ return MLX5_FLOW_ACTION_ENCAP;
+ if (actions[encap_ind - 1].type != RTE_FLOW_ACTION_TYPE_RAW_DECAP)
+ return MLX5_FLOW_ACTION_ENCAP;
+ return encap->size >= MLX5_ENCAPSULATION_DECISION_SIZE ?
+ MLX5_FLOW_ACTION_ENCAP : MLX5_FLOW_ACTION_DECAP;
+}
+
static inline uint16_t
flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
struct rte_flow_action masks[],
@@ -4320,13 +4342,13 @@ flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
*/
for (i = act_num - 2; (int)i >= 0; i--) {
enum rte_flow_action_type type = actions[i].type;
+ uint64_t reformat_type;
if (type == RTE_FLOW_ACTION_TYPE_INDIRECT)
type = masks[i].type;
switch (type) {
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
- case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
case RTE_FLOW_ACTION_TYPE_DROP:
case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
case RTE_FLOW_ACTION_TYPE_JUMP:
@@ -4337,10 +4359,20 @@ flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
case RTE_FLOW_ACTION_TYPE_VOID:
case RTE_FLOW_ACTION_TYPE_END:
break;
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ reformat_type =
+ mlx5_decap_encap_reformat_type(actions, i,
+ flags);
+ if (reformat_type == MLX5_FLOW_ACTION_DECAP) {
+ i++;
+ goto insert;
+ }
+ if (flags & MLX5_FLOW_ACTION_DECAP)
+ i--;
+ break;
default:
i++; /* new MF inserted AFTER actions[i] */
goto insert;
- break;
}
}
i = 0;
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v2 12/16] net/mlx5: refactor HWS code
2023-10-16 18:42 ` [PATCH v2 01/16] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
` (9 preceding siblings ...)
2023-10-16 18:42 ` [PATCH v2 11/16] net/mlx5: fix modify field expansion for raw DECAP / ENCAP Gregory Etelson
@ 2023-10-16 18:42 ` Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 13/16] net/mlx5: fix RTE action location tracking in a template Gregory Etelson
` (4 subsequent siblings)
15 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-16 18:42 UTC (permalink / raw)
To: dev
Cc: getelson, ,
. .
/patches/upstream-pmd-indirect-actions-list/v2/v2-0000-cover-letter
. patch, Matan Azrad, Viacheslav Ovsiienko, Ori Kam,
Suanming Mou
1. In `rte_flow_actions_template`, rename `actions_off` into `dr_off`.
2. Remove duplicated code in template table creation.
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
---
drivers/net/mlx5/mlx5_flow.h | 2 +-
drivers/net/mlx5/mlx5_flow_hw.c | 117 ++++++++++++++------------------
2 files changed, 52 insertions(+), 67 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index f6a752475d..a31120cd78 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1331,7 +1331,7 @@ struct rte_flow_actions_template {
uint64_t action_flags; /* Bit-map of all valid action in template. */
uint16_t dr_actions_num; /* Amount of DR rules actions. */
uint16_t actions_num; /* Amount of flow actions */
- uint16_t *actions_off; /* DR action offset for given rte action offset. */
+ uint16_t *dr_off; /* DR action offset for given rte action offset. */
uint16_t reformat_off; /* Offset of DR reformat action. */
uint16_t mhdr_off; /* Offset of DR modify header action. */
uint32_t refcnt; /* Reference counter. */
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 7b060442ac..53641ae2d5 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -1506,7 +1506,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
uint32_t type;
bool reformat_used = false;
unsigned int of_vlan_offset;
- uint16_t action_pos;
+ uint16_t dr_pos;
uint16_t jump_pos;
uint32_t ct_idx;
int ret, err;
@@ -1521,9 +1521,9 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
else
type = MLX5DR_TABLE_TYPE_NIC_RX;
for (; !actions_end; actions++, masks++) {
+ dr_pos = at->dr_off[actions - at->actions];
switch ((int)actions->type) {
case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
- action_pos = at->actions_off[actions - at->actions];
if (!attr->group) {
DRV_LOG(ERR, "Indirect action is not supported in root table.");
goto err;
@@ -1531,61 +1531,57 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
ret = table_template_translate_indirect_list
(dev, actions, masks, acts,
actions - action_start,
- action_pos);
+ dr_pos);
if (ret)
goto err;
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
- action_pos = at->actions_off[actions - at->actions];
if (!attr->group) {
DRV_LOG(ERR, "Indirect action is not supported in root table.");
goto err;
}
if (actions->conf && masks->conf) {
if (flow_hw_shared_action_translate
- (dev, actions, acts, actions - action_start, action_pos))
+ (dev, actions, acts, actions - action_start, dr_pos))
goto err;
} else if (__flow_hw_act_data_general_append
- (priv, acts, actions->type,
- actions - action_start, action_pos)){
+ (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT,
+ actions - action_start, dr_pos)){
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_VOID:
break;
case RTE_FLOW_ACTION_TYPE_DROP:
- action_pos = at->actions_off[actions - at->actions];
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
priv->hw_drop[!!attr->group];
break;
case RTE_FLOW_ACTION_TYPE_MARK:
- action_pos = at->actions_off[actions - at->actions];
acts->mark = true;
if (masks->conf &&
((const struct rte_flow_action_mark *)
masks->conf)->id)
- acts->rule_acts[action_pos].tag.value =
+ acts->rule_acts[dr_pos].tag.value =
mlx5_flow_mark_set
(((const struct rte_flow_action_mark *)
(actions->conf))->id);
else if (__flow_hw_act_data_general_append(priv, acts,
- actions->type, actions - action_start, action_pos))
+ actions->type, actions - action_start, dr_pos))
goto err;
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
priv->hw_tag[!!attr->group];
__atomic_fetch_add(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED);
flow_hw_rxq_flag_set(dev, true);
break;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
- action_pos = at->actions_off[actions - at->actions];
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
priv->hw_push_vlan[type];
if (is_template_masked_push_vlan(masks->conf))
- acts->rule_acts[action_pos].push_vlan.vlan_hdr =
+ acts->rule_acts[dr_pos].push_vlan.vlan_hdr =
vlan_hdr_to_be32(actions);
else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos))
+ actions - action_start, dr_pos))
goto err;
of_vlan_offset = is_of_vlan_pcp_present(actions) ?
MLX5_HW_VLAN_PUSH_PCP_IDX :
@@ -1594,12 +1590,10 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
masks += of_vlan_offset;
break;
case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
- action_pos = at->actions_off[actions - at->actions];
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
priv->hw_pop_vlan[type];
break;
case RTE_FLOW_ACTION_TYPE_JUMP:
- action_pos = at->actions_off[actions - at->actions];
if (masks->conf &&
((const struct rte_flow_action_jump *)
masks->conf)->group) {
@@ -1610,17 +1604,16 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
(dev, cfg, jump_group, error);
if (!acts->jump)
goto err;
- acts->rule_acts[action_pos].action = (!!attr->group) ?
- acts->jump->hws_action :
- acts->jump->root_action;
+ acts->rule_acts[dr_pos].action = (!!attr->group) ?
+ acts->jump->hws_action :
+ acts->jump->root_action;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)){
+ actions - action_start, dr_pos)){
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_QUEUE:
- action_pos = at->actions_off[actions - at->actions];
if (masks->conf &&
((const struct rte_flow_action_queue *)
masks->conf)->index) {
@@ -1630,16 +1623,15 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
actions);
if (!acts->tir)
goto err;
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
acts->tir->action;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)) {
+ actions - action_start, dr_pos)) {
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_RSS:
- action_pos = at->actions_off[actions - at->actions];
if (actions->conf && masks->conf) {
acts->tir = flow_hw_tir_action_register
(dev,
@@ -1647,11 +1639,11 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
actions);
if (!acts->tir)
goto err;
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
acts->tir->action;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)) {
+ actions - action_start, dr_pos)) {
goto err;
}
break;
@@ -1720,11 +1712,10 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
NULL,
"Send to kernel action on root table is not supported in HW steering mode");
}
- action_pos = at->actions_off[actions - at->actions];
table_type = attr->ingress ? MLX5DR_TABLE_TYPE_NIC_RX :
((attr->egress) ? MLX5DR_TABLE_TYPE_NIC_TX :
- MLX5DR_TABLE_TYPE_FDB);
- acts->rule_acts[action_pos].action = priv->hw_send_to_kernel[table_type];
+ MLX5DR_TABLE_TYPE_FDB);
+ acts->rule_acts[dr_pos].action = priv->hw_send_to_kernel[table_type];
break;
case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
err = flow_hw_modify_field_compile(dev, attr, action_start,
@@ -1744,10 +1735,9 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
action_start += 1;
break;
case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
- action_pos = at->actions_off[actions - at->actions];
if (flow_hw_represented_port_compile
(dev, attr, action_start, actions,
- masks, acts, action_pos, error))
+ masks, acts, dr_pos, error))
goto err;
break;
case RTE_FLOW_ACTION_TYPE_METER:
@@ -1756,19 +1746,18 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
* Calculated DR offset is stored only for ASO_METER and FT
* is assumed to be the next action.
*/
- action_pos = at->actions_off[actions - at->actions];
- jump_pos = action_pos + 1;
+ jump_pos = dr_pos + 1;
if (actions->conf && masks->conf &&
((const struct rte_flow_action_meter *)
masks->conf)->mtr_id) {
err = flow_hw_meter_compile(dev, cfg,
- action_pos, jump_pos, actions, acts, error);
+ dr_pos, jump_pos, actions, acts, error);
if (err)
goto err;
} else if (__flow_hw_act_data_general_append(priv, acts,
- actions->type,
+ actions->type,
actions - action_start,
- action_pos))
+ dr_pos))
goto err;
break;
case RTE_FLOW_ACTION_TYPE_AGE:
@@ -1781,11 +1770,10 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
NULL,
"Age action on root table is not supported in HW steering mode");
}
- action_pos = at->actions_off[actions - at->actions];
if (__flow_hw_act_data_general_append(priv, acts,
- actions->type,
+ actions->type,
actions - action_start,
- action_pos))
+ dr_pos))
goto err;
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
@@ -1806,49 +1794,46 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
* counter.
*/
break;
- action_pos = at->actions_off[actions - at->actions];
if (masks->conf &&
((const struct rte_flow_action_count *)
masks->conf)->id) {
- err = flow_hw_cnt_compile(dev, action_pos, acts);
+ err = flow_hw_cnt_compile(dev, dr_pos, acts);
if (err)
goto err;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)) {
+ actions - action_start, dr_pos)) {
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
- action_pos = at->actions_off[actions - at->actions];
if (masks->conf) {
ct_idx = MLX5_ACTION_CTX_CT_GET_IDX
((uint32_t)(uintptr_t)actions->conf);
if (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE, ct_idx,
- &acts->rule_acts[action_pos]))
+ &acts->rule_acts[dr_pos]))
goto err;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)) {
+ actions - action_start, dr_pos)) {
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_METER_MARK:
- action_pos = at->actions_off[actions - at->actions];
if (actions->conf && masks->conf &&
((const struct rte_flow_action_meter_mark *)
masks->conf)->profile) {
err = flow_hw_meter_mark_compile(dev,
- action_pos, actions,
- acts->rule_acts,
- &acts->mtr_id,
- MLX5_HW_INV_QUEUE);
+ dr_pos, actions,
+ acts->rule_acts,
+ &acts->mtr_id,
+ MLX5_HW_INV_QUEUE);
if (err)
goto err;
} else if (__flow_hw_act_data_general_append(priv, acts,
- actions->type,
+ actions->type,
actions - action_start,
- action_pos))
+ dr_pos))
goto err;
break;
case RTE_FLOW_ACTION_TYPE_END:
@@ -4681,7 +4666,7 @@ action_template_set_type(struct rte_flow_actions_template *at,
unsigned int action_src, uint16_t *curr_off,
enum mlx5dr_action_type type)
{
- at->actions_off[action_src] = *curr_off;
+ at->dr_off[action_src] = *curr_off;
action_types[*curr_off] = type;
*curr_off = *curr_off + 1;
}
@@ -4718,7 +4703,7 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
action_src, curr_off,
MLX5DR_ACTION_TYP_CTR);
}
- at->actions_off[action_src] = *cnt_off;
+ at->dr_off[action_src] = *cnt_off;
break;
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
action_template_set_type(at, action_types, action_src, curr_off,
@@ -4838,7 +4823,7 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
}
break;
case RTE_FLOW_ACTION_TYPE_METER:
- at->actions_off[i] = curr_off;
+ at->dr_off[i] = curr_off;
action_types[curr_off++] = MLX5DR_ACTION_TYP_ASO_METER;
if (curr_off >= MLX5_HW_MAX_ACTS)
goto err_actions_num;
@@ -4846,14 +4831,14 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
break;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
type = mlx5_hw_dr_action_types[at->actions[i].type];
- at->actions_off[i] = curr_off;
+ at->dr_off[i] = curr_off;
action_types[curr_off++] = type;
i += is_of_vlan_pcp_present(at->actions + i) ?
MLX5_HW_VLAN_PUSH_PCP_IDX :
MLX5_HW_VLAN_PUSH_VID_IDX;
break;
case RTE_FLOW_ACTION_TYPE_METER_MARK:
- at->actions_off[i] = curr_off;
+ at->dr_off[i] = curr_off;
action_types[curr_off++] = MLX5DR_ACTION_TYP_ASO_METER;
if (curr_off >= MLX5_HW_MAX_ACTS)
goto err_actions_num;
@@ -4869,11 +4854,11 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
cnt_off = curr_off++;
action_types[cnt_off] = MLX5DR_ACTION_TYP_CTR;
}
- at->actions_off[i] = cnt_off;
+ at->dr_off[i] = cnt_off;
break;
default:
type = mlx5_hw_dr_action_types[at->actions[i].type];
- at->actions_off[i] = curr_off;
+ at->dr_off[i] = curr_off;
action_types[curr_off++] = type;
break;
}
@@ -5234,7 +5219,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
if (mask_len <= 0)
return NULL;
len += RTE_ALIGN(mask_len, 16);
- len += RTE_ALIGN(act_num * sizeof(*at->actions_off), 16);
+ len += RTE_ALIGN(act_num * sizeof(*at->dr_off), 16);
at = mlx5_malloc(MLX5_MEM_ZERO, len + sizeof(*at),
RTE_CACHE_LINE_SIZE, rte_socket_id());
if (!at) {
@@ -5258,10 +5243,10 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
if (mask_len <= 0)
goto error;
/* DR actions offsets in the third part. */
- at->actions_off = (uint16_t *)((uint8_t *)at->masks + mask_len);
+ at->dr_off = (uint16_t *)((uint8_t *)at->masks + mask_len);
at->actions_num = act_num;
for (i = 0; i < at->actions_num; ++i)
- at->actions_off[i] = UINT16_MAX;
+ at->dr_off[i] = UINT16_MAX;
at->reformat_off = UINT16_MAX;
at->mhdr_off = UINT16_MAX;
at->rx_cpy_pos = pos;
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v2 13/16] net/mlx5: fix RTE action location tracking in a template
2023-10-16 18:42 ` [PATCH v2 01/16] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
` (10 preceding siblings ...)
2023-10-16 18:42 ` [PATCH v2 12/16] net/mlx5: refactor HWS code Gregory Etelson
@ 2023-10-16 18:42 ` Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 14/16] net/mlx5: fix mirror redirect action Gregory Etelson
` (3 subsequent siblings)
15 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-16 18:42 UTC (permalink / raw)
To: dev
Cc: getelson, ,
. .
/patches/upstream-pmd-indirect-actions-list/v2/v2-0000-cover-letter
. patch, Matan Azrad, Viacheslav Ovsiienko, Ori Kam,
Suanming Mou, Bing Zhao
PMD can implicitly add flow actions to application actions template.
If the PMD adds actions to a template, it must track the location of the
original application actions in the modified template.
The patch adds tracking ability for the original action in a template.
Fixes: ddb68e4 ("net/mlx5: add extended metadata mode for HWS")
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
---
drivers/net/mlx5/mlx5_flow.h | 2 +-
drivers/net/mlx5/mlx5_flow_hw.c | 90 ++++++++++++++++-----------------
2 files changed, 45 insertions(+), 47 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index a31120cd78..19b26ad333 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1332,10 +1332,10 @@ struct rte_flow_actions_template {
uint16_t dr_actions_num; /* Amount of DR rules actions. */
uint16_t actions_num; /* Amount of flow actions */
uint16_t *dr_off; /* DR action offset for given rte action offset. */
+ uint16_t *src_off; /* RTE action displacement from app. template */
uint16_t reformat_off; /* Offset of DR reformat action. */
uint16_t mhdr_off; /* Offset of DR modify header action. */
uint32_t refcnt; /* Reference counter. */
- uint16_t rx_cpy_pos; /* Action position of Rx metadata to be copied. */
uint8_t flex_item; /* flex item index. */
};
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 53641ae2d5..213b0d5ae8 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -1015,11 +1015,11 @@ flow_hw_modify_field_init(struct mlx5_hw_modify_header_action *mhdr,
static __rte_always_inline int
flow_hw_modify_field_compile(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
- const struct rte_flow_action *action_start, /* Start of AT actions. */
const struct rte_flow_action *action, /* Current action from AT. */
const struct rte_flow_action *action_mask, /* Current mask from AT. */
struct mlx5_hw_actions *acts,
struct mlx5_hw_modify_header_action *mhdr,
+ uint16_t src_pos,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
@@ -1122,7 +1122,7 @@ flow_hw_modify_field_compile(struct rte_eth_dev *dev,
if (shared)
return 0;
ret = __flow_hw_act_data_hdr_modify_append(priv, acts, RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
- action - action_start, mhdr->pos,
+ src_pos, mhdr->pos,
cmds_start, cmds_end, shared,
field, dcopy, mask);
if (ret)
@@ -1181,11 +1181,10 @@ flow_hw_validate_compiled_modify_field(struct rte_eth_dev *dev,
static int
flow_hw_represented_port_compile(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
- const struct rte_flow_action *action_start,
const struct rte_flow_action *action,
const struct rte_flow_action *action_mask,
struct mlx5_hw_actions *acts,
- uint16_t action_dst,
+ uint16_t action_src, uint16_t action_dst,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
@@ -1241,7 +1240,7 @@ flow_hw_represented_port_compile(struct rte_eth_dev *dev,
} else {
ret = __flow_hw_act_data_general_append
(priv, acts, action->type,
- action - action_start, action_dst);
+ action_src, action_dst);
if (ret)
return rte_flow_error_set
(error, ENOMEM,
@@ -1493,7 +1492,6 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
const struct rte_flow_attr *attr = &table_attr->flow_attr;
struct rte_flow_action *actions = at->actions;
- struct rte_flow_action *action_start = actions;
struct rte_flow_action *masks = at->masks;
enum mlx5dr_action_type refmt_type = MLX5DR_ACTION_TYP_LAST;
const struct rte_flow_action_raw_encap *raw_encap_data;
@@ -1506,7 +1504,6 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
uint32_t type;
bool reformat_used = false;
unsigned int of_vlan_offset;
- uint16_t dr_pos;
uint16_t jump_pos;
uint32_t ct_idx;
int ret, err;
@@ -1521,7 +1518,10 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
else
type = MLX5DR_TABLE_TYPE_NIC_RX;
for (; !actions_end; actions++, masks++) {
- dr_pos = at->dr_off[actions - at->actions];
+ uint64_t pos = actions - at->actions;
+ uint16_t src_pos = pos - at->src_off[pos];
+ uint16_t dr_pos = at->dr_off[pos];
+
switch ((int)actions->type) {
case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
if (!attr->group) {
@@ -1529,9 +1529,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
goto err;
}
ret = table_template_translate_indirect_list
- (dev, actions, masks, acts,
- actions - action_start,
- dr_pos);
+ (dev, actions, masks, acts, src_pos, dr_pos);
if (ret)
goto err;
break;
@@ -1542,11 +1540,11 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
}
if (actions->conf && masks->conf) {
if (flow_hw_shared_action_translate
- (dev, actions, acts, actions - action_start, dr_pos))
+ (dev, actions, acts, src_pos, dr_pos))
goto err;
} else if (__flow_hw_act_data_general_append
(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT,
- actions - action_start, dr_pos)){
+ src_pos, dr_pos)){
goto err;
}
break;
@@ -1566,7 +1564,8 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
(((const struct rte_flow_action_mark *)
(actions->conf))->id);
else if (__flow_hw_act_data_general_append(priv, acts,
- actions->type, actions - action_start, dr_pos))
+ actions->type,
+ src_pos, dr_pos))
goto err;
acts->rule_acts[dr_pos].action =
priv->hw_tag[!!attr->group];
@@ -1581,7 +1580,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
vlan_hdr_to_be32(actions);
else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, dr_pos))
+ src_pos, dr_pos))
goto err;
of_vlan_offset = is_of_vlan_pcp_present(actions) ?
MLX5_HW_VLAN_PUSH_PCP_IDX :
@@ -1609,7 +1608,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
acts->jump->root_action;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, dr_pos)){
+ src_pos, dr_pos)){
goto err;
}
break;
@@ -1627,7 +1626,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
acts->tir->action;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, dr_pos)) {
+ src_pos, dr_pos)) {
goto err;
}
break;
@@ -1643,7 +1642,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
acts->tir->action;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, dr_pos)) {
+ src_pos, dr_pos)) {
goto err;
}
break;
@@ -1655,7 +1654,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
enc_item_m = ((const struct rte_flow_action_vxlan_encap *)
masks->conf)->definition;
reformat_used = true;
- reformat_src = actions - action_start;
+ reformat_src = src_pos;
refmt_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
break;
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
@@ -1666,7 +1665,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
enc_item_m = ((const struct rte_flow_action_nvgre_encap *)
masks->conf)->definition;
reformat_used = true;
- reformat_src = actions - action_start;
+ reformat_src = src_pos;
refmt_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
@@ -1696,7 +1695,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
refmt_type =
MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
}
- reformat_src = actions - action_start;
+ reformat_src = src_pos;
break;
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
reformat_used = true;
@@ -1718,26 +1717,16 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
acts->rule_acts[dr_pos].action = priv->hw_send_to_kernel[table_type];
break;
case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
- err = flow_hw_modify_field_compile(dev, attr, action_start,
- actions, masks, acts, &mhdr,
- error);
+ err = flow_hw_modify_field_compile(dev, attr, actions,
+ masks, acts, &mhdr,
+ src_pos, error);
if (err)
goto err;
- /*
- * Adjust the action source position for the following.
- * ... / MODIFY_FIELD: rx_cpy_pos / (QUEUE|RSS) / ...
- * The next action will be Q/RSS, there will not be
- * another adjustment and the real source position of
- * the following actions will be decreased by 1.
- * No change of the total actions in the new template.
- */
- if ((actions - action_start) == at->rx_cpy_pos)
- action_start += 1;
break;
case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
if (flow_hw_represented_port_compile
- (dev, attr, action_start, actions,
- masks, acts, dr_pos, error))
+ (dev, attr, actions,
+ masks, acts, src_pos, dr_pos, error))
goto err;
break;
case RTE_FLOW_ACTION_TYPE_METER:
@@ -1756,7 +1745,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
goto err;
} else if (__flow_hw_act_data_general_append(priv, acts,
actions->type,
- actions - action_start,
+ src_pos,
dr_pos))
goto err;
break;
@@ -1772,7 +1761,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
}
if (__flow_hw_act_data_general_append(priv, acts,
actions->type,
- actions - action_start,
+ src_pos,
dr_pos))
goto err;
break;
@@ -1802,7 +1791,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
goto err;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, dr_pos)) {
+ src_pos, dr_pos)) {
goto err;
}
break;
@@ -1815,7 +1804,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
goto err;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, dr_pos)) {
+ src_pos, dr_pos)) {
goto err;
}
break;
@@ -1832,7 +1821,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
goto err;
} else if (__flow_hw_act_data_general_append(priv, acts,
actions->type,
- actions - action_start,
+ src_pos,
dr_pos))
goto err;
break;
@@ -1916,7 +1905,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
if (shared_rfmt)
acts->rule_acts[at->reformat_off].reformat.offset = 0;
else if (__flow_hw_act_data_encap_append(priv, acts,
- (action_start + reformat_src)->type,
+ (at->actions + reformat_src)->type,
reformat_src, at->reformat_off, data_size))
goto err;
acts->encap_decap->shared = shared_rfmt;
@@ -4273,8 +4262,11 @@ flow_hw_validate_action_raw_encap(struct rte_eth_dev *dev __rte_unused,
* The PMD handles the sequence as a single encap or decap reformat action,
* depending on the raw_encap configuration.
*
- * The function assumes that the sequence location in actions template list
- * complies with relative HWS actions order for the required reformat.
+ * The function assumes that the raw_decap / raw_encap location
+ * in actions template list complies with relative HWS actions order:
+ * for the required reformat configuration:
+ * ENCAP configuration must appear before [JUMP|DROP|PORT]
+ * DECAP configuration must appear at the template head.
*/
static uint64_t
mlx5_decap_encap_reformat_type(const struct rte_flow_action *actions,
@@ -4352,7 +4344,7 @@ flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
i++;
goto insert;
}
- if (flags & MLX5_FLOW_ACTION_DECAP)
+ if (actions[i - 1].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP)
i--;
break;
default:
@@ -5131,6 +5123,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
struct rte_flow_action mf_actions[MLX5_HW_MAX_ACTS];
struct rte_flow_action mf_masks[MLX5_HW_MAX_ACTS];
uint32_t expand_mf_num = 0;
+ uint16_t src_off[MLX5_HW_MAX_ACTS] = {0, };
if (mlx5_flow_hw_actions_validate(dev, attr, actions, masks,
&action_flags, error))
@@ -5209,6 +5202,8 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
act_num,
expand_mf_num);
act_num += expand_mf_num;
+ for (i = pos + expand_mf_num; i < act_num; i++)
+ src_off[i] += expand_mf_num;
action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
}
act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, ra, error);
@@ -5220,6 +5215,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
return NULL;
len += RTE_ALIGN(mask_len, 16);
len += RTE_ALIGN(act_num * sizeof(*at->dr_off), 16);
+ len += RTE_ALIGN(act_num * sizeof(*at->src_off), 16);
at = mlx5_malloc(MLX5_MEM_ZERO, len + sizeof(*at),
RTE_CACHE_LINE_SIZE, rte_socket_id());
if (!at) {
@@ -5244,12 +5240,14 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
goto error;
/* DR actions offsets in the third part. */
at->dr_off = (uint16_t *)((uint8_t *)at->masks + mask_len);
+ at->src_off = RTE_PTR_ADD(at->dr_off,
+ RTE_ALIGN(act_num * sizeof(*at->dr_off), 16));
+ memcpy(at->src_off, src_off, act_num * sizeof(at->src_off[0]));
at->actions_num = act_num;
for (i = 0; i < at->actions_num; ++i)
at->dr_off[i] = UINT16_MAX;
at->reformat_off = UINT16_MAX;
at->mhdr_off = UINT16_MAX;
- at->rx_cpy_pos = pos;
for (i = 0; actions->type != RTE_FLOW_ACTION_TYPE_END;
actions++, masks++, i++) {
const struct rte_flow_action_modify_field *info;
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v2 14/16] net/mlx5: fix mirror redirect action
2023-10-16 18:42 ` [PATCH v2 01/16] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
` (11 preceding siblings ...)
2023-10-16 18:42 ` [PATCH v2 13/16] net/mlx5: fix RTE action location tracking in a template Gregory Etelson
@ 2023-10-16 18:42 ` Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 15/16] net/mlx5: support indirect list METER_MARK action Gregory Etelson
` (2 subsequent siblings)
15 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-16 18:42 UTC (permalink / raw)
To: dev
Cc: getelson, ,
. .
/patches/upstream-pmd-indirect-actions-list/v2/v2-0000-cover-letter
. patch, Matan Azrad, Viacheslav Ovsiienko, Ori Kam,
Suanming Mou
PMD used a buffer located in an expired stack frame to store
mirror reformat data.
The patch moves reformat buffer to the same context as the mirror
action creation.
Fixes: 0284c9b82ee8 ("net/mlx5: support HWS mirror action")
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
---
drivers/net/mlx5/mlx5_flow_hw.c | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 213b0d5ae8..ae017d2815 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -9690,11 +9690,11 @@ mirror_format_port(struct rte_eth_dev *dev,
static int
hw_mirror_clone_reformat(const struct rte_flow_action *actions,
- struct mlx5dr_action_dest_attr *dest_attr,
- enum mlx5dr_action_type *action_type, bool decap)
+ struct mlx5dr_action_dest_attr *dest_attr,
+ enum mlx5dr_action_type *action_type,
+ uint8_t *reformat_buf, bool decap)
{
int ret;
- uint8_t encap_buf[MLX5_ENCAP_MAX_LEN];
const struct rte_flow_item *encap_item = NULL;
const struct rte_flow_action_raw_encap *encap_conf = NULL;
typeof(dest_attr->reformat) *reformat = &dest_attr->reformat;
@@ -9718,11 +9718,11 @@ hw_mirror_clone_reformat(const struct rte_flow_action *actions,
MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3 :
MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
if (encap_item) {
- ret = flow_dv_convert_encap_data(encap_item, encap_buf,
+ ret = flow_dv_convert_encap_data(encap_item, reformat_buf,
&reformat->reformat_data_sz, NULL);
if (ret)
return -EINVAL;
- reformat->reformat_data = (void *)(uintptr_t)encap_buf;
+ reformat->reformat_data = reformat_buf;
} else {
reformat->reformat_data = (void *)(uintptr_t)encap_conf->data;
reformat->reformat_data_sz = encap_conf->size;
@@ -9736,7 +9736,7 @@ hw_mirror_format_clone(struct rte_eth_dev *dev,
const struct mlx5_flow_template_table_cfg *table_cfg,
const struct rte_flow_action *actions,
struct mlx5dr_action_dest_attr *dest_attr,
- struct rte_flow_error *error)
+ uint8_t *reformat_buf, struct rte_flow_error *error)
{
int ret;
uint32_t i;
@@ -9772,7 +9772,7 @@ hw_mirror_format_clone(struct rte_eth_dev *dev,
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
ret = hw_mirror_clone_reformat(&actions[i], dest_attr,
&dest_attr->action_type[i],
- decap_seen);
+ reformat_buf, decap_seen);
if (ret < 0)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -9802,6 +9802,7 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
enum mlx5dr_table_type table_type;
struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_attr *flow_attr = &table_cfg->attr.flow_attr;
+ uint8_t reformat_buf[MLX5_MIRROR_MAX_CLONES_NUM][MLX5_ENCAP_MAX_LEN];
struct mlx5dr_action_dest_attr mirror_attr[MLX5_MIRROR_MAX_CLONES_NUM + 1];
enum mlx5dr_action_type array_action_types[MLX5_MIRROR_MAX_CLONES_NUM + 1]
[MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN + 1];
@@ -9839,7 +9840,7 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
}
ret = hw_mirror_format_clone(dev, &mirror->clone[i], table_cfg,
clone_actions, &mirror_attr[i],
- error);
+ reformat_buf[i], error);
if (ret)
goto error;
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v2 15/16] net/mlx5: support indirect list METER_MARK action
2023-10-16 18:42 ` [PATCH v2 01/16] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
` (12 preceding siblings ...)
2023-10-16 18:42 ` [PATCH v2 14/16] net/mlx5: fix mirror redirect action Gregory Etelson
@ 2023-10-16 18:42 ` Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 16/16] net/mlx5: fix METER_MARK indirection list callback Gregory Etelson
2023-10-17 7:56 ` [PATCH v2 01/16] net/mlx5/hws: add support for reformat DevX object Suanming Mou
15 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-16 18:42 UTC (permalink / raw)
To: dev
Cc: getelson, ,
. .
/patches/upstream-pmd-indirect-actions-list/v2/v2-0000-cover-letter
. patch, Matan Azrad, Viacheslav Ovsiienko, Ori Kam,
Suanming Mou
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
---
drivers/net/mlx5/mlx5_flow.c | 69 +++++-
drivers/net/mlx5/mlx5_flow.h | 67 ++++-
drivers/net/mlx5/mlx5_flow_hw.c | 427 +++++++++++++++++++++++++++-----
3 files changed, 482 insertions(+), 81 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 693d1320e1..16fce9c64e 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -75,8 +75,11 @@ mlx5_indirect_list_handles_release(struct rte_eth_dev *dev)
switch (e->type) {
#ifdef HAVE_MLX5_HWS_SUPPORT
case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
- mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)e, true);
+ mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)e);
break;
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
+ mlx5_destroy_legacy_indirect(dev, e);
+ break;
#endif
default:
DRV_LOG(ERR, "invalid indirect list type");
@@ -1169,7 +1172,24 @@ mlx5_flow_async_action_list_handle_destroy
const struct rte_flow_op_attr *op_attr,
struct rte_flow_action_list_handle *action_handle,
void *user_data, struct rte_flow_error *error);
-
+static int
+mlx5_flow_action_list_handle_query_update(struct rte_eth_dev *dev,
+ const
+ struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ struct rte_flow_error *error);
+static int
+mlx5_flow_async_action_list_handle_query_update(struct rte_eth_dev *dev,
+ uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct
+ rte_flow_action_list_handle *handle,
+ const void **update,
+ void **query,
+ enum rte_flow_query_update_mode mode,
+ void *user_data,
+ struct rte_flow_error *error);
static const struct rte_flow_ops mlx5_flow_ops = {
.validate = mlx5_flow_validate,
.create = mlx5_flow_create,
@@ -1219,6 +1239,10 @@ static const struct rte_flow_ops mlx5_flow_ops = {
mlx5_flow_async_action_list_handle_create,
.async_action_list_handle_destroy =
mlx5_flow_async_action_list_handle_destroy,
+ .action_list_handle_query_update =
+ mlx5_flow_action_list_handle_query_update,
+ .async_action_list_handle_query_update =
+ mlx5_flow_async_action_list_handle_query_update,
};
/* Tunnel information. */
@@ -11003,6 +11027,47 @@ mlx5_flow_async_action_list_handle_destroy
error);
}
+static int
+mlx5_flow_action_list_handle_query_update(struct rte_eth_dev *dev,
+ const
+ struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops,
+ action_list_handle_query_update, ENOTSUP);
+ return fops->action_list_handle_query_update(dev, handle, update, query,
+ mode, error);
+}
+
+static int
+mlx5_flow_async_action_list_handle_query_update(struct rte_eth_dev *dev,
+ uint32_t queue_id,
+ const
+ struct rte_flow_op_attr *op_attr,
+ const struct
+ rte_flow_action_list_handle *handle,
+ const void **update,
+ void **query,
+ enum
+ rte_flow_query_update_mode mode,
+ void *user_data,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops,
+ async_action_list_handle_query_update, ENOTSUP);
+ return fops->async_action_list_handle_query_update(dev, queue_id, op_attr,
+ handle, update,
+ query, mode,
+ user_data, error);
+}
+
+
/**
* Destroy all indirect actions (shared RSS).
*
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 19b26ad333..2c086026a2 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -98,25 +98,41 @@ enum mlx5_indirect_type{
#define MLX5_ACTION_CTX_CT_GEN_IDX MLX5_INDIRECT_ACT_CT_GEN_IDX
enum mlx5_indirect_list_type {
- MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 1,
+ MLX5_INDIRECT_ACTION_LIST_TYPE_ERR = 0,
+ MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY = 1,
+ MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 2,
};
-/*
+/**
* Base type for indirect list type.
- * Actual indirect list type MUST override that type and put type spec data
- * after the `chain`.
*/
struct mlx5_indirect_list {
- /* type field MUST be the first */
+ /* Indirect list type. */
enum mlx5_indirect_list_type type;
+ /* Optional storage list entry */
LIST_ENTRY(mlx5_indirect_list) entry;
- /* put type specific data after chain */
};
+static __rte_always_inline void
+mlx5_indirect_list_add_entry(void *head, struct mlx5_indirect_list *elem)
+{
+ LIST_HEAD(, mlx5_indirect_list) *h = head;
+
+ LIST_INSERT_HEAD(h, elem, entry);
+}
+
+static __rte_always_inline void
+mlx5_indirect_list_remove_entry(struct mlx5_indirect_list *elem)
+{
+ if (elem->entry.le_prev)
+ LIST_REMOVE(elem, entry);
+
+}
+
static __rte_always_inline enum mlx5_indirect_list_type
-mlx5_get_indirect_list_type(const struct mlx5_indirect_list *obj)
+mlx5_get_indirect_list_type(const struct rte_flow_action_list_handle *obj)
{
- return obj->type;
+ return ((const struct mlx5_indirect_list *)obj)->type;
}
/* Matches on selected register. */
@@ -1240,9 +1256,12 @@ struct rte_flow_hw {
#pragma GCC diagnostic error "-Wpedantic"
#endif
-struct mlx5dr_action;
-typedef struct mlx5dr_action *
-(*indirect_list_callback_t)(const struct rte_flow_action *);
+struct mlx5_action_construct_data;
+typedef int
+(*indirect_list_callback_t)(struct rte_eth_dev *,
+ const struct mlx5_action_construct_data *,
+ const struct rte_flow_action *,
+ struct mlx5dr_rule_action *);
/* rte flow action translate to DR action struct. */
struct mlx5_action_construct_data {
@@ -1291,6 +1310,7 @@ struct mlx5_action_construct_data {
} shared_counter;
struct {
uint32_t id;
+ uint32_t conf_masked:1;
} shared_meter;
struct {
indirect_list_callback_t cb;
@@ -2017,7 +2037,21 @@ typedef int
const struct rte_flow_op_attr *op_attr,
struct rte_flow_action_list_handle *action_handle,
void *user_data, struct rte_flow_error *error);
-
+typedef int
+(*mlx5_flow_action_list_handle_query_update_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ struct rte_flow_error *error);
+typedef int
+(*mlx5_flow_async_action_list_handle_query_update_t)
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ void *user_data, struct rte_flow_error *error);
struct mlx5_flow_driver_ops {
mlx5_flow_validate_t validate;
@@ -2085,6 +2119,10 @@ struct mlx5_flow_driver_ops {
async_action_list_handle_create;
mlx5_flow_async_action_list_handle_destroy_t
async_action_list_handle_destroy;
+ mlx5_flow_action_list_handle_query_update_t
+ action_list_handle_query_update;
+ mlx5_flow_async_action_list_handle_query_update_t
+ async_action_list_handle_query_update;
};
/* mlx5_flow.c */
@@ -2820,6 +2858,9 @@ mlx5_indirect_list_handles_release(struct rte_eth_dev *dev);
#ifdef HAVE_MLX5_HWS_SUPPORT
struct mlx5_mirror;
void
-mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release);
+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror);
+void
+mlx5_destroy_legacy_indirect(struct rte_eth_dev *dev,
+ struct mlx5_indirect_list *ptr);
#endif
#endif /* RTE_PMD_MLX5_FLOW_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index ae017d2815..4d070624c8 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -61,16 +61,23 @@
#define MLX5_MIRROR_MAX_CLONES_NUM 3
#define MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN 4
+#define MLX5_HW_PORT_IS_PROXY(priv) \
+ (!!((priv)->sh->esw_mode && (priv)->master))
+
+
+struct mlx5_indlst_legacy {
+ struct mlx5_indirect_list indirect;
+ struct rte_flow_action_handle *handle;
+ enum rte_flow_action_type legacy_type;
+};
+
struct mlx5_mirror_clone {
enum rte_flow_action_type type;
void *action_ctx;
};
struct mlx5_mirror {
- /* type field MUST be the first */
- enum mlx5_indirect_list_type type;
- LIST_ENTRY(mlx5_indirect_list) entry;
-
+ struct mlx5_indirect_list indirect;
uint32_t clones_num;
struct mlx5dr_action *mirror_action;
struct mlx5_mirror_clone clone[MLX5_MIRROR_MAX_CLONES_NUM];
@@ -1416,46 +1423,211 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev,
return 0;
}
-static struct mlx5dr_action *
-flow_hw_mirror_action(const struct rte_flow_action *action)
+static int
+flow_hw_translate_indirect_mirror(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused const struct mlx5_action_construct_data *act_data,
+ const struct rte_flow_action *action,
+ struct mlx5dr_rule_action *dr_rule)
+{
+ const struct rte_flow_action_indirect_list *list_conf = action->conf;
+ const struct mlx5_mirror *mirror = (typeof(mirror))list_conf->handle;
+
+ dr_rule->action = mirror->mirror_action;
+ return 0;
+}
+
+/**
+ * HWS mirror implemented as FW island.
+ * The action does not support indirect list flow configuration.
+ * If template handle was masked, use handle mirror action in flow rules.
+ * Otherwise let flow rule specify mirror handle.
+ */
+static int
+hws_table_tmpl_translate_indirect_mirror(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *mask,
+ struct mlx5_hw_actions *acts,
+ uint16_t action_src, uint16_t action_dst)
+{
+ int ret = 0;
+ const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
+
+ if (mask_conf && mask_conf->handle) {
+ /**
+ * If mirror handle was masked, assign fixed DR5 mirror action.
+ */
+ flow_hw_translate_indirect_mirror(dev, NULL, action,
+ &acts->rule_acts[action_dst]);
+ } else {
+ struct mlx5_priv *priv = dev->data->dev_private;
+ ret = flow_hw_act_data_indirect_list_append
+ (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
+ action_src, action_dst,
+ flow_hw_translate_indirect_mirror);
+ }
+ return ret;
+}
+
+static int
+flow_dr_set_meter(struct mlx5_priv *priv,
+ struct mlx5dr_rule_action *dr_rule,
+ const struct rte_flow_action_indirect_list *action_conf)
+{
+ const struct mlx5_indlst_legacy *legacy_obj =
+ (typeof(legacy_obj))action_conf->handle;
+ struct mlx5_aso_mtr_pool *mtr_pool = priv->hws_mpool;
+ uint32_t act_idx = (uint32_t)(uintptr_t)legacy_obj->handle;
+ uint32_t mtr_id = act_idx & (RTE_BIT32(MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
+ struct mlx5_aso_mtr *aso_mtr = mlx5_ipool_get(mtr_pool->idx_pool, mtr_id);
+
+ if (!aso_mtr)
+ return -EINVAL;
+ dr_rule->action = mtr_pool->action;
+ dr_rule->aso_meter.offset = aso_mtr->offset;
+ return 0;
+}
+
+__rte_always_inline static void
+flow_dr_mtr_flow_color(struct mlx5dr_rule_action *dr_rule, enum rte_color init_color)
+{
+ dr_rule->aso_meter.init_color =
+ (enum mlx5dr_action_aso_meter_color)rte_col_2_mlx5_col(init_color);
+}
+
+static int
+flow_hw_translate_indirect_meter(struct rte_eth_dev *dev,
+ const struct mlx5_action_construct_data *act_data,
+ const struct rte_flow_action *action,
+ struct mlx5dr_rule_action *dr_rule)
+{
+ int ret;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_indirect_list *action_conf = action->conf;
+ const struct rte_flow_indirect_update_flow_meter_mark **flow_conf =
+ (typeof(flow_conf))action_conf->conf;
+
+ /*
+ * Masked indirect handle set dr5 action during template table
+ * translation.
+ */
+ if (!dr_rule->action) {
+ ret = flow_dr_set_meter(priv, dr_rule, action_conf);
+ if (ret)
+ return ret;
+ }
+ if (!act_data->shared_meter.conf_masked) {
+ if (flow_conf && flow_conf[0] && flow_conf[0]->init_color < RTE_COLORS)
+ flow_dr_mtr_flow_color(dr_rule, flow_conf[0]->init_color);
+ }
+ return 0;
+}
+
+static int
+hws_table_tmpl_translate_indirect_meter(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *mask,
+ struct mlx5_hw_actions *acts,
+ uint16_t action_src, uint16_t action_dst)
+{
+ int ret;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_indirect_list *action_conf = action->conf;
+ const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
+ bool is_handle_masked = mask_conf && mask_conf->handle;
+ bool is_conf_masked = mask_conf && mask_conf->conf && mask_conf->conf[0];
+ struct mlx5dr_rule_action *dr_rule = &acts->rule_acts[action_dst];
+
+ if (is_handle_masked) {
+ ret = flow_dr_set_meter(priv, dr_rule, action->conf);
+ if (ret)
+ return ret;
+ }
+ if (is_conf_masked) {
+ const struct
+ rte_flow_indirect_update_flow_meter_mark **flow_conf =
+ (typeof(flow_conf))action_conf->conf;
+ flow_dr_mtr_flow_color(dr_rule,
+ flow_conf[0]->init_color);
+ }
+ if (!is_handle_masked || !is_conf_masked) {
+ struct mlx5_action_construct_data *act_data;
+
+ ret = flow_hw_act_data_indirect_list_append
+ (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
+ action_src, action_dst, flow_hw_translate_indirect_meter);
+ if (ret)
+ return ret;
+ act_data = LIST_FIRST(&acts->act_list);
+ act_data->shared_meter.conf_masked = is_conf_masked;
+ }
+ return 0;
+}
+
+static int
+hws_table_tmpl_translate_indirect_legacy(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *mask,
+ struct mlx5_hw_actions *acts,
+ uint16_t action_src, uint16_t action_dst)
{
- struct mlx5_mirror *mirror = (void *)(uintptr_t)action->conf;
+ int ret;
+ const struct rte_flow_action_indirect_list *indlst_conf = action->conf;
+ struct mlx5_indlst_legacy *indlst_obj = (typeof(indlst_obj))indlst_conf->handle;
+ uint32_t act_idx = (uint32_t)(uintptr_t)indlst_obj->handle;
+ uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
- return mirror->mirror_action;
+ switch (type) {
+ case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
+ ret = hws_table_tmpl_translate_indirect_meter(dev, action, mask,
+ acts, action_src,
+ action_dst);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
}
+/*
+ * template .. indirect_list handle Ht conf Ct ..
+ * mask .. indirect_list handle Hm conf Cm ..
+ *
+ * PMD requires Ht != 0 to resolve handle type.
+ * If Ht was masked (Hm != 0) DR5 action will be set according to Ht and will
+ * not change. Otherwise, DR5 action will be resolved during flow rule build.
+ * If Ct was masked (Cm != 0), table template processing updates base
+ * indirect action configuration with Ct parameters.
+ */
static int
table_template_translate_indirect_list(struct rte_eth_dev *dev,
const struct rte_flow_action *action,
const struct rte_flow_action *mask,
struct mlx5_hw_actions *acts,
- uint16_t action_src,
- uint16_t action_dst)
+ uint16_t action_src, uint16_t action_dst)
{
- int ret;
- bool is_masked = action->conf && mask->conf;
- struct mlx5_priv *priv = dev->data->dev_private;
+ int ret = 0;
enum mlx5_indirect_list_type type;
+ const struct rte_flow_action_indirect_list *list_conf = action->conf;
- if (!action->conf)
+ if (!list_conf || !list_conf->handle)
return -EINVAL;
- type = mlx5_get_indirect_list_type(action->conf);
+ type = mlx5_get_indirect_list_type(list_conf->handle);
switch (type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
+ ret = hws_table_tmpl_translate_indirect_legacy(dev, action, mask,
+ acts, action_src,
+ action_dst);
+ break;
case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
- if (is_masked) {
- acts->rule_acts[action_dst].action = flow_hw_mirror_action(action);
- } else {
- ret = flow_hw_act_data_indirect_list_append
- (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
- action_src, action_dst, flow_hw_mirror_action);
- if (ret)
- return ret;
- }
+ ret = hws_table_tmpl_translate_indirect_mirror(dev, action, mask,
+ acts, action_src,
+ action_dst);
break;
default:
return -EINVAL;
}
- return 0;
+ return ret;
}
/**
@@ -2366,8 +2538,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
(int)action->type == act_data->type);
switch ((int)act_data->type) {
case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
- rule_acts[act_data->action_dst].action =
- act_data->indirect_list.cb(action);
+ act_data->indirect_list.cb(dev, act_data, actions, rule_acts);
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
if (flow_hw_shared_action_construct
@@ -4664,20 +4835,11 @@ action_template_set_type(struct rte_flow_actions_template *at,
}
static int
-flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
- unsigned int action_src,
+flow_hw_dr_actions_template_handle_shared(int type, uint32_t action_src,
enum mlx5dr_action_type *action_types,
uint16_t *curr_off, uint16_t *cnt_off,
struct rte_flow_actions_template *at)
{
- uint32_t type;
-
- if (!mask) {
- DRV_LOG(WARNING, "Unable to determine indirect action type "
- "without a mask specified");
- return -EINVAL;
- }
- type = mask->type;
switch (type) {
case RTE_FLOW_ACTION_TYPE_RSS:
action_template_set_type(at, action_types, action_src, curr_off,
@@ -4718,12 +4880,24 @@ static int
flow_hw_template_actions_list(struct rte_flow_actions_template *at,
unsigned int action_src,
enum mlx5dr_action_type *action_types,
- uint16_t *curr_off)
+ uint16_t *curr_off, uint16_t *cnt_off)
{
- enum mlx5_indirect_list_type list_type;
+ int ret;
+ const struct rte_flow_action_indirect_list *indlst_conf = at->actions[action_src].conf;
+ enum mlx5_indirect_list_type list_type = mlx5_get_indirect_list_type(indlst_conf->handle);
+ const union {
+ struct mlx5_indlst_legacy *legacy;
+ struct rte_flow_action_list_handle *handle;
+ } indlst_obj = { .handle = indlst_conf->handle };
- list_type = mlx5_get_indirect_list_type(at->actions[action_src].conf);
switch (list_type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
+ ret = flow_hw_dr_actions_template_handle_shared
+ (indlst_obj.legacy->legacy_type, action_src,
+ action_types, curr_off, cnt_off, at);
+ if (ret)
+ return ret;
+ break;
case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
action_template_set_type(at, action_types, action_src, curr_off,
MLX5DR_ACTION_TYP_DEST_ARRAY);
@@ -4769,17 +4943,14 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
ret = flow_hw_template_actions_list(at, i, action_types,
- &curr_off);
+ &curr_off, &cnt_off);
if (ret)
return NULL;
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
ret = flow_hw_dr_actions_template_handle_shared
- (&at->masks[i],
- i,
- action_types,
- &curr_off,
- &cnt_off, at);
+ (at->masks[i].type, i, action_types,
+ &curr_off, &cnt_off, at);
if (ret)
return NULL;
break;
@@ -5259,9 +5430,8 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
* Need to restore the indirect action index from action conf here.
*/
case RTE_FLOW_ACTION_TYPE_INDIRECT:
- case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
- at->actions[i].conf = actions->conf;
- at->masks[i].conf = masks->conf;
+ at->actions[i].conf = ra[i].conf;
+ at->masks[i].conf = rm[i].conf;
break;
case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
info = actions->conf;
@@ -9519,18 +9689,16 @@ mlx5_mirror_destroy_clone(struct rte_eth_dev *dev,
}
void
-mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release)
+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror)
{
uint32_t i;
- if (mirror->entry.le_prev)
- LIST_REMOVE(mirror, entry);
+ mlx5_indirect_list_remove_entry(&mirror->indirect);
for(i = 0; i < mirror->clones_num; i++)
mlx5_mirror_destroy_clone(dev, &mirror->clone[i]);
if (mirror->mirror_action)
mlx5dr_action_destroy(mirror->mirror_action);
- if (release)
- mlx5_free(mirror);
+ mlx5_free(mirror);
}
static inline enum mlx5dr_table_type
@@ -9825,7 +9993,8 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
actions, "Failed to allocate mirror context");
return NULL;
}
- mirror->type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
+
+ mirror->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
mirror->clones_num = clones_num;
for (i = 0; i < clones_num; i++) {
const struct rte_flow_action *clone_actions;
@@ -9857,15 +10026,72 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
goto error;
}
- LIST_INSERT_HEAD(&priv->indirect_list_head,
- (struct mlx5_indirect_list *)mirror, entry);
+ mlx5_indirect_list_add_entry(&priv->indirect_list_head, &mirror->indirect);
return (struct rte_flow_action_list_handle *)mirror;
error:
- mlx5_hw_mirror_destroy(dev, mirror, true);
+ mlx5_hw_mirror_destroy(dev, mirror);
return NULL;
}
+void
+mlx5_destroy_legacy_indirect(__rte_unused struct rte_eth_dev *dev,
+ struct mlx5_indirect_list *ptr)
+{
+ struct mlx5_indlst_legacy *obj = (typeof(obj))ptr;
+
+ switch (obj->legacy_type) {
+ case RTE_FLOW_ACTION_TYPE_METER_MARK:
+ break; /* ASO meters were released in mlx5_flow_meter_flush() */
+ default:
+ break;
+ }
+ mlx5_free(obj);
+}
+
+static struct rte_flow_action_list_handle *
+mlx5_create_legacy_indlst(struct rte_eth_dev *dev, uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data, struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_indlst_legacy *indlst_obj = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(*indlst_obj),
+ 0, SOCKET_ID_ANY);
+
+ if (!indlst_obj)
+ return NULL;
+ indlst_obj->handle = flow_hw_action_handle_create(dev, queue, attr, conf,
+ actions, user_data,
+ error);
+ if (!indlst_obj->handle) {
+ mlx5_free(indlst_obj);
+ return NULL;
+ }
+ indlst_obj->legacy_type = actions[0].type;
+ indlst_obj->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY;
+ mlx5_indirect_list_add_entry(&priv->indirect_list_head, &indlst_obj->indirect);
+ return (struct rte_flow_action_list_handle *)indlst_obj;
+}
+
+static __rte_always_inline enum mlx5_indirect_list_type
+flow_hw_inlist_type_get(const struct rte_flow_action *actions)
+{
+ switch (actions[0].type) {
+ case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ return MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
+ case RTE_FLOW_ACTION_TYPE_METER_MARK:
+ return actions[1].type == RTE_FLOW_ACTION_TYPE_END ?
+ MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY :
+ MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;
+ default:
+ break;
+ }
+ return MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;
+}
+
static struct rte_flow_action_list_handle *
flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
const struct rte_flow_op_attr *attr,
@@ -9876,6 +10102,7 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
{
struct mlx5_hw_q_job *job = NULL;
bool push = flow_hw_action_push(attr);
+ enum mlx5_indirect_list_type list_type;
struct rte_flow_action_list_handle *handle;
struct mlx5_priv *priv = dev->data->dev_private;
const struct mlx5_flow_template_table_cfg table_cfg = {
@@ -9894,6 +10121,16 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
NULL, "No action list");
return NULL;
}
+ list_type = flow_hw_inlist_type_get(actions);
+ if (list_type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
+ /*
+ * Legacy indirect actions already have
+ * async resources management. No need to do it twice.
+ */
+ handle = mlx5_create_legacy_indlst(dev, queue, attr, conf,
+ actions, user_data, error);
+ goto end;
+ }
if (attr) {
job = flow_hw_action_job_init(priv, queue, NULL, user_data,
NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
@@ -9901,8 +10138,8 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
if (!job)
return NULL;
}
- switch (actions[0].type) {
- case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ switch (list_type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
handle = mlx5_hw_mirror_handle_create(dev, &table_cfg,
actions, error);
break;
@@ -9916,6 +10153,7 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
flow_hw_action_finalize(dev, queue, job, push, false,
handle != NULL);
}
+end:
return handle;
}
@@ -9944,6 +10182,15 @@ flow_hw_async_action_list_handle_destroy
enum mlx5_indirect_list_type type =
mlx5_get_indirect_list_type((void *)handle);
+ if (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
+ struct mlx5_indlst_legacy *legacy = (typeof(legacy))handle;
+
+ ret = flow_hw_action_handle_destroy(dev, queue, attr,
+ legacy->handle,
+ user_data, error);
+ mlx5_indirect_list_remove_entry(&legacy->indirect);
+ goto end;
+ }
if (attr) {
job = flow_hw_action_job_init(priv, queue, NULL, user_data,
NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
@@ -9953,20 +10200,17 @@ flow_hw_async_action_list_handle_destroy
}
switch(type) {
case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
- mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle, false);
+ mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle);
break;
default:
- handle = NULL;
ret = rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "Invalid indirect list handle");
}
if (job) {
- job->action = handle;
- flow_hw_action_finalize(dev, queue, job, push, false,
- handle != NULL);
+ flow_hw_action_finalize(dev, queue, job, push, false, true);
}
- mlx5_free(handle);
+end:
return ret;
}
@@ -9980,6 +10224,53 @@ flow_hw_action_list_handle_destroy(struct rte_eth_dev *dev,
error);
}
+static int
+flow_hw_async_action_list_handle_query_update
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ void *user_data, struct rte_flow_error *error)
+{
+ enum mlx5_indirect_list_type type =
+ mlx5_get_indirect_list_type((const void *)handle);
+
+ if (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
+ struct mlx5_indlst_legacy *legacy = (void *)(uintptr_t)handle;
+
+ if (update && query)
+ return flow_hw_async_action_handle_query_update
+ (dev, queue_id, attr, legacy->handle,
+ update, query, mode, user_data, error);
+ else if (update && update[0])
+ return flow_hw_action_handle_update(dev, queue_id, attr,
+ legacy->handle, update[0],
+ user_data, error);
+ else if (query && query[0])
+ return flow_hw_action_handle_query(dev, queue_id, attr,
+ legacy->handle, query[0],
+ user_data, error);
+ else
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "invalid legacy handle query_update parameters");
+ }
+ return -ENOTSUP;
+}
+
+static int
+flow_hw_action_list_handle_query_update(struct rte_eth_dev *dev,
+ const struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ struct rte_flow_error *error)
+{
+ return flow_hw_async_action_list_handle_query_update
+ (dev, MLX5_HW_INV_QUEUE, NULL, handle,
+ update, query, mode, NULL, error);
+}
+
const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.info_get = flow_hw_info_get,
.configure = flow_hw_configure,
@@ -10010,10 +10301,14 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.action_query_update = flow_hw_action_query_update,
.action_list_handle_create = flow_hw_action_list_handle_create,
.action_list_handle_destroy = flow_hw_action_list_handle_destroy,
+ .action_list_handle_query_update =
+ flow_hw_action_list_handle_query_update,
.async_action_list_handle_create =
flow_hw_async_action_list_handle_create,
.async_action_list_handle_destroy =
flow_hw_async_action_list_handle_destroy,
+ .async_action_list_handle_query_update =
+ flow_hw_async_action_list_handle_query_update,
.query = flow_hw_query,
.get_aged_flows = flow_hw_get_aged_flows,
.get_q_aged_flows = flow_hw_get_q_aged_flows,
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v2 16/16] net/mlx5: fix METER_MARK indirection list callback
2023-10-16 18:42 ` [PATCH v2 01/16] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
` (13 preceding siblings ...)
2023-10-16 18:42 ` [PATCH v2 15/16] net/mlx5: support indirect list METER_MARK action Gregory Etelson
@ 2023-10-16 18:42 ` Gregory Etelson
2023-10-17 7:56 ` [PATCH v2 01/16] net/mlx5/hws: add support for reformat DevX object Suanming Mou
15 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-16 18:42 UTC (permalink / raw)
To: dev
Cc: getelson, ,
. .
/patches/upstream-pmd-indirect-actions-list/v2/v2-0000-cover-letter
. patch, Matan Azrad, Viacheslav Ovsiienko, Ori Kam,
Suanming Mou
Indirect action list METER_MARK handle and configuration parameters
can be independently masked or non-masked in actions template.
Non-masked configuration state is saved in the
`mlx5_action_construct_data::shared_meter` object.
The patch moves the indirect action list callback out of the
`mlx5_action_construct_data` type-specific union to prevent its collision
with shared_meter.
Fixes: 82641ccee69d ("net/mlx5: support indirect list METER_MARK action")
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
---
drivers/net/mlx5/mlx5_flow.h | 4 +---
drivers/net/mlx5/mlx5_flow_hw.c | 5 +++--
2 files changed, 4 insertions(+), 5 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 2c086026a2..53c11651c8 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1271,6 +1271,7 @@ struct mlx5_action_construct_data {
uint32_t idx; /* Data index. */
uint16_t action_src; /* rte_flow_action src offset. */
uint16_t action_dst; /* mlx5dr_rule_action dst offset. */
+ indirect_list_callback_t indirect_list_cb;
union {
struct {
/* encap data len. */
@@ -1312,9 +1313,6 @@ struct mlx5_action_construct_data {
uint32_t id;
uint32_t conf_masked:1;
} shared_meter;
- struct {
- indirect_list_callback_t cb;
- } indirect_list;
};
};
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 4d070624c8..5114cc1920 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -605,7 +605,7 @@ flow_hw_act_data_indirect_list_append(struct mlx5_priv *priv,
act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
if (!act_data)
return -1;
- act_data->indirect_list.cb = cb;
+ act_data->indirect_list_cb = cb;
LIST_INSERT_HEAD(&acts->act_list, act_data, next);
return 0;
}
@@ -2538,7 +2538,8 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
(int)action->type == act_data->type);
switch ((int)act_data->type) {
case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
- act_data->indirect_list.cb(dev, act_data, actions, rule_acts);
+ act_data->indirect_list_cb(dev, act_data, actions,
+ &rule_acts[act_data->action_dst]);
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
if (flow_hw_shared_action_construct
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v2 00/16] net/mlx5: support indirect actions list
2023-09-27 19:10 [PATCH 0/3] net/mlx5: support indirect list actions Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 01/16] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
@ 2023-10-17 7:31 ` Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 " Gregory Etelson
2 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-17 7:31 UTC (permalink / raw)
To: dev; +Cc: getelson, , rasland
Erez Shitrit (1):
net/mlx5/hws: allow destination into default miss FT
Gregory Etelson (10):
net/mlx5: reformat HWS code
net/mlx5: support HWS mirror action
net/mlx5: fix mirror action validation
net/mlx5: fix in shared counter and age template action create
net/mlx5: fix modify field expansion for raw DECAP / ENCAP
net/mlx5: refactor HWS code
net/mlx5: fix RTE action location tracking in a template
net/mlx5: fix mirror redirect action
net/mlx5: support indirect list METER_MARK action
net/mlx5: fix METER_MARK indirection list callback
Haifei Luo (1):
net/mlx5/hws: support reformat for hws mirror
Hamdan Igbaria (3):
net/mlx5/hws: add support for reformat DevX object
net/mlx5/hws: support creating of dynamic forward table and FTE
net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action
Shun Hao (1):
net/mlx5/hws: add support for mirroring
drivers/common/mlx5/mlx5_prm.h | 81 +-
drivers/net/mlx5/hws/mlx5dr.h | 34 +
drivers/net/mlx5/hws/mlx5dr_action.c | 210 +++-
drivers/net/mlx5/hws/mlx5dr_action.h | 8 +
drivers/net/mlx5/hws/mlx5dr_cmd.c | 143 ++-
drivers/net/mlx5/hws/mlx5dr_cmd.h | 49 +-
drivers/net/mlx5/hws/mlx5dr_debug.c | 1 +
drivers/net/mlx5/hws/mlx5dr_internal.h | 5 +
drivers/net/mlx5/hws/mlx5dr_send.c | 5 -
drivers/net/mlx5/hws/mlx5dr_table.c | 8 +-
drivers/net/mlx5/mlx5.c | 1 +
drivers/net/mlx5/mlx5.h | 2 +
drivers/net/mlx5/mlx5_flow.c | 199 ++++
drivers/net/mlx5/mlx5_flow.h | 112 ++-
drivers/net/mlx5/mlx5_flow_hw.c | 1218 +++++++++++++++++++++---
15 files changed, 1908 insertions(+), 168 deletions(-)
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* RE: [PATCH v2 01/16] net/mlx5/hws: add support for reformat DevX object
2023-10-16 18:42 ` [PATCH v2 01/16] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
` (14 preceding siblings ...)
2023-10-16 18:42 ` [PATCH v2 16/16] net/mlx5: fix METER_MARK indirection list callback Gregory Etelson
@ 2023-10-17 7:56 ` Suanming Mou
15 siblings, 0 replies; 81+ messages in thread
From: Suanming Mou @ 2023-10-17 7:56 UTC (permalink / raw)
To: Gregory Etelson, dev
Cc: Maayan Kashani, Raslan Darawsheh, Hamdan Igbaria, Matan Azrad,
Slava Ovsiienko, Ori Kam
Hi,
> -----Original Message-----
> From: Gregory Etelson <getelson@nvidia.com>
> Sent: Tuesday, October 17, 2023 2:42 AM
> To: dev@dpdk.org
> Cc: Gregory Etelson <getelson@nvidia.com>; Maayan Kashani
> <mkashani@nvidia.com>; Raslan Darawsheh <rasland@nvidia.com>; Hamdan
> Igbaria <hamdani@nvidia.com>; Matan Azrad <matan@nvidia.com>; Slava
> Ovsiienko <viacheslavo@nvidia.com>; Ori Kam <orika@nvidia.com>; Suanming
> Mou <suanmingm@nvidia.com>
> Subject: [PATCH v2 01/16] net/mlx5/hws: add support for reformat DevX object
>
> From: Hamdan Igbaria <hamdani@nvidia.com>
>
> Add support for creation of packet reformat object, via the
> ALLOC_PACKET_REFORMAT_CONTEXT command.
>
> Signed-off-by: Hamdan Igbaria <hamdani@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
I'm OK for the series, but please update v3 with cover-letter as it is missing.
Thanks.
> ---
> drivers/common/mlx5/mlx5_prm.h | 39 +++++++++++++++++
> drivers/net/mlx5/hws/mlx5dr_cmd.c | 60 ++++++++++++++++++++++++++
> drivers/net/mlx5/hws/mlx5dr_cmd.h | 11 +++++
> drivers/net/mlx5/hws/mlx5dr_internal.h | 5 +++
> drivers/net/mlx5/hws/mlx5dr_send.c | 5 ---
> 5 files changed, 115 insertions(+), 5 deletions(-)
>
> 2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v3 00/16] net/mlx5: support indirect actions list
2023-09-27 19:10 [PATCH 0/3] net/mlx5: support indirect list actions Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 01/16] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
2023-10-17 7:31 ` [PATCH v2 00/16] net/mlx5: support indirect actions list Gregory Etelson
@ 2023-10-17 8:09 ` Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 01/16] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
` (19 more replies)
2 siblings, 20 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-17 8:09 UTC (permalink / raw)
To: dev; +Cc: getelson, , rasland
Add MLX5 PMD support for indirect actions list.
Erez Shitrit (1):
net/mlx5/hws: allow destination into default miss FT
Gregory Etelson (10):
net/mlx5: reformat HWS code
net/mlx5: support HWS mirror action
net/mlx5: fix mirror action validation
net/mlx5: fix in shared counter and age template action create
net/mlx5: fix modify field expansion for raw DECAP / ENCAP
net/mlx5: refactor HWS code
net/mlx5: fix RTE action location tracking in a template
net/mlx5: fix mirror redirect action
net/mlx5: support indirect list METER_MARK action
net/mlx5: fix METER_MARK indirection list callback
Haifei Luo (1):
net/mlx5/hws: support reformat for hws mirror
Hamdan Igbaria (3):
net/mlx5/hws: add support for reformat DevX object
net/mlx5/hws: support creating of dynamic forward table and FTE
net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action
Shun Hao (1):
net/mlx5/hws: add support for mirroring
drivers/common/mlx5/mlx5_prm.h | 81 +-
drivers/net/mlx5/hws/mlx5dr.h | 34 +
drivers/net/mlx5/hws/mlx5dr_action.c | 210 +++-
drivers/net/mlx5/hws/mlx5dr_action.h | 8 +
drivers/net/mlx5/hws/mlx5dr_cmd.c | 143 ++-
drivers/net/mlx5/hws/mlx5dr_cmd.h | 49 +-
drivers/net/mlx5/hws/mlx5dr_debug.c | 1 +
drivers/net/mlx5/hws/mlx5dr_internal.h | 5 +
drivers/net/mlx5/hws/mlx5dr_send.c | 5 -
drivers/net/mlx5/hws/mlx5dr_table.c | 8 +-
drivers/net/mlx5/mlx5.c | 1 +
drivers/net/mlx5/mlx5.h | 2 +
drivers/net/mlx5/mlx5_flow.c | 199 ++++
drivers/net/mlx5/mlx5_flow.h | 112 ++-
drivers/net/mlx5/mlx5_flow_hw.c | 1218 +++++++++++++++++++++---
15 files changed, 1908 insertions(+), 168 deletions(-)
--
v3: Add ACK to patches in the series.
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v3 01/16] net/mlx5/hws: add support for reformat DevX object
2023-10-17 8:09 ` [PATCH v3 " Gregory Etelson
@ 2023-10-17 8:09 ` Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 02/16] net/mlx5/hws: support creating of dynamic forward table and FTE Gregory Etelson
` (18 subsequent siblings)
19 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-17 8:09 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Hamdan Igbaria, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Hamdan Igbaria <hamdani@nvidia.com>
Add support for creation of packet reformat object,
via the ALLOC_PACKET_REFORMAT_CONTEXT command.
Signed-off-by: Hamdan Igbaria <hamdani@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/common/mlx5/mlx5_prm.h | 39 +++++++++++++++++
drivers/net/mlx5/hws/mlx5dr_cmd.c | 60 ++++++++++++++++++++++++++
drivers/net/mlx5/hws/mlx5dr_cmd.h | 11 +++++
drivers/net/mlx5/hws/mlx5dr_internal.h | 5 +++
drivers/net/mlx5/hws/mlx5dr_send.c | 5 ---
5 files changed, 115 insertions(+), 5 deletions(-)
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index 6e181a0eca..4192fff55b 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -1218,6 +1218,8 @@ enum {
MLX5_CMD_OP_CREATE_FLOW_GROUP = 0x933,
MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936,
MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
+ MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT = 0x93d,
+ MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT = 0x93e,
MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939,
MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
MLX5_CMD_OP_CREATE_GENERAL_OBJECT = 0xa00,
@@ -5191,6 +5193,43 @@ struct mlx5_ifc_modify_flow_table_out_bits {
u8 reserved_at_40[0x60];
};
+struct mlx5_ifc_packet_reformat_context_in_bits {
+ u8 reformat_type[0x8];
+ u8 reserved_at_8[0x4];
+ u8 reformat_param_0[0x4];
+ u8 reserved_at_16[0x6];
+ u8 reformat_data_size[0xa];
+
+ u8 reformat_param_1[0x8];
+ u8 reserved_at_40[0x8];
+ u8 reformat_data[6][0x8];
+
+ u8 more_reformat_data[][0x8];
+};
+
+struct mlx5_ifc_alloc_packet_reformat_context_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0xa0];
+
+ u8 packet_reformat_context[];
+};
+
+struct mlx5_ifc_alloc_packet_reformat_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 packet_reformat_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
/* CQE format mask. */
#define MLX5E_CQE_FORMAT_MASK 0xc
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c
index 594c59aee3..0ccbaee961 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.c
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c
@@ -780,6 +780,66 @@ mlx5dr_cmd_sq_create(struct ibv_context *ctx,
return devx_obj;
}
+struct mlx5dr_devx_obj *
+mlx5dr_cmd_packet_reformat_create(struct ibv_context *ctx,
+ struct mlx5dr_cmd_packet_reformat_create_attr *attr)
+{
+ uint32_t out[MLX5_ST_SZ_DW(alloc_packet_reformat_out)] = {0};
+ size_t insz, cmd_data_sz, cmd_total_sz;
+ struct mlx5dr_devx_obj *devx_obj;
+ void *prctx;
+ void *pdata;
+ void *in;
+
+ cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
+ cmd_total_sz += MLX5_ST_SZ_BYTES(packet_reformat_context_in);
+ cmd_data_sz = MLX5_FLD_SZ_BYTES(packet_reformat_context_in, reformat_data);
+ insz = align(cmd_total_sz + attr->data_sz - cmd_data_sz, DW_SIZE);
+ in = simple_calloc(1, insz);
+ if (!in) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
+
+ prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in,
+ packet_reformat_context);
+ pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);
+
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_type, attr->type);
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, attr->reformat_param_0);
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, attr->data_sz);
+ memcpy(pdata, attr->data, attr->data_sz);
+
+ devx_obj = simple_malloc(sizeof(*devx_obj));
+ if (!devx_obj) {
+ DR_LOG(ERR, "Failed to allocate memory for packet reformat object");
+ rte_errno = ENOMEM;
+ goto out_free_in;
+ }
+
+ devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, insz, out, sizeof(out));
+ if (!devx_obj->obj) {
+ DR_LOG(ERR, "Failed to create packet reformat");
+ rte_errno = errno;
+ goto out_free_devx;
+ }
+
+ devx_obj->id = MLX5_GET(alloc_packet_reformat_out, out, packet_reformat_id);
+
+ simple_free(in);
+
+ return devx_obj;
+
+out_free_devx:
+ simple_free(devx_obj);
+out_free_in:
+ simple_free(in);
+ return NULL;
+}
+
int mlx5dr_cmd_sq_modify_rdy(struct mlx5dr_devx_obj *devx_obj)
{
uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h
index 8a495db9b3..f45b6c6b07 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.h
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h
@@ -155,6 +155,13 @@ struct mlx5dr_cmd_allow_other_vhca_access_attr {
uint8_t access_key[ACCESS_KEY_LEN];
};
+struct mlx5dr_cmd_packet_reformat_create_attr {
+ uint8_t type;
+ size_t data_sz;
+ void *data;
+ uint8_t reformat_param_0;
+};
+
struct mlx5dr_cmd_query_ft_caps {
uint8_t max_level;
uint8_t reparse;
@@ -285,6 +292,10 @@ mlx5dr_cmd_forward_tbl_create(struct ibv_context *ctx,
void mlx5dr_cmd_forward_tbl_destroy(struct mlx5dr_cmd_forward_tbl *tbl);
+struct mlx5dr_devx_obj *
+mlx5dr_cmd_packet_reformat_create(struct ibv_context *ctx,
+ struct mlx5dr_cmd_packet_reformat_create_attr *attr);
+
struct mlx5dr_devx_obj *
mlx5dr_cmd_alias_obj_create(struct ibv_context *ctx,
struct mlx5dr_cmd_alias_obj_create_attr *alias_attr);
diff --git a/drivers/net/mlx5/hws/mlx5dr_internal.h b/drivers/net/mlx5/hws/mlx5dr_internal.h
index c3c077667d..3770d28e62 100644
--- a/drivers/net/mlx5/hws/mlx5dr_internal.h
+++ b/drivers/net/mlx5/hws/mlx5dr_internal.h
@@ -91,4 +91,9 @@ static inline uint64_t roundup_pow_of_two(uint64_t n)
return n == 1 ? 1 : 1ULL << log2above(n);
}
+static inline unsigned long align(unsigned long val, unsigned long align)
+{
+ return (val + align - 1) & ~(align - 1);
+}
+
#endif /* MLX5DR_INTERNAL_H_ */
diff --git a/drivers/net/mlx5/hws/mlx5dr_send.c b/drivers/net/mlx5/hws/mlx5dr_send.c
index e58fdeb117..622d574bfa 100644
--- a/drivers/net/mlx5/hws/mlx5dr_send.c
+++ b/drivers/net/mlx5/hws/mlx5dr_send.c
@@ -668,11 +668,6 @@ static int mlx5dr_send_ring_create_sq_obj(struct mlx5dr_context *ctx,
return err;
}
-static inline unsigned long align(unsigned long val, unsigned long align)
-{
- return (val + align - 1) & ~(align - 1);
-}
-
static int mlx5dr_send_ring_open_sq(struct mlx5dr_context *ctx,
struct mlx5dr_send_engine *queue,
struct mlx5dr_send_ring_sq *sq,
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v3 02/16] net/mlx5/hws: support creating of dynamic forward table and FTE
2023-10-17 8:09 ` [PATCH v3 " Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 01/16] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
@ 2023-10-17 8:09 ` Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 03/16] net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action Gregory Etelson
` (17 subsequent siblings)
19 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-17 8:09 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Hamdan Igbaria, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Hamdan Igbaria <hamdani@nvidia.com>
Add the ability to create a forward table and FTE.
Signed-off-by: Hamdan Igbaria <hamdani@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/common/mlx5/mlx5_prm.h | 4 ++++
drivers/net/mlx5/hws/mlx5dr_cmd.c | 13 +++++++++++++
drivers/net/mlx5/hws/mlx5dr_cmd.h | 19 +++++++++++++++++++
3 files changed, 36 insertions(+)
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index 4192fff55b..df621b19af 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -5048,7 +5048,11 @@ enum mlx5_flow_destination_type {
};
enum mlx5_flow_context_action {
+ MLX5_FLOW_CONTEXT_ACTION_DROP = 1 << 1,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 1 << 2,
+ MLX5_FLOW_CONTEXT_ACTION_REFORMAT = 1 << 4,
+ MLX5_FLOW_CONTEXT_ACTION_DECRYPT = 1 << 12,
+ MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 13,
};
enum mlx5_flow_context_flow_source {
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c
index 0ccbaee961..8f407f9bce 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.c
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c
@@ -42,6 +42,7 @@ mlx5dr_cmd_flow_table_create(struct ibv_context *ctx,
ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level);
MLX5_SET(flow_table_context, ft_ctx, rtc_valid, ft_attr->rtc_valid);
+ MLX5_SET(flow_table_context, ft_ctx, reformat_en, ft_attr->reformat_en);
devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
if (!devx_obj->obj) {
@@ -182,12 +183,24 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
action_flags = fte_attr->action_flags;
MLX5_SET(flow_context, in_flow_context, action, action_flags);
+ if (action_flags & MLX5_FLOW_CONTEXT_ACTION_REFORMAT)
+ MLX5_SET(flow_context, in_flow_context,
+ packet_reformat_id, fte_attr->packet_reformat_id);
+
+ if (action_flags & (MLX5_FLOW_CONTEXT_ACTION_DECRYPT | MLX5_FLOW_CONTEXT_ACTION_ENCRYPT)) {
+ MLX5_SET(flow_context, in_flow_context,
+ encrypt_decrypt_type, fte_attr->encrypt_decrypt_type);
+ MLX5_SET(flow_context, in_flow_context,
+ encrypt_decrypt_obj_id, fte_attr->encrypt_decrypt_obj_id);
+ }
+
if (action_flags & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
/* Only destination_list_size of size 1 is supported */
MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
MLX5_SET(dest_format, in_dests, destination_type, fte_attr->destination_type);
MLX5_SET(dest_format, in_dests, destination_id, fte_attr->destination_id);
+ MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
}
devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h
index f45b6c6b07..bf3a362300 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.h
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h
@@ -7,8 +7,12 @@
struct mlx5dr_cmd_set_fte_attr {
uint32_t action_flags;
+ uint8_t encrypt_decrypt_type;
+ uint32_t encrypt_decrypt_obj_id;
+ uint32_t packet_reformat_id;
uint8_t destination_type;
uint32_t destination_id;
+ uint8_t ignore_flow_level;
uint8_t flow_source;
};
@@ -16,6 +20,7 @@ struct mlx5dr_cmd_ft_create_attr {
uint8_t type;
uint8_t level;
bool rtc_valid;
+ uint8_t reformat_en;
};
#define ACCESS_KEY_LEN 32
@@ -296,6 +301,20 @@ struct mlx5dr_devx_obj *
mlx5dr_cmd_packet_reformat_create(struct ibv_context *ctx,
struct mlx5dr_cmd_packet_reformat_create_attr *attr);
+struct mlx5dr_devx_obj *
+mlx5dr_cmd_set_fte(struct ibv_context *ctx,
+ uint32_t table_type,
+ uint32_t table_id,
+ uint32_t group_id,
+ struct mlx5dr_cmd_set_fte_attr *fte_attr);
+
+struct mlx5dr_cmd_forward_tbl *
+mlx5dr_cmd_forward_tbl_create(struct ibv_context *ctx,
+ struct mlx5dr_cmd_ft_create_attr *ft_attr,
+ struct mlx5dr_cmd_set_fte_attr *fte_attr);
+
+void mlx5dr_cmd_forward_tbl_destroy(struct mlx5dr_cmd_forward_tbl *tbl);
+
struct mlx5dr_devx_obj *
mlx5dr_cmd_alias_obj_create(struct ibv_context *ctx,
struct mlx5dr_cmd_alias_obj_create_attr *alias_attr);
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v3 03/16] net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action
2023-10-17 8:09 ` [PATCH v3 " Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 01/16] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 02/16] net/mlx5/hws: support creating of dynamic forward table and FTE Gregory Etelson
@ 2023-10-17 8:09 ` Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 04/16] net/mlx5/hws: add support for mirroring Gregory Etelson
` (16 subsequent siblings)
19 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-17 8:09 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Hamdan Igbaria, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Hamdan Igbaria <hamdani@nvidia.com>
Add a mlx5dr_devx_obj struct to mlx5dr_action, so we can hold
the FT object in the dest table action.
Signed-off-by: Hamdan Igbaria <hamdani@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/hws/mlx5dr_action.c | 4 ++++
drivers/net/mlx5/hws/mlx5dr_action.h | 3 +++
drivers/net/mlx5/hws/mlx5dr_table.c | 1 -
3 files changed, 7 insertions(+), 1 deletion(-)
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c
index ea9fc23732..55ec4f71c9 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.c
+++ b/drivers/net/mlx5/hws/mlx5dr_action.c
@@ -787,6 +787,8 @@ mlx5dr_action_create_dest_table(struct mlx5dr_context *ctx,
ret = mlx5dr_action_create_stcs(action, tbl->ft);
if (ret)
goto free_action;
+
+ action->devx_dest.devx_obj = tbl->ft;
}
return action;
@@ -864,6 +866,8 @@ mlx5dr_action_create_dest_tir(struct mlx5dr_context *ctx,
ret = mlx5dr_action_create_stcs(action, cur_obj);
if (ret)
goto clean_obj;
+
+ action->devx_dest.devx_obj = cur_obj;
}
return action;
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.h b/drivers/net/mlx5/hws/mlx5dr_action.h
index 314e289780..104c6880c1 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.h
+++ b/drivers/net/mlx5/hws/mlx5dr_action.h
@@ -148,6 +148,9 @@ struct mlx5dr_action {
struct {
struct mlx5dv_steering_anchor *sa;
} root_tbl;
+ struct {
+ struct mlx5dr_devx_obj *devx_obj;
+ } devx_dest;
};
};
diff --git a/drivers/net/mlx5/hws/mlx5dr_table.c b/drivers/net/mlx5/hws/mlx5dr_table.c
index e1150cd75d..91eb92db78 100644
--- a/drivers/net/mlx5/hws/mlx5dr_table.c
+++ b/drivers/net/mlx5/hws/mlx5dr_table.c
@@ -68,7 +68,6 @@ static void mlx5dr_table_down_default_fdb_miss_tbl(struct mlx5dr_table *tbl)
return;
mlx5dr_cmd_forward_tbl_destroy(default_miss);
-
ctx->common_res[tbl_type].default_miss = NULL;
}
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v3 04/16] net/mlx5/hws: add support for mirroring
2023-10-17 8:09 ` [PATCH v3 " Gregory Etelson
` (2 preceding siblings ...)
2023-10-17 8:09 ` [PATCH v3 03/16] net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action Gregory Etelson
@ 2023-10-17 8:09 ` Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 05/16] net/mlx5/hws: allow destination into default miss FT Gregory Etelson
` (15 subsequent siblings)
19 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-17 8:09 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Shun Hao, Alex Vesker, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Shun Hao <shunh@nvidia.com>
This patch supports mirroring by adding a dest_array action. The action
accepts a list containing multiple destination actions, and can duplicate
packets and forward them to each destination in the list.
Signed-off-by: Shun Hao <shunh@nvidia.com>
Acked-by: Alex Vesker <valex@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/common/mlx5/mlx5_prm.h | 23 ++++-
drivers/net/mlx5/hws/mlx5dr.h | 34 +++++++
drivers/net/mlx5/hws/mlx5dr_action.c | 130 ++++++++++++++++++++++++++-
drivers/net/mlx5/hws/mlx5dr_action.h | 3 +
drivers/net/mlx5/hws/mlx5dr_cmd.c | 64 ++++++++++---
drivers/net/mlx5/hws/mlx5dr_cmd.h | 21 ++++-
drivers/net/mlx5/hws/mlx5dr_debug.c | 1 +
drivers/net/mlx5/hws/mlx5dr_table.c | 7 +-
8 files changed, 262 insertions(+), 21 deletions(-)
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index df621b19af..aa0b622ca2 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -2320,7 +2320,11 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
};
struct mlx5_ifc_esw_cap_bits {
- u8 reserved_at_0[0x60];
+ u8 reserved_at_0[0x1d];
+ u8 merged_eswitch[0x1];
+ u8 reserved_at_1e[0x2];
+
+ u8 reserved_at_20[0x40];
u8 esw_manager_vport_number_valid[0x1];
u8 reserved_at_61[0xf];
@@ -5045,6 +5049,7 @@ struct mlx5_ifc_query_flow_table_out_bits {
enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0,
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
+ MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
};
enum mlx5_flow_context_action {
@@ -5088,6 +5093,19 @@ union mlx5_ifc_dest_format_flow_counter_list_auto_bits {
u8 reserved_at_0[0x40];
};
+struct mlx5_ifc_extended_dest_format_bits {
+ struct mlx5_ifc_dest_format_bits destination_entry;
+
+ u8 packet_reformat_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+#define MLX5_IFC_MULTI_PATH_FT_MAX_LEVEL 64
+
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
struct mlx5_ifc_flow_context_bits {
u8 reserved_at_00[0x20];
u8 group_id[0x20];
@@ -5106,8 +5124,7 @@ struct mlx5_ifc_flow_context_bits {
u8 reserved_at_e0[0x40];
u8 encrypt_decrypt_obj_id[0x20];
u8 reserved_at_140[0x16c0];
- /* Currently only one destnation */
- union mlx5_ifc_dest_format_flow_counter_list_auto_bits destination[1];
+ union mlx5_ifc_dest_format_flow_counter_list_auto_bits destination[0];
};
struct mlx5_ifc_set_fte_in_bits {
diff --git a/drivers/net/mlx5/hws/mlx5dr.h b/drivers/net/mlx5/hws/mlx5dr.h
index ea8bf683f3..1995c55132 100644
--- a/drivers/net/mlx5/hws/mlx5dr.h
+++ b/drivers/net/mlx5/hws/mlx5dr.h
@@ -46,6 +46,7 @@ enum mlx5dr_action_type {
MLX5DR_ACTION_TYP_ASO_METER,
MLX5DR_ACTION_TYP_ASO_CT,
MLX5DR_ACTION_TYP_DEST_ROOT,
+ MLX5DR_ACTION_TYP_DEST_ARRAY,
MLX5DR_ACTION_TYP_MAX,
};
@@ -213,6 +214,20 @@ struct mlx5dr_rule_action {
};
};
+struct mlx5dr_action_dest_attr {
+ /* Required action combination */
+ enum mlx5dr_action_type *action_type;
+
+ /* Required destination action to forward the packet */
+ struct mlx5dr_action *dest;
+
+ /* Optional reformat data */
+ struct {
+ size_t reformat_data_sz;
+ void *reformat_data;
+ } reformat;
+};
+
/* Open a context used for direct rule insertion using hardware steering.
* Each context can contain multiple tables of different types.
*
@@ -616,6 +631,25 @@ mlx5dr_action_create_pop_vlan(struct mlx5dr_context *ctx, uint32_t flags);
struct mlx5dr_action *
mlx5dr_action_create_push_vlan(struct mlx5dr_context *ctx, uint32_t flags);
+/* Create a dest array action, this action can duplicate packets and forward to
+ * multiple destinations in the destination list.
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] num_dest
+ * The number of dests attributes.
+ * @param[in] dests
+ * The destination array. Each contains a destination action and can have
+ * additional actions.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5dr_action_flags)
+ * @return pointer to mlx5dr_action on success NULL otherwise.
+ */
+struct mlx5dr_action *
+mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
+ size_t num_dest,
+ struct mlx5dr_action_dest_attr *dests,
+ uint32_t flags);
+
/* Create dest root table, this action will jump to root table according
* the given priority.
* @param[in] ctx
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c
index 55ec4f71c9..f068bc7e9c 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.c
+++ b/drivers/net/mlx5/hws/mlx5dr_action.c
@@ -34,7 +34,8 @@ static const uint32_t action_order_arr[MLX5DR_TABLE_TYPE_MAX][MLX5DR_ACTION_TYP_
BIT(MLX5DR_ACTION_TYP_MISS) |
BIT(MLX5DR_ACTION_TYP_TIR) |
BIT(MLX5DR_ACTION_TYP_DROP) |
- BIT(MLX5DR_ACTION_TYP_DEST_ROOT),
+ BIT(MLX5DR_ACTION_TYP_DEST_ROOT) |
+ BIT(MLX5DR_ACTION_TYP_DEST_ARRAY),
BIT(MLX5DR_ACTION_TYP_LAST),
},
[MLX5DR_TABLE_TYPE_NIC_TX] = {
@@ -71,7 +72,8 @@ static const uint32_t action_order_arr[MLX5DR_TABLE_TYPE_MAX][MLX5DR_ACTION_TYP_
BIT(MLX5DR_ACTION_TYP_MISS) |
BIT(MLX5DR_ACTION_TYP_VPORT) |
BIT(MLX5DR_ACTION_TYP_DROP) |
- BIT(MLX5DR_ACTION_TYP_DEST_ROOT),
+ BIT(MLX5DR_ACTION_TYP_DEST_ROOT) |
+ BIT(MLX5DR_ACTION_TYP_DEST_ARRAY),
BIT(MLX5DR_ACTION_TYP_LAST),
},
};
@@ -535,6 +537,7 @@ static void mlx5dr_action_fill_stc_attr(struct mlx5dr_action *action,
}
break;
case MLX5DR_ACTION_TYP_TBL:
+ case MLX5DR_ACTION_TYP_DEST_ARRAY:
attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT;
attr->action_offset = MLX5DR_ACTION_OFFSET_HIT;
attr->dest_table_id = obj->id;
@@ -1700,6 +1703,124 @@ mlx5dr_action_create_modify_header(struct mlx5dr_context *ctx,
return NULL;
}
+struct mlx5dr_action *
+mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
+ size_t num_dest,
+ struct mlx5dr_action_dest_attr *dests,
+ uint32_t flags)
+{
+ struct mlx5dr_cmd_set_fte_dest *dest_list = NULL;
+ struct mlx5dr_cmd_ft_create_attr ft_attr = {0};
+ struct mlx5dr_cmd_set_fte_attr fte_attr = {0};
+ struct mlx5dr_cmd_forward_tbl *fw_island;
+ enum mlx5dr_table_type table_type;
+ struct mlx5dr_action *action;
+ uint32_t i;
+ int ret;
+
+ if (num_dest <= 1) {
+ rte_errno = EINVAL;
+ DR_LOG(ERR, "Action must have multiple dests");
+ return NULL;
+ }
+
+ if (flags == (MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_SHARED)) {
+ ft_attr.type = FS_FT_NIC_RX;
+ ft_attr.level = MLX5_IFC_MULTI_PATH_FT_MAX_LEVEL - 1;
+ table_type = MLX5DR_TABLE_TYPE_NIC_RX;
+ } else if (flags == (MLX5DR_ACTION_FLAG_HWS_FDB | MLX5DR_ACTION_FLAG_SHARED)) {
+ ft_attr.type = FS_FT_FDB;
+ ft_attr.level = ctx->caps->fdb_ft.max_level - 1;
+ table_type = MLX5DR_TABLE_TYPE_FDB;
+ } else {
+ DR_LOG(ERR, "Action flags not supported");
+ rte_errno = ENOTSUP;
+ return NULL;
+ }
+
+ if (mlx5dr_context_shared_gvmi_used(ctx)) {
+ DR_LOG(ERR, "Cannot use this action in shared GVMI context");
+ rte_errno = ENOTSUP;
+ return NULL;
+ }
+
+ dest_list = simple_calloc(num_dest, sizeof(*dest_list));
+ if (!dest_list) {
+ DR_LOG(ERR, "Failed to allocate memory for destinations");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ for (i = 0; i < num_dest; i++) {
+ enum mlx5dr_action_type *action_type = dests[i].action_type;
+
+ if (!mlx5dr_action_check_combo(dests[i].action_type, table_type)) {
+ DR_LOG(ERR, "Invalid combination of actions");
+ rte_errno = EINVAL;
+ goto free_dest_list;
+ }
+
+ for (; *action_type != MLX5DR_ACTION_TYP_LAST; action_type++) {
+ switch (*action_type) {
+ case MLX5DR_ACTION_TYP_TBL:
+ dest_list[i].destination_type =
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest_list[i].destination_id = dests[i].dest->devx_dest.devx_obj->id;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ fte_attr.ignore_flow_level = 1;
+ break;
+ case MLX5DR_ACTION_TYP_VPORT:
+ dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest_list[i].destination_id = dests[i].dest->vport.vport_num;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ if (ctx->caps->merged_eswitch) {
+ dest_list[i].ext_flags |=
+ MLX5DR_CMD_EXT_DEST_ESW_OWNER_VHCA_ID;
+ dest_list[i].esw_owner_vhca_id =
+ dests[i].dest->vport.esw_owner_vhca_id;
+ }
+ break;
+ case MLX5DR_ACTION_TYP_TIR:
+ dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest_list[i].destination_id = dests[i].dest->devx_dest.devx_obj->id;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ break;
+ default:
+ DR_LOG(ERR, "Unsupported action in dest_array");
+ rte_errno = ENOTSUP;
+ goto free_dest_list;
+ }
+ }
+ }
+ fte_attr.dests_num = num_dest;
+ fte_attr.dests = dest_list;
+
+ fw_island = mlx5dr_cmd_forward_tbl_create(ctx->ibv_ctx, &ft_attr, &fte_attr);
+ if (!fw_island)
+ goto free_dest_list;
+
+ action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_DEST_ARRAY);
+ if (!action)
+ goto destroy_fw_island;
+
+ ret = mlx5dr_action_create_stcs(action, fw_island->ft);
+ if (ret)
+ goto free_action;
+
+ action->dest_array.fw_island = fw_island;
+
+ simple_free(dest_list);
+ return action;
+
+free_action:
+ simple_free(action);
+destroy_fw_island:
+ mlx5dr_cmd_forward_tbl_destroy(fw_island);
+free_dest_list:
+ simple_free(dest_list);
+ return NULL;
+}
+
struct mlx5dr_action *
mlx5dr_action_create_dest_root(struct mlx5dr_context *ctx,
uint16_t priority,
@@ -1782,6 +1903,10 @@ static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action)
mlx5dr_action_destroy_stcs(action);
mlx5dr_action_put_shared_stc(action, MLX5DR_CONTEXT_SHARED_STC_POP);
break;
+ case MLX5DR_ACTION_TYP_DEST_ARRAY:
+ mlx5dr_action_destroy_stcs(action);
+ mlx5dr_cmd_forward_tbl_destroy(action->dest_array.fw_island);
+ break;
case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
case MLX5DR_ACTION_TYP_MODIFY_HDR:
for (i = 0; i < action->modify_header.num_of_patterns; i++) {
@@ -2291,6 +2416,7 @@ int mlx5dr_action_template_process(struct mlx5dr_action_template *at)
case MLX5DR_ACTION_TYP_TIR:
case MLX5DR_ACTION_TYP_TBL:
case MLX5DR_ACTION_TYP_DEST_ROOT:
+ case MLX5DR_ACTION_TYP_DEST_ARRAY:
case MLX5DR_ACTION_TYP_VPORT:
case MLX5DR_ACTION_TYP_MISS:
/* Hit action */
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.h b/drivers/net/mlx5/hws/mlx5dr_action.h
index 104c6880c1..efe07c9d47 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.h
+++ b/drivers/net/mlx5/hws/mlx5dr_action.h
@@ -151,6 +151,9 @@ struct mlx5dr_action {
struct {
struct mlx5dr_devx_obj *devx_obj;
} devx_dest;
+ struct {
+ struct mlx5dr_cmd_forward_tbl *fw_island;
+ } dest_array;
};
};
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c
index 8f407f9bce..22f7c6b019 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.c
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c
@@ -158,18 +158,31 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
uint32_t group_id,
struct mlx5dr_cmd_set_fte_attr *fte_attr)
{
- uint32_t in[MLX5_ST_SZ_DW(set_fte_in) + MLX5_ST_SZ_DW(dest_format)] = {0};
uint32_t out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
struct mlx5dr_devx_obj *devx_obj;
+ uint32_t dest_entry_sz;
+ uint32_t total_dest_sz;
void *in_flow_context;
uint32_t action_flags;
- void *in_dests;
+ uint8_t *in_dests;
+ uint32_t inlen;
+ uint32_t *in;
+ uint32_t i;
+
+ dest_entry_sz = MLX5_ST_SZ_BYTES(dest_format);
+ total_dest_sz = dest_entry_sz * fte_attr->dests_num;
+ inlen = align((MLX5_ST_SZ_BYTES(set_fte_in) + total_dest_sz), DW_SIZE);
+ in = simple_calloc(1, inlen);
+ if (!in) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
devx_obj = simple_malloc(sizeof(*devx_obj));
if (!devx_obj) {
DR_LOG(ERR, "Failed to allocate memory for fte object");
rte_errno = ENOMEM;
- return NULL;
+ goto free_in;
}
MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
@@ -179,6 +192,7 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_source, fte_attr->flow_source);
+ MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
action_flags = fte_attr->action_flags;
MLX5_SET(flow_context, in_flow_context, action, action_flags);
@@ -195,15 +209,39 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
}
if (action_flags & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- /* Only destination_list_size of size 1 is supported */
- MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
- in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
- MLX5_SET(dest_format, in_dests, destination_type, fte_attr->destination_type);
- MLX5_SET(dest_format, in_dests, destination_id, fte_attr->destination_id);
- MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
+ in_dests = (uint8_t *)MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+
+ for (i = 0; i < fte_attr->dests_num; i++) {
+ struct mlx5dr_cmd_set_fte_dest *dest = &fte_attr->dests[i];
+
+ switch (dest->destination_type) {
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+ if (dest->ext_flags & MLX5DR_CMD_EXT_DEST_ESW_OWNER_VHCA_ID) {
+ MLX5_SET(dest_format, in_dests,
+ destination_eswitch_owner_vhca_id_valid, 1);
+ MLX5_SET(dest_format, in_dests,
+ destination_eswitch_owner_vhca_id,
+ dest->esw_owner_vhca_id);
+ }
+ /* Fall through */
+ case MLX5_FLOW_DESTINATION_TYPE_TIR:
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ MLX5_SET(dest_format, in_dests, destination_type,
+ dest->destination_type);
+ MLX5_SET(dest_format, in_dests, destination_id,
+ dest->destination_id);
+ break;
+ default:
+ rte_errno = EOPNOTSUPP;
+ goto free_devx;
+ }
+
+ in_dests = in_dests + dest_entry_sz;
+ }
+ MLX5_SET(flow_context, in_flow_context, destination_list_size, fte_attr->dests_num);
}
- devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
+ devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, inlen, out, sizeof(out));
if (!devx_obj->obj) {
DR_LOG(ERR, "Failed to create FTE (syndrome: %#x)",
mlx5dr_cmd_get_syndrome(out));
@@ -211,10 +249,13 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
goto free_devx;
}
+ simple_free(in);
return devx_obj;
free_devx:
simple_free(devx_obj);
+free_in:
+ simple_free(in);
return NULL;
}
@@ -1244,6 +1285,9 @@ int mlx5dr_cmd_query_caps(struct ibv_context *ctx,
caps->eswitch_manager_vport_number =
MLX5_GET(query_hca_cap_out, out,
capability.esw_cap.esw_manager_vport_number);
+
+ caps->merged_eswitch = MLX5_GET(query_hca_cap_out, out,
+ capability.esw_cap.merged_eswitch);
}
ret = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h
index bf3a362300..6f4aa3e320 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.h
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h
@@ -5,15 +5,27 @@
#ifndef MLX5DR_CMD_H_
#define MLX5DR_CMD_H_
+enum mlx5dr_cmd_ext_dest_flags {
+ MLX5DR_CMD_EXT_DEST_REFORMAT = 1 << 0,
+ MLX5DR_CMD_EXT_DEST_ESW_OWNER_VHCA_ID = 1 << 1,
+};
+
+struct mlx5dr_cmd_set_fte_dest {
+ uint8_t destination_type;
+ uint32_t destination_id;
+ enum mlx5dr_cmd_ext_dest_flags ext_flags;
+ uint16_t esw_owner_vhca_id;
+};
+
struct mlx5dr_cmd_set_fte_attr {
uint32_t action_flags;
+ uint8_t ignore_flow_level;
+ uint8_t flow_source;
uint8_t encrypt_decrypt_type;
uint32_t encrypt_decrypt_obj_id;
uint32_t packet_reformat_id;
- uint8_t destination_type;
- uint32_t destination_id;
- uint8_t ignore_flow_level;
- uint8_t flow_source;
+ uint32_t dests_num;
+ struct mlx5dr_cmd_set_fte_dest *dests;
};
struct mlx5dr_cmd_ft_create_attr {
@@ -216,6 +228,7 @@ struct mlx5dr_cmd_query_caps {
struct mlx5dr_cmd_query_ft_caps nic_ft;
struct mlx5dr_cmd_query_ft_caps fdb_ft;
bool eswitch_manager;
+ uint8_t merged_eswitch;
uint32_t eswitch_manager_vport_number;
uint8_t log_header_modify_argument_granularity;
uint8_t log_header_modify_argument_max_alloc;
diff --git a/drivers/net/mlx5/hws/mlx5dr_debug.c b/drivers/net/mlx5/hws/mlx5dr_debug.c
index 89529944a3..e7b1f2cc32 100644
--- a/drivers/net/mlx5/hws/mlx5dr_debug.c
+++ b/drivers/net/mlx5/hws/mlx5dr_debug.c
@@ -23,6 +23,7 @@ const char *mlx5dr_debug_action_type_str[] = {
[MLX5DR_ACTION_TYP_ASO_METER] = "ASO_METER",
[MLX5DR_ACTION_TYP_ASO_CT] = "ASO_CT",
[MLX5DR_ACTION_TYP_DEST_ROOT] = "DEST_ROOT",
+ [MLX5DR_ACTION_TYP_DEST_ARRAY] = "DEST_ARRAY",
};
static_assert(ARRAY_SIZE(mlx5dr_debug_action_type_str) == MLX5DR_ACTION_TYP_MAX,
diff --git a/drivers/net/mlx5/hws/mlx5dr_table.c b/drivers/net/mlx5/hws/mlx5dr_table.c
index 91eb92db78..55b9b20150 100644
--- a/drivers/net/mlx5/hws/mlx5dr_table.c
+++ b/drivers/net/mlx5/hws/mlx5dr_table.c
@@ -22,6 +22,7 @@ mlx5dr_table_up_default_fdb_miss_tbl(struct mlx5dr_table *tbl)
struct mlx5dr_cmd_ft_create_attr ft_attr = {0};
struct mlx5dr_cmd_set_fte_attr fte_attr = {0};
struct mlx5dr_cmd_forward_tbl *default_miss;
+ struct mlx5dr_cmd_set_fte_dest dest = {0};
struct mlx5dr_context *ctx = tbl->ctx;
uint8_t tbl_type = tbl->type;
@@ -37,9 +38,11 @@ mlx5dr_table_up_default_fdb_miss_tbl(struct mlx5dr_table *tbl)
ft_attr.level = tbl->ctx->caps->fdb_ft.max_level; /* The last level */
ft_attr.rtc_valid = false;
+ dest.destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.destination_id = ctx->caps->eswitch_manager_vport_number;
fte_attr.action_flags = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- fte_attr.destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- fte_attr.destination_id = ctx->caps->eswitch_manager_vport_number;
+ fte_attr.dests_num = 1;
+ fte_attr.dests = &dest;
default_miss = mlx5dr_cmd_forward_tbl_create(mlx5dr_context_get_local_ibv(ctx),
&ft_attr, &fte_attr);
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v3 05/16] net/mlx5/hws: allow destination into default miss FT
2023-10-17 8:09 ` [PATCH v3 " Gregory Etelson
` (3 preceding siblings ...)
2023-10-17 8:09 ` [PATCH v3 04/16] net/mlx5/hws: add support for mirroring Gregory Etelson
@ 2023-10-17 8:09 ` Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 06/16] net/mlx5/hws: support reformat for hws mirror Gregory Etelson
` (14 subsequent siblings)
19 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-17 8:09 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Erez Shitrit, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Erez Shitrit <erezsh@nvidia.com>
In FDB it will direct the packet into the hypervisor vport.
That allows the user to mirror packets into the default-miss vport.
Signed-off-by: Erez Shitrit <erezsh@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/hws/mlx5dr_action.c | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c
index f068bc7e9c..6b62111593 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.c
+++ b/drivers/net/mlx5/hws/mlx5dr_action.c
@@ -1769,6 +1769,17 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
fte_attr.ignore_flow_level = 1;
break;
+ case MLX5DR_ACTION_TYP_MISS:
+ if (table_type != MLX5DR_TABLE_TYPE_FDB) {
+ DR_LOG(ERR, "Miss action supported for FDB only");
+ rte_errno = ENOTSUP;
+ goto free_dest_list;
+ }
+ dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest_list[i].destination_id =
+ ctx->caps->eswitch_manager_vport_number;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ break;
case MLX5DR_ACTION_TYP_VPORT:
dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest_list[i].destination_id = dests[i].dest->vport.vport_num;
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v3 06/16] net/mlx5/hws: support reformat for hws mirror
2023-10-17 8:09 ` [PATCH v3 " Gregory Etelson
` (4 preceding siblings ...)
2023-10-17 8:09 ` [PATCH v3 05/16] net/mlx5/hws: allow destination into default miss FT Gregory Etelson
@ 2023-10-17 8:09 ` Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 07/16] net/mlx5: reformat HWS code Gregory Etelson
` (13 subsequent siblings)
19 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-17 8:09 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Haifei Luo, Shun Hao, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Haifei Luo <haifeil@nvidia.com>
In dest_array action, an optional reformat action can be applied to each
destination. This patch supports this by using the extended destination
entry.
Signed-off-by: Haifei Luo <haifeil@nvidia.com>
Signed-off-by: Shun Hao <shunh@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/common/mlx5/mlx5_prm.h | 15 +++++++
drivers/net/mlx5/hws/mlx5dr_action.c | 67 +++++++++++++++++++++++++++-
drivers/net/mlx5/hws/mlx5dr_action.h | 2 +
drivers/net/mlx5/hws/mlx5dr_cmd.c | 10 ++++-
drivers/net/mlx5/hws/mlx5dr_cmd.h | 2 +
5 files changed, 94 insertions(+), 2 deletions(-)
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index aa0b622ca2..bced5a59dd 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -5352,6 +5352,21 @@ enum mlx5_parse_graph_arc_node_index {
MLX5_GRAPH_ARC_NODE_PROGRAMMABLE = 0x1f,
};
+enum mlx5_packet_reformat_context_reformat_type {
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4 = 0x5,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L3_ESP_TUNNEL = 0x6,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4 = 0x7,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_DEL_ESP_TRANSPORT = 0x8,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L3_ESP_TUNNEL_TO_L2 = 0x9,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP = 0xA,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6 = 0xB,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6 = 0xC,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_NISP_TNL = 0xD,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_REMOVE_NISP_TNL = 0xE,
+};
+
#define MLX5_PARSE_GRAPH_FLOW_SAMPLE_MAX 8
#define MLX5_PARSE_GRAPH_IN_ARC_MAX 8
#define MLX5_PARSE_GRAPH_OUT_ARC_MAX 8
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c
index 6b62111593..11a7c58925 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.c
+++ b/drivers/net/mlx5/hws/mlx5dr_action.c
@@ -1703,6 +1703,44 @@ mlx5dr_action_create_modify_header(struct mlx5dr_context *ctx,
return NULL;
}
+static struct mlx5dr_devx_obj *
+mlx5dr_action_dest_array_process_reformat(struct mlx5dr_context *ctx,
+ enum mlx5dr_action_type type,
+ void *reformat_data,
+ size_t reformat_data_sz)
+{
+ struct mlx5dr_cmd_packet_reformat_create_attr pr_attr = {0};
+ struct mlx5dr_devx_obj *reformat_devx_obj;
+
+ if (!reformat_data || !reformat_data_sz) {
+ DR_LOG(ERR, "Empty reformat action or data");
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ switch (type) {
+ case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ pr_attr.type = MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L2_TUNNEL;
+ break;
+ case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ pr_attr.type = MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
+ break;
+ default:
+ DR_LOG(ERR, "Invalid value for reformat type");
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ pr_attr.reformat_param_0 = 0;
+ pr_attr.data_sz = reformat_data_sz;
+ pr_attr.data = reformat_data;
+
+ reformat_devx_obj = mlx5dr_cmd_packet_reformat_create(ctx->ibv_ctx, &pr_attr);
+ if (!reformat_devx_obj)
+ return NULL;
+
+ return reformat_devx_obj;
+}
+
struct mlx5dr_action *
mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
size_t num_dest,
@@ -1710,6 +1748,7 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
uint32_t flags)
{
struct mlx5dr_cmd_set_fte_dest *dest_list = NULL;
+ struct mlx5dr_devx_obj *packet_reformat = NULL;
struct mlx5dr_cmd_ft_create_attr ft_attr = {0};
struct mlx5dr_cmd_set_fte_attr fte_attr = {0};
struct mlx5dr_cmd_forward_tbl *fw_island;
@@ -1796,6 +1835,21 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
dest_list[i].destination_id = dests[i].dest->devx_dest.devx_obj->id;
fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
break;
+ case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ packet_reformat = mlx5dr_action_dest_array_process_reformat
+ (ctx,
+ *action_type,
+ dests[i].reformat.reformat_data,
+ dests[i].reformat.reformat_data_sz);
+ if (!packet_reformat)
+ goto free_dest_list;
+
+ dest_list[i].ext_flags |= MLX5DR_CMD_EXT_DEST_REFORMAT;
+ dest_list[i].ext_reformat = packet_reformat;
+ ft_attr.reformat_en = true;
+ fte_attr.extended_dest = 1;
+ break;
default:
DR_LOG(ERR, "Unsupported action in dest_array");
rte_errno = ENOTSUP;
@@ -1819,8 +1873,9 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
goto free_action;
action->dest_array.fw_island = fw_island;
+ action->dest_array.num_dest = num_dest;
+ action->dest_array.dest_list = dest_list;
- simple_free(dest_list);
return action;
free_action:
@@ -1828,6 +1883,10 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
destroy_fw_island:
mlx5dr_cmd_forward_tbl_destroy(fw_island);
free_dest_list:
+ for (i = 0; i < num_dest; i++) {
+ if (dest_list[i].ext_reformat)
+ mlx5dr_cmd_destroy_obj(dest_list[i].ext_reformat);
+ }
simple_free(dest_list);
return NULL;
}
@@ -1917,6 +1976,12 @@ static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action)
case MLX5DR_ACTION_TYP_DEST_ARRAY:
mlx5dr_action_destroy_stcs(action);
mlx5dr_cmd_forward_tbl_destroy(action->dest_array.fw_island);
+ for (i = 0; i < action->dest_array.num_dest; i++) {
+ if (action->dest_array.dest_list[i].ext_reformat)
+ mlx5dr_cmd_destroy_obj
+ (action->dest_array.dest_list[i].ext_reformat);
+ }
+ simple_free(action->dest_array.dest_list);
break;
case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
case MLX5DR_ACTION_TYP_MODIFY_HDR:
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.h b/drivers/net/mlx5/hws/mlx5dr_action.h
index efe07c9d47..582a38bebc 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.h
+++ b/drivers/net/mlx5/hws/mlx5dr_action.h
@@ -153,6 +153,8 @@ struct mlx5dr_action {
} devx_dest;
struct {
struct mlx5dr_cmd_forward_tbl *fw_island;
+ size_t num_dest;
+ struct mlx5dr_cmd_set_fte_dest *dest_list;
} dest_array;
};
};
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c
index 22f7c6b019..781de40c02 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.c
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c
@@ -169,7 +169,9 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
uint32_t *in;
uint32_t i;
- dest_entry_sz = MLX5_ST_SZ_BYTES(dest_format);
+ dest_entry_sz = fte_attr->extended_dest ?
+ MLX5_ST_SZ_BYTES(extended_dest_format) :
+ MLX5_ST_SZ_BYTES(dest_format);
total_dest_sz = dest_entry_sz * fte_attr->dests_num;
inlen = align((MLX5_ST_SZ_BYTES(set_fte_in) + total_dest_sz), DW_SIZE);
in = simple_calloc(1, inlen);
@@ -192,6 +194,7 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_source, fte_attr->flow_source);
+ MLX5_SET(flow_context, in_flow_context, extended_destination, fte_attr->extended_dest);
MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
action_flags = fte_attr->action_flags;
@@ -230,6 +233,11 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
dest->destination_type);
MLX5_SET(dest_format, in_dests, destination_id,
dest->destination_id);
+ if (dest->ext_flags & MLX5DR_CMD_EXT_DEST_REFORMAT) {
+ MLX5_SET(dest_format, in_dests, packet_reformat, 1);
+ MLX5_SET(extended_dest_format, in_dests, packet_reformat_id,
+ dest->ext_reformat->id);
+ }
break;
default:
rte_errno = EOPNOTSUPP;
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h
index 6f4aa3e320..28e5ea4726 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.h
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h
@@ -14,6 +14,7 @@ struct mlx5dr_cmd_set_fte_dest {
uint8_t destination_type;
uint32_t destination_id;
enum mlx5dr_cmd_ext_dest_flags ext_flags;
+ struct mlx5dr_devx_obj *ext_reformat;
uint16_t esw_owner_vhca_id;
};
@@ -21,6 +22,7 @@ struct mlx5dr_cmd_set_fte_attr {
uint32_t action_flags;
uint8_t ignore_flow_level;
uint8_t flow_source;
+ uint8_t extended_dest;
uint8_t encrypt_decrypt_type;
uint32_t encrypt_decrypt_obj_id;
uint32_t packet_reformat_id;
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v3 07/16] net/mlx5: reformat HWS code
2023-10-17 8:09 ` [PATCH v3 " Gregory Etelson
` (5 preceding siblings ...)
2023-10-17 8:09 ` [PATCH v3 06/16] net/mlx5/hws: support reformat for hws mirror Gregory Etelson
@ 2023-10-17 8:09 ` Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 08/16] net/mlx5: support HWS mirror action Gregory Etelson
` (12 subsequent siblings)
19 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-17 8:09 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Suanming Mou, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam
Replace if() with switch().
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/mlx5_flow_hw.c | 70 ++++++++++++++++++---------------
1 file changed, 39 insertions(+), 31 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 6fcf654e4a..b2215fb5cf 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -4548,6 +4548,17 @@ static enum mlx5dr_action_type mlx5_hw_dr_action_types[] = {
[RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL] = MLX5DR_ACTION_TYP_DEST_ROOT,
};
+static inline void
+action_template_set_type(struct rte_flow_actions_template *at,
+ enum mlx5dr_action_type *action_types,
+ unsigned int action_src, uint16_t *curr_off,
+ enum mlx5dr_action_type type)
+{
+ at->actions_off[action_src] = *curr_off;
+ action_types[*curr_off] = type;
+ *curr_off = *curr_off + 1;
+}
+
static int
flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
unsigned int action_src,
@@ -4565,9 +4576,8 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
type = mask->type;
switch (type) {
case RTE_FLOW_ACTION_TYPE_RSS:
- at->actions_off[action_src] = *curr_off;
- action_types[*curr_off] = MLX5DR_ACTION_TYP_TIR;
- *curr_off = *curr_off + 1;
+ action_template_set_type(at, action_types, action_src, curr_off,
+ MLX5DR_ACTION_TYP_TIR);
break;
case RTE_FLOW_ACTION_TYPE_AGE:
case RTE_FLOW_ACTION_TYPE_COUNT:
@@ -4575,23 +4585,20 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
* Both AGE and COUNT action need counter, the first one fills
* the action_types array, and the second only saves the offset.
*/
- if (*cnt_off == UINT16_MAX) {
- *cnt_off = *curr_off;
- action_types[*cnt_off] = MLX5DR_ACTION_TYP_CTR;
- *curr_off = *curr_off + 1;
- }
+ if (*cnt_off == UINT16_MAX)
+ action_template_set_type(at, action_types,
+ action_src, curr_off,
+ MLX5DR_ACTION_TYP_CTR);
at->actions_off[action_src] = *cnt_off;
break;
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
- at->actions_off[action_src] = *curr_off;
- action_types[*curr_off] = MLX5DR_ACTION_TYP_ASO_CT;
- *curr_off = *curr_off + 1;
+ action_template_set_type(at, action_types, action_src, curr_off,
+ MLX5DR_ACTION_TYP_ASO_CT);
break;
case RTE_FLOW_ACTION_TYPE_QUOTA:
case RTE_FLOW_ACTION_TYPE_METER_MARK:
- at->actions_off[action_src] = *curr_off;
- action_types[*curr_off] = MLX5DR_ACTION_TYP_ASO_METER;
- *curr_off = *curr_off + 1;
+ action_template_set_type(at, action_types, action_src, curr_off,
+ MLX5DR_ACTION_TYP_ASO_METER);
break;
default:
DRV_LOG(WARNING, "Unsupported shared action type: %d", type);
@@ -5101,31 +5108,32 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
at->reformat_off = UINT16_MAX;
at->mhdr_off = UINT16_MAX;
at->rx_cpy_pos = pos;
- /*
- * mlx5 PMD hacks indirect action index directly to the action conf.
- * The rte_flow_conv() function copies the content from conf pointer.
- * Need to restore the indirect action index from action conf here.
- */
for (i = 0; actions->type != RTE_FLOW_ACTION_TYPE_END;
actions++, masks++, i++) {
- if (actions->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
+ const struct rte_flow_action_modify_field *info;
+
+ switch (actions->type) {
+ /*
+ * mlx5 PMD hacks indirect action index directly to the action conf.
+ * The rte_flow_conv() function copies the content from conf pointer.
+ * Need to restore the indirect action index from action conf here.
+ */
+ case RTE_FLOW_ACTION_TYPE_INDIRECT:
at->actions[i].conf = actions->conf;
at->masks[i].conf = masks->conf;
- }
- if (actions->type == RTE_FLOW_ACTION_TYPE_MODIFY_FIELD) {
- const struct rte_flow_action_modify_field *info = actions->conf;
-
+ break;
+ case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
+ info = actions->conf;
if ((info->dst.field == RTE_FLOW_FIELD_FLEX_ITEM &&
flow_hw_flex_item_acquire(dev, info->dst.flex_handle,
&at->flex_item)) ||
- (info->src.field == RTE_FLOW_FIELD_FLEX_ITEM &&
- flow_hw_flex_item_acquire(dev, info->src.flex_handle,
- &at->flex_item))) {
- rte_flow_error_set(error, rte_errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "Failed to acquire flex item");
+ (info->src.field == RTE_FLOW_FIELD_FLEX_ITEM &&
+ flow_hw_flex_item_acquire(dev, info->src.flex_handle,
+ &at->flex_item)))
goto error;
- }
+ break;
+ default:
+ break;
}
}
at->tmpl = flow_hw_dr_actions_template_create(at);
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v3 08/16] net/mlx5: support HWS mirror action
2023-10-17 8:09 ` [PATCH v3 " Gregory Etelson
` (6 preceding siblings ...)
2023-10-17 8:09 ` [PATCH v3 07/16] net/mlx5: reformat HWS code Gregory Etelson
@ 2023-10-17 8:09 ` Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 09/16] net/mlx5: fix mirror action validation Gregory Etelson
` (11 subsequent siblings)
19 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-17 8:09 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Suanming Mou, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam
HWS mirror clones original packet to one or two destinations and
proceeds with the original packet path.
The mirror has no dedicated RTE flow action type.
Mirror object is referenced by INDIRECT_LIST action.
INDIRECT_LIST for a mirror built from actions list:
SAMPLE [/ SAMPLE] / <Orig. packet destination> / END
Mirror SAMPLE action defines packet clone. It specifies the clone
destination and optional clone reformat action.
Destination action for both clone and original packet depends on HCA
domain:
- for NIC RX, destination is either RSS or QUEUE
- for FDB, destination is PORT
HWS mirror was implemented with the INDIRECT_LIST flow action.
MLX5 PMD defines general `struct mlx5_indirect_list` type for all.
INDIRECT_LIST handler objects:
struct mlx5_indirect_list {
enum mlx5_indirect_list_type type;
LIST_ENTRY(mlx5_indirect_list) chain;
char data[];
};
Specific INDIRECT_LIST type must overload `mlx5_indirect_list::data`
and provide unique `type` value.
PMD returns a pointer to `mlx5_indirect_list` object.
Existing non-masked actions template API cannot identify flow actions
in INDIRECT_LIST handler because INDIRECT_LIST handler can represent
several flow actions.
For example:
A: SAMPLE / JUMP
B: SAMPLE / SAMPLE / RSS
Actions template command
template indirect_list / end mask indirect_list 0 / end
does not provide any information to differentiate between flow
actions in A and B.
MLX5 PMD requires INDIRECT_LIST configuration parameter in the
template section:
Non-masked INDIRECT_LIST API:
=============================
template indirect_list X / end mask indirect_list 0 / end
PMD identifies type of X handler and will use the same type in
template creation. Actual parameters for actions in the list will
be extracted from flow configuration
Masked INDIRECT_LIST API:
=========================
template indirect_list X / end mask indirect_list -1UL / end
PMD creates action template from actions types and configurations
referenced by X.
INDIRECT_LIST action without configuration is invalid and will be
rejected by PMD.
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/mlx5.c | 1 +
drivers/net/mlx5/mlx5.h | 2 +
drivers/net/mlx5/mlx5_flow.c | 134 +++++++
drivers/net/mlx5/mlx5_flow.h | 69 +++-
drivers/net/mlx5/mlx5_flow_hw.c | 616 +++++++++++++++++++++++++++++++-
5 files changed, 817 insertions(+), 5 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 997df595d0..08b7b03365 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -2168,6 +2168,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
/* Free the eCPRI flex parser resource. */
mlx5_flex_parser_ecpri_release(dev);
mlx5_flex_item_port_cleanup(dev);
+ mlx5_indirect_list_handles_release(dev);
#ifdef HAVE_MLX5_HWS_SUPPORT
flow_hw_destroy_vport_action(dev);
flow_hw_resource_release(dev);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 0b709a1bda..f3b872f59c 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1791,6 +1791,8 @@ struct mlx5_priv {
LIST_HEAD(ind_tables, mlx5_ind_table_obj) ind_tbls;
/* Standalone indirect tables. */
LIST_HEAD(stdl_ind_tables, mlx5_ind_table_obj) standalone_ind_tbls;
+ /* Objects created with indirect list action */
+ LIST_HEAD(indirect_list, mlx5_indirect_list) indirect_list_head;
/* Pointer to next element. */
rte_rwlock_t ind_tbls_lock;
uint32_t refcnt; /**< Reference counter. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 8ad85e6027..693d1320e1 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -62,6 +62,30 @@ struct tunnel_default_miss_ctx {
};
};
+void
+mlx5_indirect_list_handles_release(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ while (!LIST_EMPTY(&priv->indirect_list_head)) {
+ struct mlx5_indirect_list *e =
+ LIST_FIRST(&priv->indirect_list_head);
+
+ LIST_REMOVE(e, entry);
+ switch (e->type) {
+#ifdef HAVE_MLX5_HWS_SUPPORT
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
+ mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)e, true);
+ break;
+#endif
+ default:
+ DRV_LOG(ERR, "invalid indirect list type");
+ MLX5_ASSERT(false);
+ break;
+ }
+ }
+}
+
static int
flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
struct rte_flow *flow,
@@ -1120,6 +1144,32 @@ mlx5_flow_async_action_handle_query_update
enum rte_flow_query_update_mode qu_mode,
void *user_data, struct rte_flow_error *error);
+static struct rte_flow_action_list_handle *
+mlx5_action_list_handle_create(struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error);
+
+static int
+mlx5_action_list_handle_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_action_list_handle *handle,
+ struct rte_flow_error *error);
+
+static struct rte_flow_action_list_handle *
+mlx5_flow_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct
+ rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data,
+ struct rte_flow_error *error);
+static int
+mlx5_flow_async_action_list_handle_destroy
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow_action_list_handle *action_handle,
+ void *user_data, struct rte_flow_error *error);
+
static const struct rte_flow_ops mlx5_flow_ops = {
.validate = mlx5_flow_validate,
.create = mlx5_flow_create,
@@ -1135,6 +1185,8 @@ static const struct rte_flow_ops mlx5_flow_ops = {
.action_handle_update = mlx5_action_handle_update,
.action_handle_query = mlx5_action_handle_query,
.action_handle_query_update = mlx5_action_handle_query_update,
+ .action_list_handle_create = mlx5_action_list_handle_create,
+ .action_list_handle_destroy = mlx5_action_list_handle_destroy,
.tunnel_decap_set = mlx5_flow_tunnel_decap_set,
.tunnel_match = mlx5_flow_tunnel_match,
.tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
@@ -1163,6 +1215,10 @@ static const struct rte_flow_ops mlx5_flow_ops = {
.async_action_handle_query = mlx5_flow_async_action_handle_query,
.async_action_handle_destroy = mlx5_flow_async_action_handle_destroy,
.async_actions_update = mlx5_flow_async_flow_update,
+ .async_action_list_handle_create =
+ mlx5_flow_async_action_list_handle_create,
+ .async_action_list_handle_destroy =
+ mlx5_flow_async_action_list_handle_destroy,
};
/* Tunnel information. */
@@ -10869,6 +10925,84 @@ mlx5_action_handle_query_update(struct rte_eth_dev *dev,
query, qu_mode, error);
}
+
+#define MLX5_DRV_FOPS_OR_ERR(dev, fops, drv_cb, ret) \
+{ \
+ struct rte_flow_attr attr = { .transfer = 0 }; \
+ enum mlx5_flow_drv_type drv_type = flow_get_drv_type((dev), &attr); \
+ if (drv_type == MLX5_FLOW_TYPE_MIN || \
+ drv_type == MLX5_FLOW_TYPE_MAX) { \
+ rte_flow_error_set(error, ENOTSUP, \
+ RTE_FLOW_ERROR_TYPE_ACTION, \
+ NULL, "invalid driver type"); \
+ return ret; \
+ } \
+ (fops) = flow_get_drv_ops(drv_type); \
+ if (!(fops) || !(fops)->drv_cb) { \
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, \
+ NULL, "no action_list handler"); \
+ return ret; \
+ } \
+}
+
+static struct rte_flow_action_list_handle *
+mlx5_action_list_handle_create(struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops, action_list_handle_create, NULL);
+ return fops->action_list_handle_create(dev, conf, actions, error);
+}
+
+static int
+mlx5_action_list_handle_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_action_list_handle *handle,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops, action_list_handle_destroy, ENOTSUP);
+ return fops->action_list_handle_destroy(dev, handle, error);
+}
+
+static struct rte_flow_action_list_handle *
+mlx5_flow_async_action_list_handle_create(struct rte_eth_dev *dev,
+ uint32_t queue_id,
+ const struct
+ rte_flow_op_attr *op_attr,
+ const struct
+ rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops, async_action_list_handle_create, NULL);
+ return fops->async_action_list_handle_create(dev, queue_id, op_attr,
+ conf, actions, user_data,
+ error);
+}
+
+static int
+mlx5_flow_async_action_list_handle_destroy
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow_action_list_handle *action_handle,
+ void *user_data, struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops,
+ async_action_list_handle_destroy, ENOTSUP);
+ return fops->async_action_list_handle_destroy(dev, queue_id, op_attr,
+ action_handle, user_data,
+ error);
+}
+
/**
* Destroy all indirect actions (shared RSS).
*
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 903ff66d72..f6a752475d 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -65,7 +65,7 @@ enum mlx5_rte_flow_field_id {
(((uint32_t)(uintptr_t)(handle)) & \
((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1))
-enum {
+enum mlx5_indirect_type{
MLX5_INDIRECT_ACTION_TYPE_RSS,
MLX5_INDIRECT_ACTION_TYPE_AGE,
MLX5_INDIRECT_ACTION_TYPE_COUNT,
@@ -97,6 +97,28 @@ enum {
#define MLX5_ACTION_CTX_CT_GET_OWNER MLX5_INDIRECT_ACT_CT_GET_OWNER
#define MLX5_ACTION_CTX_CT_GEN_IDX MLX5_INDIRECT_ACT_CT_GEN_IDX
+enum mlx5_indirect_list_type {
+ MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 1,
+};
+
+/*
+ * Base type for indirect list type.
+ * Actual indirect list type MUST override that type and put type spec data
+ * after the `chain`.
+ */
+struct mlx5_indirect_list {
+ /* type field MUST be the first */
+ enum mlx5_indirect_list_type type;
+ LIST_ENTRY(mlx5_indirect_list) entry;
+ /* put type specific data after chain */
+};
+
+static __rte_always_inline enum mlx5_indirect_list_type
+mlx5_get_indirect_list_type(const struct mlx5_indirect_list *obj)
+{
+ return obj->type;
+}
+
/* Matches on selected register. */
struct mlx5_rte_flow_item_tag {
enum modify_reg id;
@@ -1218,6 +1240,10 @@ struct rte_flow_hw {
#pragma GCC diagnostic error "-Wpedantic"
#endif
+struct mlx5dr_action;
+typedef struct mlx5dr_action *
+(*indirect_list_callback_t)(const struct rte_flow_action *);
+
/* rte flow action translate to DR action struct. */
struct mlx5_action_construct_data {
LIST_ENTRY(mlx5_action_construct_data) next;
@@ -1266,6 +1292,9 @@ struct mlx5_action_construct_data {
struct {
uint32_t id;
} shared_meter;
+ struct {
+ indirect_list_callback_t cb;
+ } indirect_list;
};
};
@@ -1776,6 +1805,17 @@ typedef int (*mlx5_flow_action_query_update_t)
const void *update, void *data,
enum rte_flow_query_update_mode qu_mode,
struct rte_flow_error *error);
+typedef struct rte_flow_action_list_handle *
+(*mlx5_flow_action_list_handle_create_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error);
+typedef int
+(*mlx5_flow_action_list_handle_destroy_t)
+ (struct rte_eth_dev *dev,
+ struct rte_flow_action_list_handle *handle,
+ struct rte_flow_error *error);
typedef int (*mlx5_flow_sync_domain_t)
(struct rte_eth_dev *dev,
uint32_t domains,
@@ -1964,6 +2004,20 @@ typedef int (*mlx5_flow_async_action_handle_destroy_t)
struct rte_flow_action_handle *handle,
void *user_data,
struct rte_flow_error *error);
+typedef struct rte_flow_action_list_handle *
+(*mlx5_flow_async_action_list_handle_create_t)
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data, struct rte_flow_error *error);
+typedef int
+(*mlx5_flow_async_action_list_handle_destroy_t)
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow_action_list_handle *action_handle,
+ void *user_data, struct rte_flow_error *error);
+
struct mlx5_flow_driver_ops {
mlx5_flow_validate_t validate;
@@ -1999,6 +2053,8 @@ struct mlx5_flow_driver_ops {
mlx5_flow_action_update_t action_update;
mlx5_flow_action_query_t action_query;
mlx5_flow_action_query_update_t action_query_update;
+ mlx5_flow_action_list_handle_create_t action_list_handle_create;
+ mlx5_flow_action_list_handle_destroy_t action_list_handle_destroy;
mlx5_flow_sync_domain_t sync_domain;
mlx5_flow_discover_priorities_t discover_priorities;
mlx5_flow_item_create_t item_create;
@@ -2025,6 +2081,10 @@ struct mlx5_flow_driver_ops {
mlx5_flow_async_action_handle_query_update_t async_action_query_update;
mlx5_flow_async_action_handle_query_t async_action_query;
mlx5_flow_async_action_handle_destroy_t async_action_destroy;
+ mlx5_flow_async_action_list_handle_create_t
+ async_action_list_handle_create;
+ mlx5_flow_async_action_list_handle_destroy_t
+ async_action_list_handle_destroy;
};
/* mlx5_flow.c */
@@ -2755,4 +2815,11 @@ flow_hw_get_srh_flex_parser_byte_off_from_ctx(void *dr_ctx __rte_unused)
#endif
return UINT32_MAX;
}
+void
+mlx5_indirect_list_handles_release(struct rte_eth_dev *dev);
+#ifdef HAVE_MLX5_HWS_SUPPORT
+struct mlx5_mirror;
+void
+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release);
+#endif
#endif /* RTE_PMD_MLX5_FLOW_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index b2215fb5cf..44ed23b1fd 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -58,6 +58,24 @@
#define MLX5_HW_VLAN_PUSH_VID_IDX 1
#define MLX5_HW_VLAN_PUSH_PCP_IDX 2
+#define MLX5_MIRROR_MAX_CLONES_NUM 3
+#define MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN 4
+
+struct mlx5_mirror_clone {
+ enum rte_flow_action_type type;
+ void *action_ctx;
+};
+
+struct mlx5_mirror {
+ /* type field MUST be the first */
+ enum mlx5_indirect_list_type type;
+ LIST_ENTRY(mlx5_indirect_list) entry;
+
+ uint32_t clones_num;
+ struct mlx5dr_action *mirror_action;
+ struct mlx5_mirror_clone clone[MLX5_MIRROR_MAX_CLONES_NUM];
+};
+
static int flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev);
static int flow_hw_translate_group(struct rte_eth_dev *dev,
const struct mlx5_flow_template_table_cfg *cfg,
@@ -568,6 +586,22 @@ __flow_hw_act_data_general_append(struct mlx5_priv *priv,
return 0;
}
+static __rte_always_inline int
+flow_hw_act_data_indirect_list_append(struct mlx5_priv *priv,
+ struct mlx5_hw_actions *acts,
+ enum rte_flow_action_type type,
+ uint16_t action_src, uint16_t action_dst,
+ indirect_list_callback_t cb)
+{
+ struct mlx5_action_construct_data *act_data;
+
+ act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
+ if (!act_data)
+ return -1;
+ act_data->indirect_list.cb = cb;
+ LIST_INSERT_HEAD(&acts->act_list, act_data, next);
+ return 0;
+}
/**
* Append dynamic encap action to the dynamic action list.
*
@@ -1383,6 +1417,48 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev,
return 0;
}
+static struct mlx5dr_action *
+flow_hw_mirror_action(const struct rte_flow_action *action)
+{
+ struct mlx5_mirror *mirror = (void *)(uintptr_t)action->conf;
+
+ return mirror->mirror_action;
+}
+
+static int
+table_template_translate_indirect_list(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *mask,
+ struct mlx5_hw_actions *acts,
+ uint16_t action_src,
+ uint16_t action_dst)
+{
+ int ret;
+ bool is_masked = action->conf && mask->conf;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ enum mlx5_indirect_list_type type;
+
+ if (!action->conf)
+ return -EINVAL;
+ type = mlx5_get_indirect_list_type(action->conf);
+ switch (type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
+ if (is_masked) {
+ acts->rule_acts[action_dst].action = flow_hw_mirror_action(action);
+ } else {
+ ret = flow_hw_act_data_indirect_list_append
+ (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
+ action_src, action_dst, flow_hw_mirror_action);
+ if (ret)
+ return ret;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
/**
* Translate rte_flow actions to DR action.
*
@@ -1419,7 +1495,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
struct rte_flow_action *actions = at->actions;
struct rte_flow_action *action_start = actions;
struct rte_flow_action *masks = at->masks;
- enum mlx5dr_action_type refmt_type = 0;
+ enum mlx5dr_action_type refmt_type = MLX5DR_ACTION_TYP_LAST;
const struct rte_flow_action_raw_encap *raw_encap_data;
const struct rte_flow_item *enc_item = NULL, *enc_item_m = NULL;
uint16_t reformat_src = 0;
@@ -1433,7 +1509,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
uint16_t action_pos;
uint16_t jump_pos;
uint32_t ct_idx;
- int err;
+ int ret, err;
uint32_t target_grp = 0;
int table_type;
@@ -1445,7 +1521,20 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
else
type = MLX5DR_TABLE_TYPE_NIC_RX;
for (; !actions_end; actions++, masks++) {
- switch (actions->type) {
+ switch ((int)actions->type) {
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
+ action_pos = at->actions_off[actions - at->actions];
+ if (!attr->group) {
+ DRV_LOG(ERR, "Indirect action is not supported in root table.");
+ goto err;
+ }
+ ret = table_template_translate_indirect_list
+ (dev, actions, masks, acts,
+ actions - action_start,
+ action_pos);
+ if (ret)
+ goto err;
+ break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
action_pos = at->actions_off[actions - at->actions];
if (!attr->group) {
@@ -2301,7 +2390,11 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
MLX5_ASSERT(action->type ==
RTE_FLOW_ACTION_TYPE_INDIRECT ||
(int)action->type == act_data->type);
- switch (act_data->type) {
+ switch ((int)act_data->type) {
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
+ rule_acts[act_data->action_dst].action =
+ act_data->indirect_list.cb(action);
+ break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
if (flow_hw_shared_action_construct
(dev, queue, action, table, it_idx,
@@ -4366,6 +4459,8 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev,
switch (action->type) {
case RTE_FLOW_ACTION_TYPE_VOID:
break;
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
+ break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
ret = flow_hw_validate_action_indirect(dev, action,
mask,
@@ -4607,6 +4702,28 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
return 0;
}
+
+static int
+flow_hw_template_actions_list(struct rte_flow_actions_template *at,
+ unsigned int action_src,
+ enum mlx5dr_action_type *action_types,
+ uint16_t *curr_off)
+{
+ enum mlx5_indirect_list_type list_type;
+
+ list_type = mlx5_get_indirect_list_type(at->actions[action_src].conf);
+ switch (list_type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
+ action_template_set_type(at, action_types, action_src, curr_off,
+ MLX5DR_ACTION_TYP_DEST_ARRAY);
+ break;
+ default:
+ DRV_LOG(ERR, "Unsupported indirect list type");
+ return -EINVAL;
+ }
+ return 0;
+}
+
/**
* Create DR action template based on a provided sequence of flow actions.
*
@@ -4639,6 +4756,12 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
switch (at->actions[i].type) {
case RTE_FLOW_ACTION_TYPE_VOID:
break;
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
+ ret = flow_hw_template_actions_list(at, i, action_types,
+ &curr_off);
+ if (ret)
+ return NULL;
+ break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
ret = flow_hw_dr_actions_template_handle_shared
(&at->masks[i],
@@ -5119,6 +5242,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
* Need to restore the indirect action index from action conf here.
*/
case RTE_FLOW_ACTION_TYPE_INDIRECT:
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
at->actions[i].conf = actions->conf;
at->masks[i].conf = masks->conf;
break;
@@ -9354,6 +9478,484 @@ flow_hw_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
return flow_hw_get_q_aged_flows(dev, 0, contexts, nb_contexts, error);
}
+static void
+mlx5_mirror_destroy_clone(struct rte_eth_dev *dev,
+ struct mlx5_mirror_clone *clone)
+{
+ switch (clone->type) {
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ mlx5_hrxq_release(dev,
+ ((struct mlx5_hrxq *)(clone->action_ctx))->idx);
+ break;
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ flow_hw_jump_release(dev, clone->action_ctx);
+ break;
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ default:
+ break;
+ }
+}
+
+void
+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release)
+{
+ uint32_t i;
+
+ if (mirror->entry.le_prev)
+ LIST_REMOVE(mirror, entry);
+ for(i = 0; i < mirror->clones_num; i++)
+ mlx5_mirror_destroy_clone(dev, &mirror->clone[i]);
+ if (mirror->mirror_action)
+ mlx5dr_action_destroy(mirror->mirror_action);
+ if (release)
+ mlx5_free(mirror);
+}
+
+static inline enum mlx5dr_table_type
+get_mlx5dr_table_type(const struct rte_flow_attr *attr)
+{
+ enum mlx5dr_table_type type;
+
+ if (attr->transfer)
+ type = MLX5DR_TABLE_TYPE_FDB;
+ else if (attr->egress)
+ type = MLX5DR_TABLE_TYPE_NIC_TX;
+ else
+ type = MLX5DR_TABLE_TYPE_NIC_RX;
+ return type;
+}
+
+static __rte_always_inline bool
+mlx5_mirror_terminal_action(const struct rte_flow_action *action)
+{
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+static bool
+mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ switch(action->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ if (priv->sh->esw_mode)
+ return false;
+ break;
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ if (!priv->sh->esw_mode)
+ return false;
+ if (action[0].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP &&
+ action[1].type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
+ return false;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+/**
+ * Valid mirror actions list includes one or two SAMPLE actions
+ * followed by JUMP.
+ *
+ * @return
+ * Number of mirrors *action* list was valid.
+ * -EINVAL otherwise.
+ */
+static int
+mlx5_hw_mirror_actions_list_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions)
+{
+ if (actions[0].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
+ int i = 1;
+ bool valid;
+ const struct rte_flow_action_sample *sample = actions[0].conf;
+ valid = mlx5_mirror_validate_sample_action(dev, sample->actions);
+ if (!valid)
+ return -EINVAL;
+ if (actions[1].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
+ i = 2;
+ sample = actions[1].conf;
+ valid = mlx5_mirror_validate_sample_action(dev, sample->actions);
+ if (!valid)
+ return -EINVAL;
+ }
+ return mlx5_mirror_terminal_action(actions + i) ? i + 1 : -EINVAL;
+ }
+ return -EINVAL;
+}
+
+static int
+mirror_format_tir(struct rte_eth_dev *dev,
+ struct mlx5_mirror_clone *clone,
+ const struct mlx5_flow_template_table_cfg *table_cfg,
+ const struct rte_flow_action *action,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ struct rte_flow_error *error)
+{
+ uint32_t hws_flags;
+ enum mlx5dr_table_type table_type;
+ struct mlx5_hrxq* tir_ctx;
+
+ table_type = get_mlx5dr_table_type(&table_cfg->attr.flow_attr);
+ hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
+ tir_ctx = flow_hw_tir_action_register(dev, hws_flags, action);
+ if (!tir_ctx)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action, "failed to create QUEUE action for mirror clone");
+ dest_attr->dest = tir_ctx->action;
+ clone->action_ctx = tir_ctx;
+ return 0;
+}
+
+static int
+mirror_format_jump(struct rte_eth_dev *dev,
+ struct mlx5_mirror_clone *clone,
+ const struct mlx5_flow_template_table_cfg *table_cfg,
+ const struct rte_flow_action *action,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_jump *jump_conf = action->conf;
+ struct mlx5_hw_jump_action *jump = flow_hw_jump_action_register
+ (dev, table_cfg,
+ jump_conf->group, error);
+
+ if (!jump)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action, "failed to create JUMP action for mirror clone");
+ dest_attr->dest = jump->hws_action;
+ clone->action_ctx = jump;
+ return 0;
+}
+
+static int
+mirror_format_port(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ struct rte_flow_error __rte_unused *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_ethdev *port_action = action->conf;
+
+ dest_attr->dest =priv->hw_vport[port_action->port_id];
+ return 0;
+}
+
+#define MLX5_CONST_ENCAP_ITEM(encap_type, ptr) \
+(((const struct encap_type *)(ptr))->definition)
+
+static int
+hw_mirror_clone_reformat(const struct rte_flow_action *actions,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ enum mlx5dr_action_type *action_type, bool decap)
+{
+ int ret;
+ uint8_t encap_buf[MLX5_ENCAP_MAX_LEN];
+ const struct rte_flow_item *encap_item = NULL;
+ const struct rte_flow_action_raw_encap *encap_conf = NULL;
+ typeof(dest_attr->reformat) *reformat = &dest_attr->reformat;
+
+ switch (actions[0].type) {
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ encap_conf = actions[0].conf;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ encap_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_vxlan_encap,
+ actions);
+ break;
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ encap_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_nvgre_encap,
+ actions);
+ break;
+ default:
+ return -EINVAL;
+ }
+ *action_type = decap ?
+ MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3 :
+ MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
+ if (encap_item) {
+ ret = flow_dv_convert_encap_data(encap_item, encap_buf,
+ &reformat->reformat_data_sz, NULL);
+ if (ret)
+ return -EINVAL;
+ reformat->reformat_data = (void *)(uintptr_t)encap_buf;
+ } else {
+ reformat->reformat_data = (void *)(uintptr_t)encap_conf->data;
+ reformat->reformat_data_sz = encap_conf->size;
+ }
+ return 0;
+}
+
+static int
+hw_mirror_format_clone(struct rte_eth_dev *dev,
+ struct mlx5_mirror_clone *clone,
+ const struct mlx5_flow_template_table_cfg *table_cfg,
+ const struct rte_flow_action *actions,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ struct rte_flow_error *error)
+{
+ int ret;
+ uint32_t i;
+ bool decap_seen = false;
+
+ for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
+ dest_attr->action_type[i] = mlx5_hw_dr_action_types[actions[i].type];
+ switch(actions[i].type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ ret = mirror_format_tir(dev, clone, table_cfg,
+ &actions[i], dest_attr, error);
+ if (ret)
+ return ret;
+ break;
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ ret = mirror_format_port(dev, &actions[i],
+ dest_attr, error);
+ if (ret)
+ return ret;
+ break;
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ ret = mirror_format_jump(dev, clone, table_cfg,
+ &actions[i], dest_attr, error);
+ if (ret)
+ return ret;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ decap_seen = true;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ ret = hw_mirror_clone_reformat(&actions[i], dest_attr,
+ &dest_attr->action_type[i],
+ decap_seen);
+ if (ret < 0)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ &actions[i],
+ "failed to create reformat action");
+ break;
+ default:
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ &actions[i], "unsupported sample action");
+ }
+ clone->type = actions->type;
+ }
+ dest_attr->action_type[i] = MLX5DR_ACTION_TYP_LAST;
+ return 0;
+}
+
+static struct rte_flow_action_list_handle *
+mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
+ const struct mlx5_flow_template_table_cfg *table_cfg,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ uint32_t hws_flags;
+ int ret = 0, i, clones_num;
+ struct mlx5_mirror *mirror;
+ enum mlx5dr_table_type table_type;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5dr_action_dest_attr mirror_attr[MLX5_MIRROR_MAX_CLONES_NUM + 1];
+ enum mlx5dr_action_type array_action_types[MLX5_MIRROR_MAX_CLONES_NUM + 1]
+ [MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN + 1];
+
+ memset(mirror_attr, 0, sizeof(mirror_attr));
+ memset(array_action_types, 0, sizeof(array_action_types));
+ table_type = get_mlx5dr_table_type(&table_cfg->attr.flow_attr);
+ hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
+ clones_num = mlx5_hw_mirror_actions_list_validate(dev, actions);
+ if (clones_num < 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Invalid mirror list format");
+ return NULL;
+ }
+ mirror = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mirror),
+ 0, SOCKET_ID_ANY);
+ if (!mirror) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Failed to allocate mirror context");
+ return NULL;
+ }
+ mirror->type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
+ mirror->clones_num = clones_num;
+ for (i = 0; i < clones_num; i++) {
+ const struct rte_flow_action *clone_actions;
+
+ mirror_attr[i].action_type = array_action_types[i];
+ if (actions[i].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
+ const struct rte_flow_action_sample *sample = actions[i].conf;
+
+ clone_actions = sample->actions;
+ } else {
+ clone_actions = &actions[i];
+ }
+ ret = hw_mirror_format_clone(dev, &mirror->clone[i], table_cfg,
+ clone_actions, &mirror_attr[i],
+ error);
+
+ if (ret)
+ goto error;
+
+ }
+ hws_flags |= MLX5DR_ACTION_FLAG_SHARED;
+ mirror->mirror_action = mlx5dr_action_create_dest_array(priv->dr_ctx,
+ clones_num,
+ mirror_attr,
+ hws_flags);
+ if (!mirror->mirror_action) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Failed to create HWS mirror action");
+ goto error;
+ }
+
+ LIST_INSERT_HEAD(&priv->indirect_list_head,
+ (struct mlx5_indirect_list *)mirror, entry);
+ return (struct rte_flow_action_list_handle *)mirror;
+
+error:
+ mlx5_hw_mirror_destroy(dev, mirror, true);
+ return NULL;
+}
+
+static struct rte_flow_action_list_handle *
+flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data,
+ struct rte_flow_error *error)
+{
+ struct mlx5_hw_q_job *job = NULL;
+ bool push = flow_hw_action_push(attr);
+ struct rte_flow_action_list_handle *handle;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct mlx5_flow_template_table_cfg table_cfg = {
+ .external = true,
+ .attr = {
+ .flow_attr = {
+ .ingress = conf->ingress,
+ .egress = conf->egress,
+ .transfer = conf->transfer
+ }
+ }
+ };
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "No action list");
+ return NULL;
+ }
+ if (attr) {
+ job = flow_hw_action_job_init(priv, queue, NULL, user_data,
+ NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
+ error);
+ if (!job)
+ return NULL;
+ }
+ switch (actions[0].type) {
+ case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ handle = mlx5_hw_mirror_handle_create(dev, &table_cfg,
+ actions, error);
+ break;
+ default:
+ handle = NULL;
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Invalid list");
+ }
+ if (job) {
+ job->action = handle;
+ flow_hw_action_finalize(dev, queue, job, push, false,
+ handle != NULL);
+ }
+ return handle;
+}
+
+static struct rte_flow_action_list_handle *
+flow_hw_action_list_handle_create(struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ return flow_hw_async_action_list_handle_create(dev, MLX5_HW_INV_QUEUE,
+ NULL, conf, actions,
+ NULL, error);
+}
+
+static int
+flow_hw_async_action_list_handle_destroy
+ (struct rte_eth_dev *dev, uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ struct rte_flow_action_list_handle *handle,
+ void *user_data, struct rte_flow_error *error)
+{
+ int ret = 0;
+ struct mlx5_hw_q_job *job = NULL;
+ bool push = flow_hw_action_push(attr);
+ struct mlx5_priv *priv = dev->data->dev_private;
+ enum mlx5_indirect_list_type type =
+ mlx5_get_indirect_list_type((void *)handle);
+
+ if (attr) {
+ job = flow_hw_action_job_init(priv, queue, NULL, user_data,
+ NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
+ error);
+ if (!job)
+ return rte_errno;
+ }
+ switch(type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
+ mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle, false);
+ break;
+ default:
+ handle = NULL;
+ ret = rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "Invalid indirect list handle");
+ }
+ if (job) {
+ job->action = handle;
+ flow_hw_action_finalize(dev, queue, job, push, false,
+ handle != NULL);
+ }
+ mlx5_free(handle);
+ return ret;
+}
+
+static int
+flow_hw_action_list_handle_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_action_list_handle *handle,
+ struct rte_flow_error *error)
+{
+ return flow_hw_async_action_list_handle_destroy(dev, MLX5_HW_INV_QUEUE,
+ NULL, handle, NULL,
+ error);
+}
+
const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.info_get = flow_hw_info_get,
.configure = flow_hw_configure,
@@ -9382,6 +9984,12 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.action_update = flow_hw_action_update,
.action_query = flow_hw_action_query,
.action_query_update = flow_hw_action_query_update,
+ .action_list_handle_create = flow_hw_action_list_handle_create,
+ .action_list_handle_destroy = flow_hw_action_list_handle_destroy,
+ .async_action_list_handle_create =
+ flow_hw_async_action_list_handle_create,
+ .async_action_list_handle_destroy =
+ flow_hw_async_action_list_handle_destroy,
.query = flow_hw_query,
.get_aged_flows = flow_hw_get_aged_flows,
.get_q_aged_flows = flow_hw_get_q_aged_flows,
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v3 09/16] net/mlx5: fix mirror action validation
2023-10-17 8:09 ` [PATCH v3 " Gregory Etelson
` (7 preceding siblings ...)
2023-10-17 8:09 ` [PATCH v3 08/16] net/mlx5: support HWS mirror action Gregory Etelson
@ 2023-10-17 8:09 ` Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 10/16] net/mlx5: fix in shared counter and age template action create Gregory Etelson
` (10 subsequent siblings)
19 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-17 8:09 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Suanming Mou, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam
HWS mirror flow action validation rejected flows in the NIC domain
if the PMD FDB mode was active.
The patch allows NIC mirror action in FDB mode.
Fixes: 0284c9b82ee8 ("net/mlx5: support HWS mirror action")
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/mlx5_flow_hw.c | 20 +++++++++++++-------
1 file changed, 13 insertions(+), 7 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 44ed23b1fd..910d42a5f5 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -9547,14 +9547,15 @@ mlx5_mirror_terminal_action(const struct rte_flow_action *action)
static bool
mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
- const struct rte_flow_action *action)
+ const struct rte_flow_attr *flow_attr,
+ const struct rte_flow_action *action)
{
struct mlx5_priv *priv = dev->data->dev_private;
switch(action->type) {
case RTE_FLOW_ACTION_TYPE_QUEUE:
case RTE_FLOW_ACTION_TYPE_RSS:
- if (priv->sh->esw_mode)
+ if (flow_attr->transfer)
return false;
break;
case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
@@ -9562,7 +9563,7 @@ mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
- if (!priv->sh->esw_mode)
+ if (!priv->sh->esw_mode && !flow_attr->transfer)
return false;
if (action[0].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP &&
action[1].type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
@@ -9584,19 +9585,22 @@ mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
*/
static int
mlx5_hw_mirror_actions_list_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *flow_attr,
const struct rte_flow_action *actions)
{
if (actions[0].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
int i = 1;
bool valid;
const struct rte_flow_action_sample *sample = actions[0].conf;
- valid = mlx5_mirror_validate_sample_action(dev, sample->actions);
+ valid = mlx5_mirror_validate_sample_action(dev, flow_attr,
+ sample->actions);
if (!valid)
return -EINVAL;
if (actions[1].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
i = 2;
sample = actions[1].conf;
- valid = mlx5_mirror_validate_sample_action(dev, sample->actions);
+ valid = mlx5_mirror_validate_sample_action(dev, flow_attr,
+ sample->actions);
if (!valid)
return -EINVAL;
}
@@ -9780,15 +9784,17 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
struct mlx5_mirror *mirror;
enum mlx5dr_table_type table_type;
struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_attr *flow_attr = &table_cfg->attr.flow_attr;
struct mlx5dr_action_dest_attr mirror_attr[MLX5_MIRROR_MAX_CLONES_NUM + 1];
enum mlx5dr_action_type array_action_types[MLX5_MIRROR_MAX_CLONES_NUM + 1]
[MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN + 1];
memset(mirror_attr, 0, sizeof(mirror_attr));
memset(array_action_types, 0, sizeof(array_action_types));
- table_type = get_mlx5dr_table_type(&table_cfg->attr.flow_attr);
+ table_type = get_mlx5dr_table_type(flow_attr);
hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
- clones_num = mlx5_hw_mirror_actions_list_validate(dev, actions);
+ clones_num = mlx5_hw_mirror_actions_list_validate(dev, flow_attr,
+ actions);
if (clones_num < 0) {
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
actions, "Invalid mirror list format");
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v3 10/16] net/mlx5: fix in shared counter and age template action create
2023-10-17 8:09 ` [PATCH v3 " Gregory Etelson
` (8 preceding siblings ...)
2023-10-17 8:09 ` [PATCH v3 09/16] net/mlx5: fix mirror action validation Gregory Etelson
@ 2023-10-17 8:09 ` Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 11/16] net/mlx5: fix modify field expansion for raw DECAP / ENCAP Gregory Etelson
` (9 subsequent siblings)
19 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-17 8:09 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, stable, Suanming Mou, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam
Count and age actions in HWS template are translated into the same
DR5 action.
The PMD maintains a dedicated variable, `cnt_off`, that points to the
action location in the DR5 array.
The current PMD did not initialize the `cnt_off` variable during
shared counter / age action initialization.
Fixes: feb1f2fe2b76 ("net/mlx5: reformat HWS code")
Cc: stable@dpdk.org
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/mlx5_flow_hw.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 910d42a5f5..efb2d512b7 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -4680,10 +4680,12 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
* Both AGE and COUNT action need counter, the first one fills
* the action_types array, and the second only saves the offset.
*/
- if (*cnt_off == UINT16_MAX)
+ if (*cnt_off == UINT16_MAX) {
+ *cnt_off = *curr_off;
action_template_set_type(at, action_types,
action_src, curr_off,
MLX5DR_ACTION_TYP_CTR);
+ }
at->actions_off[action_src] = *cnt_off;
break;
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v3 11/16] net/mlx5: fix modify field expansion for raw DECAP / ENCAP
2023-10-17 8:09 ` [PATCH v3 " Gregory Etelson
` (9 preceding siblings ...)
2023-10-17 8:09 ` [PATCH v3 10/16] net/mlx5: fix in shared counter and age template action create Gregory Etelson
@ 2023-10-17 8:09 ` Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 12/16] net/mlx5: refactor HWS code Gregory Etelson
` (8 subsequent siblings)
19 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-17 8:09 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Suanming Mou, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam
Flow actions in an HWS actions template must be arranged according to
a pre-defined order.
MLX5 PMD handles RAW_DECAP / RAW_ENCAP actions sequence as a single
RAW ENCAP or DECAP flow action.
When the PMD scanned flow actions to locate position where
MODIFY_FIELD action could be inserted it processed the
RAW_DECAP / RAW_ENCAP sequence as 2 separate actions. As the result
location selection was wrong.
The patch fixes RAW_DECAP / RAW_ENCAP sequence processing for
MODIFY_FIELD expansion.
Fixes: cf7f458 ("net/mlx5: add indirect QUOTA create/query/modify")
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/mlx5_flow_hw.c | 36 +++++++++++++++++++++++++++++++--
1 file changed, 34 insertions(+), 2 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index efb2d512b7..7b060442ac 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -4283,6 +4283,28 @@ flow_hw_validate_action_raw_encap(struct rte_eth_dev *dev __rte_unused,
return 0;
}
+/**
+ * Process `... / raw_decap / raw_encap / ...` actions sequence.
+ * The PMD handles the sequence as a single encap or decap reformat action,
+ * depending on the raw_encap configuration.
+ *
+ * The function assumes that the sequence location in actions template list
+ * complies with relative HWS actions order for the required reformat.
+ */
+static uint64_t
+mlx5_decap_encap_reformat_type(const struct rte_flow_action *actions,
+ uint32_t encap_ind, uint64_t flags)
+{
+ const struct rte_flow_action_raw_encap *encap = actions[encap_ind].conf;
+
+ if ((flags & MLX5_FLOW_ACTION_DECAP) == 0)
+ return MLX5_FLOW_ACTION_ENCAP;
+ if (actions[encap_ind - 1].type != RTE_FLOW_ACTION_TYPE_RAW_DECAP)
+ return MLX5_FLOW_ACTION_ENCAP;
+ return encap->size >= MLX5_ENCAPSULATION_DECISION_SIZE ?
+ MLX5_FLOW_ACTION_ENCAP : MLX5_FLOW_ACTION_DECAP;
+}
+
static inline uint16_t
flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
struct rte_flow_action masks[],
@@ -4320,13 +4342,13 @@ flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
*/
for (i = act_num - 2; (int)i >= 0; i--) {
enum rte_flow_action_type type = actions[i].type;
+ uint64_t reformat_type;
if (type == RTE_FLOW_ACTION_TYPE_INDIRECT)
type = masks[i].type;
switch (type) {
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
- case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
case RTE_FLOW_ACTION_TYPE_DROP:
case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
case RTE_FLOW_ACTION_TYPE_JUMP:
@@ -4337,10 +4359,20 @@ flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
case RTE_FLOW_ACTION_TYPE_VOID:
case RTE_FLOW_ACTION_TYPE_END:
break;
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ reformat_type =
+ mlx5_decap_encap_reformat_type(actions, i,
+ flags);
+ if (reformat_type == MLX5_FLOW_ACTION_DECAP) {
+ i++;
+ goto insert;
+ }
+ if (flags & MLX5_FLOW_ACTION_DECAP)
+ i--;
+ break;
default:
i++; /* new MF inserted AFTER actions[i] */
goto insert;
- break;
}
}
i = 0;
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v3 12/16] net/mlx5: refactor HWS code
2023-10-17 8:09 ` [PATCH v3 " Gregory Etelson
` (10 preceding siblings ...)
2023-10-17 8:09 ` [PATCH v3 11/16] net/mlx5: fix modify field expansion for raw DECAP / ENCAP Gregory Etelson
@ 2023-10-17 8:09 ` Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 13/16] net/mlx5: fix RTE action location tracking in a template Gregory Etelson
` (7 subsequent siblings)
19 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-17 8:09 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Suanming Mou, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam
1. In `rte_flow_actions_template`, rename `actions_off` into `dr_off`.
2. Remove duplicated code in template table creation.
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/mlx5_flow.h | 2 +-
drivers/net/mlx5/mlx5_flow_hw.c | 117 ++++++++++++++------------------
2 files changed, 52 insertions(+), 67 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index f6a752475d..a31120cd78 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1331,7 +1331,7 @@ struct rte_flow_actions_template {
uint64_t action_flags; /* Bit-map of all valid action in template. */
uint16_t dr_actions_num; /* Amount of DR rules actions. */
uint16_t actions_num; /* Amount of flow actions */
- uint16_t *actions_off; /* DR action offset for given rte action offset. */
+ uint16_t *dr_off; /* DR action offset for given rte action offset. */
uint16_t reformat_off; /* Offset of DR reformat action. */
uint16_t mhdr_off; /* Offset of DR modify header action. */
uint32_t refcnt; /* Reference counter. */
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 7b060442ac..53641ae2d5 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -1506,7 +1506,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
uint32_t type;
bool reformat_used = false;
unsigned int of_vlan_offset;
- uint16_t action_pos;
+ uint16_t dr_pos;
uint16_t jump_pos;
uint32_t ct_idx;
int ret, err;
@@ -1521,9 +1521,9 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
else
type = MLX5DR_TABLE_TYPE_NIC_RX;
for (; !actions_end; actions++, masks++) {
+ dr_pos = at->dr_off[actions - at->actions];
switch ((int)actions->type) {
case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
- action_pos = at->actions_off[actions - at->actions];
if (!attr->group) {
DRV_LOG(ERR, "Indirect action is not supported in root table.");
goto err;
@@ -1531,61 +1531,57 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
ret = table_template_translate_indirect_list
(dev, actions, masks, acts,
actions - action_start,
- action_pos);
+ dr_pos);
if (ret)
goto err;
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
- action_pos = at->actions_off[actions - at->actions];
if (!attr->group) {
DRV_LOG(ERR, "Indirect action is not supported in root table.");
goto err;
}
if (actions->conf && masks->conf) {
if (flow_hw_shared_action_translate
- (dev, actions, acts, actions - action_start, action_pos))
+ (dev, actions, acts, actions - action_start, dr_pos))
goto err;
} else if (__flow_hw_act_data_general_append
- (priv, acts, actions->type,
- actions - action_start, action_pos)){
+ (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT,
+ actions - action_start, dr_pos)){
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_VOID:
break;
case RTE_FLOW_ACTION_TYPE_DROP:
- action_pos = at->actions_off[actions - at->actions];
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
priv->hw_drop[!!attr->group];
break;
case RTE_FLOW_ACTION_TYPE_MARK:
- action_pos = at->actions_off[actions - at->actions];
acts->mark = true;
if (masks->conf &&
((const struct rte_flow_action_mark *)
masks->conf)->id)
- acts->rule_acts[action_pos].tag.value =
+ acts->rule_acts[dr_pos].tag.value =
mlx5_flow_mark_set
(((const struct rte_flow_action_mark *)
(actions->conf))->id);
else if (__flow_hw_act_data_general_append(priv, acts,
- actions->type, actions - action_start, action_pos))
+ actions->type, actions - action_start, dr_pos))
goto err;
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
priv->hw_tag[!!attr->group];
__atomic_fetch_add(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED);
flow_hw_rxq_flag_set(dev, true);
break;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
- action_pos = at->actions_off[actions - at->actions];
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
priv->hw_push_vlan[type];
if (is_template_masked_push_vlan(masks->conf))
- acts->rule_acts[action_pos].push_vlan.vlan_hdr =
+ acts->rule_acts[dr_pos].push_vlan.vlan_hdr =
vlan_hdr_to_be32(actions);
else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos))
+ actions - action_start, dr_pos))
goto err;
of_vlan_offset = is_of_vlan_pcp_present(actions) ?
MLX5_HW_VLAN_PUSH_PCP_IDX :
@@ -1594,12 +1590,10 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
masks += of_vlan_offset;
break;
case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
- action_pos = at->actions_off[actions - at->actions];
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
priv->hw_pop_vlan[type];
break;
case RTE_FLOW_ACTION_TYPE_JUMP:
- action_pos = at->actions_off[actions - at->actions];
if (masks->conf &&
((const struct rte_flow_action_jump *)
masks->conf)->group) {
@@ -1610,17 +1604,16 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
(dev, cfg, jump_group, error);
if (!acts->jump)
goto err;
- acts->rule_acts[action_pos].action = (!!attr->group) ?
- acts->jump->hws_action :
- acts->jump->root_action;
+ acts->rule_acts[dr_pos].action = (!!attr->group) ?
+ acts->jump->hws_action :
+ acts->jump->root_action;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)){
+ actions - action_start, dr_pos)){
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_QUEUE:
- action_pos = at->actions_off[actions - at->actions];
if (masks->conf &&
((const struct rte_flow_action_queue *)
masks->conf)->index) {
@@ -1630,16 +1623,15 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
actions);
if (!acts->tir)
goto err;
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
acts->tir->action;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)) {
+ actions - action_start, dr_pos)) {
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_RSS:
- action_pos = at->actions_off[actions - at->actions];
if (actions->conf && masks->conf) {
acts->tir = flow_hw_tir_action_register
(dev,
@@ -1647,11 +1639,11 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
actions);
if (!acts->tir)
goto err;
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
acts->tir->action;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)) {
+ actions - action_start, dr_pos)) {
goto err;
}
break;
@@ -1720,11 +1712,10 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
NULL,
"Send to kernel action on root table is not supported in HW steering mode");
}
- action_pos = at->actions_off[actions - at->actions];
table_type = attr->ingress ? MLX5DR_TABLE_TYPE_NIC_RX :
((attr->egress) ? MLX5DR_TABLE_TYPE_NIC_TX :
- MLX5DR_TABLE_TYPE_FDB);
- acts->rule_acts[action_pos].action = priv->hw_send_to_kernel[table_type];
+ MLX5DR_TABLE_TYPE_FDB);
+ acts->rule_acts[dr_pos].action = priv->hw_send_to_kernel[table_type];
break;
case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
err = flow_hw_modify_field_compile(dev, attr, action_start,
@@ -1744,10 +1735,9 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
action_start += 1;
break;
case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
- action_pos = at->actions_off[actions - at->actions];
if (flow_hw_represented_port_compile
(dev, attr, action_start, actions,
- masks, acts, action_pos, error))
+ masks, acts, dr_pos, error))
goto err;
break;
case RTE_FLOW_ACTION_TYPE_METER:
@@ -1756,19 +1746,18 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
* Calculated DR offset is stored only for ASO_METER and FT
* is assumed to be the next action.
*/
- action_pos = at->actions_off[actions - at->actions];
- jump_pos = action_pos + 1;
+ jump_pos = dr_pos + 1;
if (actions->conf && masks->conf &&
((const struct rte_flow_action_meter *)
masks->conf)->mtr_id) {
err = flow_hw_meter_compile(dev, cfg,
- action_pos, jump_pos, actions, acts, error);
+ dr_pos, jump_pos, actions, acts, error);
if (err)
goto err;
} else if (__flow_hw_act_data_general_append(priv, acts,
- actions->type,
+ actions->type,
actions - action_start,
- action_pos))
+ dr_pos))
goto err;
break;
case RTE_FLOW_ACTION_TYPE_AGE:
@@ -1781,11 +1770,10 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
NULL,
"Age action on root table is not supported in HW steering mode");
}
- action_pos = at->actions_off[actions - at->actions];
if (__flow_hw_act_data_general_append(priv, acts,
- actions->type,
+ actions->type,
actions - action_start,
- action_pos))
+ dr_pos))
goto err;
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
@@ -1806,49 +1794,46 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
* counter.
*/
break;
- action_pos = at->actions_off[actions - at->actions];
if (masks->conf &&
((const struct rte_flow_action_count *)
masks->conf)->id) {
- err = flow_hw_cnt_compile(dev, action_pos, acts);
+ err = flow_hw_cnt_compile(dev, dr_pos, acts);
if (err)
goto err;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)) {
+ actions - action_start, dr_pos)) {
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
- action_pos = at->actions_off[actions - at->actions];
if (masks->conf) {
ct_idx = MLX5_ACTION_CTX_CT_GET_IDX
((uint32_t)(uintptr_t)actions->conf);
if (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE, ct_idx,
- &acts->rule_acts[action_pos]))
+ &acts->rule_acts[dr_pos]))
goto err;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)) {
+ actions - action_start, dr_pos)) {
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_METER_MARK:
- action_pos = at->actions_off[actions - at->actions];
if (actions->conf && masks->conf &&
((const struct rte_flow_action_meter_mark *)
masks->conf)->profile) {
err = flow_hw_meter_mark_compile(dev,
- action_pos, actions,
- acts->rule_acts,
- &acts->mtr_id,
- MLX5_HW_INV_QUEUE);
+ dr_pos, actions,
+ acts->rule_acts,
+ &acts->mtr_id,
+ MLX5_HW_INV_QUEUE);
if (err)
goto err;
} else if (__flow_hw_act_data_general_append(priv, acts,
- actions->type,
+ actions->type,
actions - action_start,
- action_pos))
+ dr_pos))
goto err;
break;
case RTE_FLOW_ACTION_TYPE_END:
@@ -4681,7 +4666,7 @@ action_template_set_type(struct rte_flow_actions_template *at,
unsigned int action_src, uint16_t *curr_off,
enum mlx5dr_action_type type)
{
- at->actions_off[action_src] = *curr_off;
+ at->dr_off[action_src] = *curr_off;
action_types[*curr_off] = type;
*curr_off = *curr_off + 1;
}
@@ -4718,7 +4703,7 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
action_src, curr_off,
MLX5DR_ACTION_TYP_CTR);
}
- at->actions_off[action_src] = *cnt_off;
+ at->dr_off[action_src] = *cnt_off;
break;
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
action_template_set_type(at, action_types, action_src, curr_off,
@@ -4838,7 +4823,7 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
}
break;
case RTE_FLOW_ACTION_TYPE_METER:
- at->actions_off[i] = curr_off;
+ at->dr_off[i] = curr_off;
action_types[curr_off++] = MLX5DR_ACTION_TYP_ASO_METER;
if (curr_off >= MLX5_HW_MAX_ACTS)
goto err_actions_num;
@@ -4846,14 +4831,14 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
break;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
type = mlx5_hw_dr_action_types[at->actions[i].type];
- at->actions_off[i] = curr_off;
+ at->dr_off[i] = curr_off;
action_types[curr_off++] = type;
i += is_of_vlan_pcp_present(at->actions + i) ?
MLX5_HW_VLAN_PUSH_PCP_IDX :
MLX5_HW_VLAN_PUSH_VID_IDX;
break;
case RTE_FLOW_ACTION_TYPE_METER_MARK:
- at->actions_off[i] = curr_off;
+ at->dr_off[i] = curr_off;
action_types[curr_off++] = MLX5DR_ACTION_TYP_ASO_METER;
if (curr_off >= MLX5_HW_MAX_ACTS)
goto err_actions_num;
@@ -4869,11 +4854,11 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
cnt_off = curr_off++;
action_types[cnt_off] = MLX5DR_ACTION_TYP_CTR;
}
- at->actions_off[i] = cnt_off;
+ at->dr_off[i] = cnt_off;
break;
default:
type = mlx5_hw_dr_action_types[at->actions[i].type];
- at->actions_off[i] = curr_off;
+ at->dr_off[i] = curr_off;
action_types[curr_off++] = type;
break;
}
@@ -5234,7 +5219,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
if (mask_len <= 0)
return NULL;
len += RTE_ALIGN(mask_len, 16);
- len += RTE_ALIGN(act_num * sizeof(*at->actions_off), 16);
+ len += RTE_ALIGN(act_num * sizeof(*at->dr_off), 16);
at = mlx5_malloc(MLX5_MEM_ZERO, len + sizeof(*at),
RTE_CACHE_LINE_SIZE, rte_socket_id());
if (!at) {
@@ -5258,10 +5243,10 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
if (mask_len <= 0)
goto error;
/* DR actions offsets in the third part. */
- at->actions_off = (uint16_t *)((uint8_t *)at->masks + mask_len);
+ at->dr_off = (uint16_t *)((uint8_t *)at->masks + mask_len);
at->actions_num = act_num;
for (i = 0; i < at->actions_num; ++i)
- at->actions_off[i] = UINT16_MAX;
+ at->dr_off[i] = UINT16_MAX;
at->reformat_off = UINT16_MAX;
at->mhdr_off = UINT16_MAX;
at->rx_cpy_pos = pos;
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v3 13/16] net/mlx5: fix RTE action location tracking in a template
2023-10-17 8:09 ` [PATCH v3 " Gregory Etelson
` (11 preceding siblings ...)
2023-10-17 8:09 ` [PATCH v3 12/16] net/mlx5: refactor HWS code Gregory Etelson
@ 2023-10-17 8:09 ` Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 14/16] net/mlx5: fix mirror redirect action Gregory Etelson
` (6 subsequent siblings)
19 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-17 8:09 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Suanming Mou, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam, Bing Zhao
PMD can implicitly add flow actions to application actions template.
If the PMD adds actions to a template, it must track the location of the
original application actions in the modified template.
The patch adds the ability to track the original action locations in a template.
Fixes: ddb68e4 ("net/mlx5: add extended metadata mode for HWS")
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/mlx5_flow.h | 2 +-
drivers/net/mlx5/mlx5_flow_hw.c | 90 ++++++++++++++++-----------------
2 files changed, 45 insertions(+), 47 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index a31120cd78..19b26ad333 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1332,10 +1332,10 @@ struct rte_flow_actions_template {
uint16_t dr_actions_num; /* Amount of DR rules actions. */
uint16_t actions_num; /* Amount of flow actions */
uint16_t *dr_off; /* DR action offset for given rte action offset. */
+ uint16_t *src_off; /* RTE action displacement from app. template */
uint16_t reformat_off; /* Offset of DR reformat action. */
uint16_t mhdr_off; /* Offset of DR modify header action. */
uint32_t refcnt; /* Reference counter. */
- uint16_t rx_cpy_pos; /* Action position of Rx metadata to be copied. */
uint8_t flex_item; /* flex item index. */
};
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 53641ae2d5..213b0d5ae8 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -1015,11 +1015,11 @@ flow_hw_modify_field_init(struct mlx5_hw_modify_header_action *mhdr,
static __rte_always_inline int
flow_hw_modify_field_compile(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
- const struct rte_flow_action *action_start, /* Start of AT actions. */
const struct rte_flow_action *action, /* Current action from AT. */
const struct rte_flow_action *action_mask, /* Current mask from AT. */
struct mlx5_hw_actions *acts,
struct mlx5_hw_modify_header_action *mhdr,
+ uint16_t src_pos,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
@@ -1122,7 +1122,7 @@ flow_hw_modify_field_compile(struct rte_eth_dev *dev,
if (shared)
return 0;
ret = __flow_hw_act_data_hdr_modify_append(priv, acts, RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
- action - action_start, mhdr->pos,
+ src_pos, mhdr->pos,
cmds_start, cmds_end, shared,
field, dcopy, mask);
if (ret)
@@ -1181,11 +1181,10 @@ flow_hw_validate_compiled_modify_field(struct rte_eth_dev *dev,
static int
flow_hw_represented_port_compile(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
- const struct rte_flow_action *action_start,
const struct rte_flow_action *action,
const struct rte_flow_action *action_mask,
struct mlx5_hw_actions *acts,
- uint16_t action_dst,
+ uint16_t action_src, uint16_t action_dst,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
@@ -1241,7 +1240,7 @@ flow_hw_represented_port_compile(struct rte_eth_dev *dev,
} else {
ret = __flow_hw_act_data_general_append
(priv, acts, action->type,
- action - action_start, action_dst);
+ action_src, action_dst);
if (ret)
return rte_flow_error_set
(error, ENOMEM,
@@ -1493,7 +1492,6 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
const struct rte_flow_attr *attr = &table_attr->flow_attr;
struct rte_flow_action *actions = at->actions;
- struct rte_flow_action *action_start = actions;
struct rte_flow_action *masks = at->masks;
enum mlx5dr_action_type refmt_type = MLX5DR_ACTION_TYP_LAST;
const struct rte_flow_action_raw_encap *raw_encap_data;
@@ -1506,7 +1504,6 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
uint32_t type;
bool reformat_used = false;
unsigned int of_vlan_offset;
- uint16_t dr_pos;
uint16_t jump_pos;
uint32_t ct_idx;
int ret, err;
@@ -1521,7 +1518,10 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
else
type = MLX5DR_TABLE_TYPE_NIC_RX;
for (; !actions_end; actions++, masks++) {
- dr_pos = at->dr_off[actions - at->actions];
+ uint64_t pos = actions - at->actions;
+ uint16_t src_pos = pos - at->src_off[pos];
+ uint16_t dr_pos = at->dr_off[pos];
+
switch ((int)actions->type) {
case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
if (!attr->group) {
@@ -1529,9 +1529,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
goto err;
}
ret = table_template_translate_indirect_list
- (dev, actions, masks, acts,
- actions - action_start,
- dr_pos);
+ (dev, actions, masks, acts, src_pos, dr_pos);
if (ret)
goto err;
break;
@@ -1542,11 +1540,11 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
}
if (actions->conf && masks->conf) {
if (flow_hw_shared_action_translate
- (dev, actions, acts, actions - action_start, dr_pos))
+ (dev, actions, acts, src_pos, dr_pos))
goto err;
} else if (__flow_hw_act_data_general_append
(priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT,
- actions - action_start, dr_pos)){
+ src_pos, dr_pos)){
goto err;
}
break;
@@ -1566,7 +1564,8 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
(((const struct rte_flow_action_mark *)
(actions->conf))->id);
else if (__flow_hw_act_data_general_append(priv, acts,
- actions->type, actions - action_start, dr_pos))
+ actions->type,
+ src_pos, dr_pos))
goto err;
acts->rule_acts[dr_pos].action =
priv->hw_tag[!!attr->group];
@@ -1581,7 +1580,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
vlan_hdr_to_be32(actions);
else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, dr_pos))
+ src_pos, dr_pos))
goto err;
of_vlan_offset = is_of_vlan_pcp_present(actions) ?
MLX5_HW_VLAN_PUSH_PCP_IDX :
@@ -1609,7 +1608,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
acts->jump->root_action;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, dr_pos)){
+ src_pos, dr_pos)){
goto err;
}
break;
@@ -1627,7 +1626,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
acts->tir->action;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, dr_pos)) {
+ src_pos, dr_pos)) {
goto err;
}
break;
@@ -1643,7 +1642,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
acts->tir->action;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, dr_pos)) {
+ src_pos, dr_pos)) {
goto err;
}
break;
@@ -1655,7 +1654,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
enc_item_m = ((const struct rte_flow_action_vxlan_encap *)
masks->conf)->definition;
reformat_used = true;
- reformat_src = actions - action_start;
+ reformat_src = src_pos;
refmt_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
break;
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
@@ -1666,7 +1665,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
enc_item_m = ((const struct rte_flow_action_nvgre_encap *)
masks->conf)->definition;
reformat_used = true;
- reformat_src = actions - action_start;
+ reformat_src = src_pos;
refmt_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
@@ -1696,7 +1695,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
refmt_type =
MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
}
- reformat_src = actions - action_start;
+ reformat_src = src_pos;
break;
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
reformat_used = true;
@@ -1718,26 +1717,16 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
acts->rule_acts[dr_pos].action = priv->hw_send_to_kernel[table_type];
break;
case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
- err = flow_hw_modify_field_compile(dev, attr, action_start,
- actions, masks, acts, &mhdr,
- error);
+ err = flow_hw_modify_field_compile(dev, attr, actions,
+ masks, acts, &mhdr,
+ src_pos, error);
if (err)
goto err;
- /*
- * Adjust the action source position for the following.
- * ... / MODIFY_FIELD: rx_cpy_pos / (QUEUE|RSS) / ...
- * The next action will be Q/RSS, there will not be
- * another adjustment and the real source position of
- * the following actions will be decreased by 1.
- * No change of the total actions in the new template.
- */
- if ((actions - action_start) == at->rx_cpy_pos)
- action_start += 1;
break;
case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
if (flow_hw_represented_port_compile
- (dev, attr, action_start, actions,
- masks, acts, dr_pos, error))
+ (dev, attr, actions,
+ masks, acts, src_pos, dr_pos, error))
goto err;
break;
case RTE_FLOW_ACTION_TYPE_METER:
@@ -1756,7 +1745,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
goto err;
} else if (__flow_hw_act_data_general_append(priv, acts,
actions->type,
- actions - action_start,
+ src_pos,
dr_pos))
goto err;
break;
@@ -1772,7 +1761,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
}
if (__flow_hw_act_data_general_append(priv, acts,
actions->type,
- actions - action_start,
+ src_pos,
dr_pos))
goto err;
break;
@@ -1802,7 +1791,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
goto err;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, dr_pos)) {
+ src_pos, dr_pos)) {
goto err;
}
break;
@@ -1815,7 +1804,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
goto err;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, dr_pos)) {
+ src_pos, dr_pos)) {
goto err;
}
break;
@@ -1832,7 +1821,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
goto err;
} else if (__flow_hw_act_data_general_append(priv, acts,
actions->type,
- actions - action_start,
+ src_pos,
dr_pos))
goto err;
break;
@@ -1916,7 +1905,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
if (shared_rfmt)
acts->rule_acts[at->reformat_off].reformat.offset = 0;
else if (__flow_hw_act_data_encap_append(priv, acts,
- (action_start + reformat_src)->type,
+ (at->actions + reformat_src)->type,
reformat_src, at->reformat_off, data_size))
goto err;
acts->encap_decap->shared = shared_rfmt;
@@ -4273,8 +4262,11 @@ flow_hw_validate_action_raw_encap(struct rte_eth_dev *dev __rte_unused,
* The PMD handles the sequence as a single encap or decap reformat action,
* depending on the raw_encap configuration.
*
- * The function assumes that the sequence location in actions template list
- * complies with relative HWS actions order for the required reformat.
+ * The function assumes that the raw_decap / raw_encap location
+ * in actions template list complies with relative HWS actions order:
+ * for the required reformat configuration:
+ * ENCAP configuration must appear before [JUMP|DROP|PORT]
+ * DECAP configuration must appear at the template head.
*/
static uint64_t
mlx5_decap_encap_reformat_type(const struct rte_flow_action *actions,
@@ -4352,7 +4344,7 @@ flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
i++;
goto insert;
}
- if (flags & MLX5_FLOW_ACTION_DECAP)
+ if (actions[i - 1].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP)
i--;
break;
default:
@@ -5131,6 +5123,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
struct rte_flow_action mf_actions[MLX5_HW_MAX_ACTS];
struct rte_flow_action mf_masks[MLX5_HW_MAX_ACTS];
uint32_t expand_mf_num = 0;
+ uint16_t src_off[MLX5_HW_MAX_ACTS] = {0, };
if (mlx5_flow_hw_actions_validate(dev, attr, actions, masks,
&action_flags, error))
@@ -5209,6 +5202,8 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
act_num,
expand_mf_num);
act_num += expand_mf_num;
+ for (i = pos + expand_mf_num; i < act_num; i++)
+ src_off[i] += expand_mf_num;
action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
}
act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, ra, error);
@@ -5220,6 +5215,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
return NULL;
len += RTE_ALIGN(mask_len, 16);
len += RTE_ALIGN(act_num * sizeof(*at->dr_off), 16);
+ len += RTE_ALIGN(act_num * sizeof(*at->src_off), 16);
at = mlx5_malloc(MLX5_MEM_ZERO, len + sizeof(*at),
RTE_CACHE_LINE_SIZE, rte_socket_id());
if (!at) {
@@ -5244,12 +5240,14 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
goto error;
/* DR actions offsets in the third part. */
at->dr_off = (uint16_t *)((uint8_t *)at->masks + mask_len);
+ at->src_off = RTE_PTR_ADD(at->dr_off,
+ RTE_ALIGN(act_num * sizeof(*at->dr_off), 16));
+ memcpy(at->src_off, src_off, act_num * sizeof(at->src_off[0]));
at->actions_num = act_num;
for (i = 0; i < at->actions_num; ++i)
at->dr_off[i] = UINT16_MAX;
at->reformat_off = UINT16_MAX;
at->mhdr_off = UINT16_MAX;
- at->rx_cpy_pos = pos;
for (i = 0; actions->type != RTE_FLOW_ACTION_TYPE_END;
actions++, masks++, i++) {
const struct rte_flow_action_modify_field *info;
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v3 14/16] net/mlx5: fix mirror redirect action
2023-10-17 8:09 ` [PATCH v3 " Gregory Etelson
` (12 preceding siblings ...)
2023-10-17 8:09 ` [PATCH v3 13/16] net/mlx5: fix RTE action location tracking in a template Gregory Etelson
@ 2023-10-17 8:09 ` Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 15/16] net/mlx5: support indirect list METER_MARK action Gregory Etelson
` (5 subsequent siblings)
19 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-17 8:09 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Suanming Mou, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam
PMD used a buffer located on an expired stack frame to store
mirror reformat data, leaving the stored pointer dangling.
The patch moves the reformat buffer into the same context as the mirror
action creation, so it remains valid for the action's lifetime.
Fixes: 0284c9b82ee8 ("net/mlx5: support HWS mirror action")
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/mlx5_flow_hw.c | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 213b0d5ae8..ae017d2815 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -9690,11 +9690,11 @@ mirror_format_port(struct rte_eth_dev *dev,
static int
hw_mirror_clone_reformat(const struct rte_flow_action *actions,
- struct mlx5dr_action_dest_attr *dest_attr,
- enum mlx5dr_action_type *action_type, bool decap)
+ struct mlx5dr_action_dest_attr *dest_attr,
+ enum mlx5dr_action_type *action_type,
+ uint8_t *reformat_buf, bool decap)
{
int ret;
- uint8_t encap_buf[MLX5_ENCAP_MAX_LEN];
const struct rte_flow_item *encap_item = NULL;
const struct rte_flow_action_raw_encap *encap_conf = NULL;
typeof(dest_attr->reformat) *reformat = &dest_attr->reformat;
@@ -9718,11 +9718,11 @@ hw_mirror_clone_reformat(const struct rte_flow_action *actions,
MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3 :
MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
if (encap_item) {
- ret = flow_dv_convert_encap_data(encap_item, encap_buf,
+ ret = flow_dv_convert_encap_data(encap_item, reformat_buf,
&reformat->reformat_data_sz, NULL);
if (ret)
return -EINVAL;
- reformat->reformat_data = (void *)(uintptr_t)encap_buf;
+ reformat->reformat_data = reformat_buf;
} else {
reformat->reformat_data = (void *)(uintptr_t)encap_conf->data;
reformat->reformat_data_sz = encap_conf->size;
@@ -9736,7 +9736,7 @@ hw_mirror_format_clone(struct rte_eth_dev *dev,
const struct mlx5_flow_template_table_cfg *table_cfg,
const struct rte_flow_action *actions,
struct mlx5dr_action_dest_attr *dest_attr,
- struct rte_flow_error *error)
+ uint8_t *reformat_buf, struct rte_flow_error *error)
{
int ret;
uint32_t i;
@@ -9772,7 +9772,7 @@ hw_mirror_format_clone(struct rte_eth_dev *dev,
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
ret = hw_mirror_clone_reformat(&actions[i], dest_attr,
&dest_attr->action_type[i],
- decap_seen);
+ reformat_buf, decap_seen);
if (ret < 0)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -9802,6 +9802,7 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
enum mlx5dr_table_type table_type;
struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_attr *flow_attr = &table_cfg->attr.flow_attr;
+ uint8_t reformat_buf[MLX5_MIRROR_MAX_CLONES_NUM][MLX5_ENCAP_MAX_LEN];
struct mlx5dr_action_dest_attr mirror_attr[MLX5_MIRROR_MAX_CLONES_NUM + 1];
enum mlx5dr_action_type array_action_types[MLX5_MIRROR_MAX_CLONES_NUM + 1]
[MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN + 1];
@@ -9839,7 +9840,7 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
}
ret = hw_mirror_format_clone(dev, &mirror->clone[i], table_cfg,
clone_actions, &mirror_attr[i],
- error);
+ reformat_buf[i], error);
if (ret)
goto error;
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v3 15/16] net/mlx5: support indirect list METER_MARK action
2023-10-17 8:09 ` [PATCH v3 " Gregory Etelson
` (13 preceding siblings ...)
2023-10-17 8:09 ` [PATCH v3 14/16] net/mlx5: fix mirror redirect action Gregory Etelson
@ 2023-10-17 8:09 ` Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 16/16] net/mlx5: fix METER_MARK indirection list callback Gregory Etelson
` (4 subsequent siblings)
19 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-17 8:09 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Suanming Mou, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/mlx5_flow.c | 69 +++++-
drivers/net/mlx5/mlx5_flow.h | 67 ++++-
drivers/net/mlx5/mlx5_flow_hw.c | 427 +++++++++++++++++++++++++++-----
3 files changed, 482 insertions(+), 81 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 693d1320e1..16fce9c64e 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -75,8 +75,11 @@ mlx5_indirect_list_handles_release(struct rte_eth_dev *dev)
switch (e->type) {
#ifdef HAVE_MLX5_HWS_SUPPORT
case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
- mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)e, true);
+ mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)e);
break;
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
+ mlx5_destroy_legacy_indirect(dev, e);
+ break;
#endif
default:
DRV_LOG(ERR, "invalid indirect list type");
@@ -1169,7 +1172,24 @@ mlx5_flow_async_action_list_handle_destroy
const struct rte_flow_op_attr *op_attr,
struct rte_flow_action_list_handle *action_handle,
void *user_data, struct rte_flow_error *error);
-
+static int
+mlx5_flow_action_list_handle_query_update(struct rte_eth_dev *dev,
+ const
+ struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ struct rte_flow_error *error);
+static int
+mlx5_flow_async_action_list_handle_query_update(struct rte_eth_dev *dev,
+ uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct
+ rte_flow_action_list_handle *handle,
+ const void **update,
+ void **query,
+ enum rte_flow_query_update_mode mode,
+ void *user_data,
+ struct rte_flow_error *error);
static const struct rte_flow_ops mlx5_flow_ops = {
.validate = mlx5_flow_validate,
.create = mlx5_flow_create,
@@ -1219,6 +1239,10 @@ static const struct rte_flow_ops mlx5_flow_ops = {
mlx5_flow_async_action_list_handle_create,
.async_action_list_handle_destroy =
mlx5_flow_async_action_list_handle_destroy,
+ .action_list_handle_query_update =
+ mlx5_flow_action_list_handle_query_update,
+ .async_action_list_handle_query_update =
+ mlx5_flow_async_action_list_handle_query_update,
};
/* Tunnel information. */
@@ -11003,6 +11027,47 @@ mlx5_flow_async_action_list_handle_destroy
error);
}
+static int
+mlx5_flow_action_list_handle_query_update(struct rte_eth_dev *dev,
+ const
+ struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops,
+ action_list_handle_query_update, ENOTSUP);
+ return fops->action_list_handle_query_update(dev, handle, update, query,
+ mode, error);
+}
+
+static int
+mlx5_flow_async_action_list_handle_query_update(struct rte_eth_dev *dev,
+ uint32_t queue_id,
+ const
+ struct rte_flow_op_attr *op_attr,
+ const struct
+ rte_flow_action_list_handle *handle,
+ const void **update,
+ void **query,
+ enum
+ rte_flow_query_update_mode mode,
+ void *user_data,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops,
+ async_action_list_handle_query_update, ENOTSUP);
+ return fops->async_action_list_handle_query_update(dev, queue_id, op_attr,
+ handle, update,
+ query, mode,
+ user_data, error);
+}
+
+
/**
* Destroy all indirect actions (shared RSS).
*
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 19b26ad333..2c086026a2 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -98,25 +98,41 @@ enum mlx5_indirect_type{
#define MLX5_ACTION_CTX_CT_GEN_IDX MLX5_INDIRECT_ACT_CT_GEN_IDX
enum mlx5_indirect_list_type {
- MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 1,
+ MLX5_INDIRECT_ACTION_LIST_TYPE_ERR = 0,
+ MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY = 1,
+ MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 2,
};
-/*
+/**
* Base type for indirect list type.
- * Actual indirect list type MUST override that type and put type spec data
- * after the `chain`.
*/
struct mlx5_indirect_list {
- /* type field MUST be the first */
+ /* Indirect list type. */
enum mlx5_indirect_list_type type;
+ /* Optional storage list entry */
LIST_ENTRY(mlx5_indirect_list) entry;
- /* put type specific data after chain */
};
+static __rte_always_inline void
+mlx5_indirect_list_add_entry(void *head, struct mlx5_indirect_list *elem)
+{
+ LIST_HEAD(, mlx5_indirect_list) *h = head;
+
+ LIST_INSERT_HEAD(h, elem, entry);
+}
+
+static __rte_always_inline void
+mlx5_indirect_list_remove_entry(struct mlx5_indirect_list *elem)
+{
+ if (elem->entry.le_prev)
+ LIST_REMOVE(elem, entry);
+
+}
+
static __rte_always_inline enum mlx5_indirect_list_type
-mlx5_get_indirect_list_type(const struct mlx5_indirect_list *obj)
+mlx5_get_indirect_list_type(const struct rte_flow_action_list_handle *obj)
{
- return obj->type;
+ return ((const struct mlx5_indirect_list *)obj)->type;
}
/* Matches on selected register. */
@@ -1240,9 +1256,12 @@ struct rte_flow_hw {
#pragma GCC diagnostic error "-Wpedantic"
#endif
-struct mlx5dr_action;
-typedef struct mlx5dr_action *
-(*indirect_list_callback_t)(const struct rte_flow_action *);
+struct mlx5_action_construct_data;
+typedef int
+(*indirect_list_callback_t)(struct rte_eth_dev *,
+ const struct mlx5_action_construct_data *,
+ const struct rte_flow_action *,
+ struct mlx5dr_rule_action *);
/* rte flow action translate to DR action struct. */
struct mlx5_action_construct_data {
@@ -1291,6 +1310,7 @@ struct mlx5_action_construct_data {
} shared_counter;
struct {
uint32_t id;
+ uint32_t conf_masked:1;
} shared_meter;
struct {
indirect_list_callback_t cb;
@@ -2017,7 +2037,21 @@ typedef int
const struct rte_flow_op_attr *op_attr,
struct rte_flow_action_list_handle *action_handle,
void *user_data, struct rte_flow_error *error);
-
+typedef int
+(*mlx5_flow_action_list_handle_query_update_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ struct rte_flow_error *error);
+typedef int
+(*mlx5_flow_async_action_list_handle_query_update_t)
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ void *user_data, struct rte_flow_error *error);
struct mlx5_flow_driver_ops {
mlx5_flow_validate_t validate;
@@ -2085,6 +2119,10 @@ struct mlx5_flow_driver_ops {
async_action_list_handle_create;
mlx5_flow_async_action_list_handle_destroy_t
async_action_list_handle_destroy;
+ mlx5_flow_action_list_handle_query_update_t
+ action_list_handle_query_update;
+ mlx5_flow_async_action_list_handle_query_update_t
+ async_action_list_handle_query_update;
};
/* mlx5_flow.c */
@@ -2820,6 +2858,9 @@ mlx5_indirect_list_handles_release(struct rte_eth_dev *dev);
#ifdef HAVE_MLX5_HWS_SUPPORT
struct mlx5_mirror;
void
-mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release);
+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror);
+void
+mlx5_destroy_legacy_indirect(struct rte_eth_dev *dev,
+ struct mlx5_indirect_list *ptr);
#endif
#endif /* RTE_PMD_MLX5_FLOW_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index ae017d2815..4d070624c8 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -61,16 +61,23 @@
#define MLX5_MIRROR_MAX_CLONES_NUM 3
#define MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN 4
+#define MLX5_HW_PORT_IS_PROXY(priv) \
+ (!!((priv)->sh->esw_mode && (priv)->master))
+
+
+struct mlx5_indlst_legacy {
+ struct mlx5_indirect_list indirect;
+ struct rte_flow_action_handle *handle;
+ enum rte_flow_action_type legacy_type;
+};
+
struct mlx5_mirror_clone {
enum rte_flow_action_type type;
void *action_ctx;
};
struct mlx5_mirror {
- /* type field MUST be the first */
- enum mlx5_indirect_list_type type;
- LIST_ENTRY(mlx5_indirect_list) entry;
-
+ struct mlx5_indirect_list indirect;
uint32_t clones_num;
struct mlx5dr_action *mirror_action;
struct mlx5_mirror_clone clone[MLX5_MIRROR_MAX_CLONES_NUM];
@@ -1416,46 +1423,211 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev,
return 0;
}
-static struct mlx5dr_action *
-flow_hw_mirror_action(const struct rte_flow_action *action)
+static int
+flow_hw_translate_indirect_mirror(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused const struct mlx5_action_construct_data *act_data,
+ const struct rte_flow_action *action,
+ struct mlx5dr_rule_action *dr_rule)
+{
+ const struct rte_flow_action_indirect_list *list_conf = action->conf;
+ const struct mlx5_mirror *mirror = (typeof(mirror))list_conf->handle;
+
+ dr_rule->action = mirror->mirror_action;
+ return 0;
+}
+
+/**
+ * HWS mirror implemented as FW island.
+ * The action does not support indirect list flow configuration.
+ * If template handle was masked, use handle mirror action in flow rules.
+ * Otherwise let flow rule specify mirror handle.
+ */
+static int
+hws_table_tmpl_translate_indirect_mirror(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *mask,
+ struct mlx5_hw_actions *acts,
+ uint16_t action_src, uint16_t action_dst)
+{
+ int ret = 0;
+ const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
+
+ if (mask_conf && mask_conf->handle) {
+ /**
+ * If mirror handle was masked, assign fixed DR5 mirror action.
+ */
+ flow_hw_translate_indirect_mirror(dev, NULL, action,
+ &acts->rule_acts[action_dst]);
+ } else {
+ struct mlx5_priv *priv = dev->data->dev_private;
+ ret = flow_hw_act_data_indirect_list_append
+ (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
+ action_src, action_dst,
+ flow_hw_translate_indirect_mirror);
+ }
+ return ret;
+}
+
+static int
+flow_dr_set_meter(struct mlx5_priv *priv,
+ struct mlx5dr_rule_action *dr_rule,
+ const struct rte_flow_action_indirect_list *action_conf)
+{
+ const struct mlx5_indlst_legacy *legacy_obj =
+ (typeof(legacy_obj))action_conf->handle;
+ struct mlx5_aso_mtr_pool *mtr_pool = priv->hws_mpool;
+ uint32_t act_idx = (uint32_t)(uintptr_t)legacy_obj->handle;
+ uint32_t mtr_id = act_idx & (RTE_BIT32(MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
+ struct mlx5_aso_mtr *aso_mtr = mlx5_ipool_get(mtr_pool->idx_pool, mtr_id);
+
+ if (!aso_mtr)
+ return -EINVAL;
+ dr_rule->action = mtr_pool->action;
+ dr_rule->aso_meter.offset = aso_mtr->offset;
+ return 0;
+}
+
+__rte_always_inline static void
+flow_dr_mtr_flow_color(struct mlx5dr_rule_action *dr_rule, enum rte_color init_color)
+{
+ dr_rule->aso_meter.init_color =
+ (enum mlx5dr_action_aso_meter_color)rte_col_2_mlx5_col(init_color);
+}
+
+static int
+flow_hw_translate_indirect_meter(struct rte_eth_dev *dev,
+ const struct mlx5_action_construct_data *act_data,
+ const struct rte_flow_action *action,
+ struct mlx5dr_rule_action *dr_rule)
+{
+ int ret;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_indirect_list *action_conf = action->conf;
+ const struct rte_flow_indirect_update_flow_meter_mark **flow_conf =
+ (typeof(flow_conf))action_conf->conf;
+
+ /*
+ * Masked indirect handle set dr5 action during template table
+ * translation.
+ */
+ if (!dr_rule->action) {
+ ret = flow_dr_set_meter(priv, dr_rule, action_conf);
+ if (ret)
+ return ret;
+ }
+ if (!act_data->shared_meter.conf_masked) {
+ if (flow_conf && flow_conf[0] && flow_conf[0]->init_color < RTE_COLORS)
+ flow_dr_mtr_flow_color(dr_rule, flow_conf[0]->init_color);
+ }
+ return 0;
+}
+
+static int
+hws_table_tmpl_translate_indirect_meter(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *mask,
+ struct mlx5_hw_actions *acts,
+ uint16_t action_src, uint16_t action_dst)
+{
+ int ret;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_indirect_list *action_conf = action->conf;
+ const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
+ bool is_handle_masked = mask_conf && mask_conf->handle;
+ bool is_conf_masked = mask_conf && mask_conf->conf && mask_conf->conf[0];
+ struct mlx5dr_rule_action *dr_rule = &acts->rule_acts[action_dst];
+
+ if (is_handle_masked) {
+ ret = flow_dr_set_meter(priv, dr_rule, action->conf);
+ if (ret)
+ return ret;
+ }
+ if (is_conf_masked) {
+ const struct
+ rte_flow_indirect_update_flow_meter_mark **flow_conf =
+ (typeof(flow_conf))action_conf->conf;
+ flow_dr_mtr_flow_color(dr_rule,
+ flow_conf[0]->init_color);
+ }
+ if (!is_handle_masked || !is_conf_masked) {
+ struct mlx5_action_construct_data *act_data;
+
+ ret = flow_hw_act_data_indirect_list_append
+ (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
+ action_src, action_dst, flow_hw_translate_indirect_meter);
+ if (ret)
+ return ret;
+ act_data = LIST_FIRST(&acts->act_list);
+ act_data->shared_meter.conf_masked = is_conf_masked;
+ }
+ return 0;
+}
+
+static int
+hws_table_tmpl_translate_indirect_legacy(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *mask,
+ struct mlx5_hw_actions *acts,
+ uint16_t action_src, uint16_t action_dst)
{
- struct mlx5_mirror *mirror = (void *)(uintptr_t)action->conf;
+ int ret;
+ const struct rte_flow_action_indirect_list *indlst_conf = action->conf;
+ struct mlx5_indlst_legacy *indlst_obj = (typeof(indlst_obj))indlst_conf->handle;
+ uint32_t act_idx = (uint32_t)(uintptr_t)indlst_obj->handle;
+ uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
- return mirror->mirror_action;
+ switch (type) {
+ case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
+ ret = hws_table_tmpl_translate_indirect_meter(dev, action, mask,
+ acts, action_src,
+ action_dst);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
}
+/*
+ * template .. indirect_list handle Ht conf Ct ..
+ * mask .. indirect_list handle Hm conf Cm ..
+ *
+ * PMD requires Ht != 0 to resolve handle type.
+ * If Ht was masked (Hm != 0) DR5 action will be set according to Ht and will
+ * not change. Otherwise, DR5 action will be resolved during flow rule build.
+ * If Ct was masked (Cm != 0), table template processing updates base
+ * indirect action configuration with Ct parameters.
+ */
static int
table_template_translate_indirect_list(struct rte_eth_dev *dev,
const struct rte_flow_action *action,
const struct rte_flow_action *mask,
struct mlx5_hw_actions *acts,
- uint16_t action_src,
- uint16_t action_dst)
+ uint16_t action_src, uint16_t action_dst)
{
- int ret;
- bool is_masked = action->conf && mask->conf;
- struct mlx5_priv *priv = dev->data->dev_private;
+ int ret = 0;
enum mlx5_indirect_list_type type;
+ const struct rte_flow_action_indirect_list *list_conf = action->conf;
- if (!action->conf)
+ if (!list_conf || !list_conf->handle)
return -EINVAL;
- type = mlx5_get_indirect_list_type(action->conf);
+ type = mlx5_get_indirect_list_type(list_conf->handle);
switch (type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
+ ret = hws_table_tmpl_translate_indirect_legacy(dev, action, mask,
+ acts, action_src,
+ action_dst);
+ break;
case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
- if (is_masked) {
- acts->rule_acts[action_dst].action = flow_hw_mirror_action(action);
- } else {
- ret = flow_hw_act_data_indirect_list_append
- (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
- action_src, action_dst, flow_hw_mirror_action);
- if (ret)
- return ret;
- }
+ ret = hws_table_tmpl_translate_indirect_mirror(dev, action, mask,
+ acts, action_src,
+ action_dst);
break;
default:
return -EINVAL;
}
- return 0;
+ return ret;
}
/**
@@ -2366,8 +2538,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
(int)action->type == act_data->type);
switch ((int)act_data->type) {
case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
- rule_acts[act_data->action_dst].action =
- act_data->indirect_list.cb(action);
+ act_data->indirect_list.cb(dev, act_data, actions, rule_acts);
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
if (flow_hw_shared_action_construct
@@ -4664,20 +4835,11 @@ action_template_set_type(struct rte_flow_actions_template *at,
}
static int
-flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
- unsigned int action_src,
+flow_hw_dr_actions_template_handle_shared(int type, uint32_t action_src,
enum mlx5dr_action_type *action_types,
uint16_t *curr_off, uint16_t *cnt_off,
struct rte_flow_actions_template *at)
{
- uint32_t type;
-
- if (!mask) {
- DRV_LOG(WARNING, "Unable to determine indirect action type "
- "without a mask specified");
- return -EINVAL;
- }
- type = mask->type;
switch (type) {
case RTE_FLOW_ACTION_TYPE_RSS:
action_template_set_type(at, action_types, action_src, curr_off,
@@ -4718,12 +4880,24 @@ static int
flow_hw_template_actions_list(struct rte_flow_actions_template *at,
unsigned int action_src,
enum mlx5dr_action_type *action_types,
- uint16_t *curr_off)
+ uint16_t *curr_off, uint16_t *cnt_off)
{
- enum mlx5_indirect_list_type list_type;
+ int ret;
+ const struct rte_flow_action_indirect_list *indlst_conf = at->actions[action_src].conf;
+ enum mlx5_indirect_list_type list_type = mlx5_get_indirect_list_type(indlst_conf->handle);
+ const union {
+ struct mlx5_indlst_legacy *legacy;
+ struct rte_flow_action_list_handle *handle;
+ } indlst_obj = { .handle = indlst_conf->handle };
- list_type = mlx5_get_indirect_list_type(at->actions[action_src].conf);
switch (list_type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
+ ret = flow_hw_dr_actions_template_handle_shared
+ (indlst_obj.legacy->legacy_type, action_src,
+ action_types, curr_off, cnt_off, at);
+ if (ret)
+ return ret;
+ break;
case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
action_template_set_type(at, action_types, action_src, curr_off,
MLX5DR_ACTION_TYP_DEST_ARRAY);
@@ -4769,17 +4943,14 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
ret = flow_hw_template_actions_list(at, i, action_types,
- &curr_off);
+ &curr_off, &cnt_off);
if (ret)
return NULL;
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
ret = flow_hw_dr_actions_template_handle_shared
- (&at->masks[i],
- i,
- action_types,
- &curr_off,
- &cnt_off, at);
+ (at->masks[i].type, i, action_types,
+ &curr_off, &cnt_off, at);
if (ret)
return NULL;
break;
@@ -5259,9 +5430,8 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
* Need to restore the indirect action index from action conf here.
*/
case RTE_FLOW_ACTION_TYPE_INDIRECT:
- case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
- at->actions[i].conf = actions->conf;
- at->masks[i].conf = masks->conf;
+ at->actions[i].conf = ra[i].conf;
+ at->masks[i].conf = rm[i].conf;
break;
case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
info = actions->conf;
@@ -9519,18 +9689,16 @@ mlx5_mirror_destroy_clone(struct rte_eth_dev *dev,
}
void
-mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release)
+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror)
{
uint32_t i;
- if (mirror->entry.le_prev)
- LIST_REMOVE(mirror, entry);
+ mlx5_indirect_list_remove_entry(&mirror->indirect);
for(i = 0; i < mirror->clones_num; i++)
mlx5_mirror_destroy_clone(dev, &mirror->clone[i]);
if (mirror->mirror_action)
mlx5dr_action_destroy(mirror->mirror_action);
- if (release)
- mlx5_free(mirror);
+ mlx5_free(mirror);
}
static inline enum mlx5dr_table_type
@@ -9825,7 +9993,8 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
actions, "Failed to allocate mirror context");
return NULL;
}
- mirror->type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
+
+ mirror->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
mirror->clones_num = clones_num;
for (i = 0; i < clones_num; i++) {
const struct rte_flow_action *clone_actions;
@@ -9857,15 +10026,72 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
goto error;
}
- LIST_INSERT_HEAD(&priv->indirect_list_head,
- (struct mlx5_indirect_list *)mirror, entry);
+ mlx5_indirect_list_add_entry(&priv->indirect_list_head, &mirror->indirect);
return (struct rte_flow_action_list_handle *)mirror;
error:
- mlx5_hw_mirror_destroy(dev, mirror, true);
+ mlx5_hw_mirror_destroy(dev, mirror);
return NULL;
}
+void
+mlx5_destroy_legacy_indirect(__rte_unused struct rte_eth_dev *dev,
+ struct mlx5_indirect_list *ptr)
+{
+ struct mlx5_indlst_legacy *obj = (typeof(obj))ptr;
+
+ switch (obj->legacy_type) {
+ case RTE_FLOW_ACTION_TYPE_METER_MARK:
+ break; /* ASO meters were released in mlx5_flow_meter_flush() */
+ default:
+ break;
+ }
+ mlx5_free(obj);
+}
+
+static struct rte_flow_action_list_handle *
+mlx5_create_legacy_indlst(struct rte_eth_dev *dev, uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data, struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_indlst_legacy *indlst_obj = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(*indlst_obj),
+ 0, SOCKET_ID_ANY);
+
+ if (!indlst_obj)
+ return NULL;
+ indlst_obj->handle = flow_hw_action_handle_create(dev, queue, attr, conf,
+ actions, user_data,
+ error);
+ if (!indlst_obj->handle) {
+ mlx5_free(indlst_obj);
+ return NULL;
+ }
+ indlst_obj->legacy_type = actions[0].type;
+ indlst_obj->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY;
+ mlx5_indirect_list_add_entry(&priv->indirect_list_head, &indlst_obj->indirect);
+ return (struct rte_flow_action_list_handle *)indlst_obj;
+}
+
+static __rte_always_inline enum mlx5_indirect_list_type
+flow_hw_inlist_type_get(const struct rte_flow_action *actions)
+{
+ switch (actions[0].type) {
+ case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ return MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
+ case RTE_FLOW_ACTION_TYPE_METER_MARK:
+ return actions[1].type == RTE_FLOW_ACTION_TYPE_END ?
+ MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY :
+ MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;
+ default:
+ break;
+ }
+ return MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;
+}
+
static struct rte_flow_action_list_handle *
flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
const struct rte_flow_op_attr *attr,
@@ -9876,6 +10102,7 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
{
struct mlx5_hw_q_job *job = NULL;
bool push = flow_hw_action_push(attr);
+ enum mlx5_indirect_list_type list_type;
struct rte_flow_action_list_handle *handle;
struct mlx5_priv *priv = dev->data->dev_private;
const struct mlx5_flow_template_table_cfg table_cfg = {
@@ -9894,6 +10121,16 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
NULL, "No action list");
return NULL;
}
+ list_type = flow_hw_inlist_type_get(actions);
+ if (list_type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
+ /*
+ * Legacy indirect actions already have
+ * async resources management. No need to do it twice.
+ */
+ handle = mlx5_create_legacy_indlst(dev, queue, attr, conf,
+ actions, user_data, error);
+ goto end;
+ }
if (attr) {
job = flow_hw_action_job_init(priv, queue, NULL, user_data,
NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
@@ -9901,8 +10138,8 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
if (!job)
return NULL;
}
- switch (actions[0].type) {
- case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ switch (list_type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
handle = mlx5_hw_mirror_handle_create(dev, &table_cfg,
actions, error);
break;
@@ -9916,6 +10153,7 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
flow_hw_action_finalize(dev, queue, job, push, false,
handle != NULL);
}
+end:
return handle;
}
@@ -9944,6 +10182,15 @@ flow_hw_async_action_list_handle_destroy
enum mlx5_indirect_list_type type =
mlx5_get_indirect_list_type((void *)handle);
+ if (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
+ struct mlx5_indlst_legacy *legacy = (typeof(legacy))handle;
+
+ ret = flow_hw_action_handle_destroy(dev, queue, attr,
+ legacy->handle,
+ user_data, error);
+ mlx5_indirect_list_remove_entry(&legacy->indirect);
+ goto end;
+ }
if (attr) {
job = flow_hw_action_job_init(priv, queue, NULL, user_data,
NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
@@ -9953,20 +10200,17 @@ flow_hw_async_action_list_handle_destroy
}
switch(type) {
case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
- mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle, false);
+ mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle);
break;
default:
- handle = NULL;
ret = rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "Invalid indirect list handle");
}
if (job) {
- job->action = handle;
- flow_hw_action_finalize(dev, queue, job, push, false,
- handle != NULL);
+ flow_hw_action_finalize(dev, queue, job, push, false, true);
}
- mlx5_free(handle);
+end:
return ret;
}
@@ -9980,6 +10224,53 @@ flow_hw_action_list_handle_destroy(struct rte_eth_dev *dev,
error);
}
+static int
+flow_hw_async_action_list_handle_query_update
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ void *user_data, struct rte_flow_error *error)
+{
+ enum mlx5_indirect_list_type type =
+ mlx5_get_indirect_list_type((const void *)handle);
+
+ if (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
+ struct mlx5_indlst_legacy *legacy = (void *)(uintptr_t)handle;
+
+ if (update && query)
+ return flow_hw_async_action_handle_query_update
+ (dev, queue_id, attr, legacy->handle,
+ update, query, mode, user_data, error);
+ else if (update && update[0])
+ return flow_hw_action_handle_update(dev, queue_id, attr,
+ legacy->handle, update[0],
+ user_data, error);
+ else if (query && query[0])
+ return flow_hw_action_handle_query(dev, queue_id, attr,
+ legacy->handle, query[0],
+ user_data, error);
+ else
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "invalid legacy handle query_update parameters");
+ }
+ return -ENOTSUP;
+}
+
+static int
+flow_hw_action_list_handle_query_update(struct rte_eth_dev *dev,
+ const struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ struct rte_flow_error *error)
+{
+ return flow_hw_async_action_list_handle_query_update
+ (dev, MLX5_HW_INV_QUEUE, NULL, handle,
+ update, query, mode, NULL, error);
+}
+
const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.info_get = flow_hw_info_get,
.configure = flow_hw_configure,
@@ -10010,10 +10301,14 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.action_query_update = flow_hw_action_query_update,
.action_list_handle_create = flow_hw_action_list_handle_create,
.action_list_handle_destroy = flow_hw_action_list_handle_destroy,
+ .action_list_handle_query_update =
+ flow_hw_action_list_handle_query_update,
.async_action_list_handle_create =
flow_hw_async_action_list_handle_create,
.async_action_list_handle_destroy =
flow_hw_async_action_list_handle_destroy,
+ .async_action_list_handle_query_update =
+ flow_hw_async_action_list_handle_query_update,
.query = flow_hw_query,
.get_aged_flows = flow_hw_get_aged_flows,
.get_q_aged_flows = flow_hw_get_q_aged_flows,
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v3 16/16] net/mlx5: fix METER_MARK indirection list callback
2023-10-17 8:09 ` [PATCH v3 " Gregory Etelson
` (14 preceding siblings ...)
2023-10-17 8:09 ` [PATCH v3 15/16] net/mlx5: support indirect list METER_MARK action Gregory Etelson
@ 2023-10-17 8:09 ` Gregory Etelson
2023-10-23 12:42 ` [PATCH v4 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (3 subsequent siblings)
19 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-17 8:09 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Suanming Mou, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam
Indirect action list METER_MARK handle and configuration parameters
can be independently masked or non-masked in actions template.
Non-masked configuration state is saved in the
`mlx5_action_construct_data::shared_meter` object.
The patch moves the indirect action list callback out of the
`mlx5_action_construct_data` types union to prevent its collision
with shared_meter.
Fixes: 82641ccee69d ("net/mlx5: support indirect list METER_MARK action")
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/mlx5_flow.h | 4 +---
drivers/net/mlx5/mlx5_flow_hw.c | 5 +++--
2 files changed, 4 insertions(+), 5 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 2c086026a2..53c11651c8 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1271,6 +1271,7 @@ struct mlx5_action_construct_data {
uint32_t idx; /* Data index. */
uint16_t action_src; /* rte_flow_action src offset. */
uint16_t action_dst; /* mlx5dr_rule_action dst offset. */
+ indirect_list_callback_t indirect_list_cb;
union {
struct {
/* encap data len. */
@@ -1312,9 +1313,6 @@ struct mlx5_action_construct_data {
uint32_t id;
uint32_t conf_masked:1;
} shared_meter;
- struct {
- indirect_list_callback_t cb;
- } indirect_list;
};
};
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 4d070624c8..5114cc1920 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -605,7 +605,7 @@ flow_hw_act_data_indirect_list_append(struct mlx5_priv *priv,
act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
if (!act_data)
return -1;
- act_data->indirect_list.cb = cb;
+ act_data->indirect_list_cb = cb;
LIST_INSERT_HEAD(&acts->act_list, act_data, next);
return 0;
}
@@ -2538,7 +2538,8 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
(int)action->type == act_data->type);
switch ((int)act_data->type) {
case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
- act_data->indirect_list.cb(dev, act_data, actions, rule_acts);
+ act_data->indirect_list_cb(dev, act_data, actions,
+ &rule_acts[act_data->action_dst]);
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
if (flow_hw_shared_action_construct
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v4 00/10] net/mlx5: support indirect actions list
2023-10-17 8:09 ` [PATCH v3 " Gregory Etelson
` (15 preceding siblings ...)
2023-10-17 8:09 ` [PATCH v3 16/16] net/mlx5: fix METER_MARK indirection list callback Gregory Etelson
@ 2023-10-23 12:42 ` Gregory Etelson
2023-10-23 12:42 ` [PATCH v4 01/10] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
` (9 more replies)
2023-10-25 10:27 ` [PATCH v5 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (2 subsequent siblings)
19 siblings, 10 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-23 12:42 UTC (permalink / raw)
To: dev; +Cc: getelson, , rasland
Add MLX5 PMD support for indirect actions list.
Erez Shitrit (1):
net/mlx5/hws: allow destination into default miss FT
Gregory Etelson (4):
net/mlx5: reformat HWS code for HWS mirror action
net/mlx5: support HWS mirror action
net/mlx5: reformat HWS code for indirect list actions
net/mlx5: support indirect list METER_MARK action
Haifei Luo (1):
net/mlx5/hws: support reformat for hws mirror
Hamdan Igbaria (3):
net/mlx5/hws: add support for reformat DevX object
net/mlx5/hws: support creating of dynamic forward table and FTE
net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action
Shun Hao (1):
net/mlx5/hws: add support for mirroring
drivers/common/mlx5/mlx5_prm.h | 81 +-
drivers/net/mlx5/hws/mlx5dr.h | 34 +
drivers/net/mlx5/hws/mlx5dr_action.c | 210 +++-
drivers/net/mlx5/hws/mlx5dr_action.h | 8 +
drivers/net/mlx5/hws/mlx5dr_cmd.c | 143 ++-
drivers/net/mlx5/hws/mlx5dr_cmd.h | 49 +-
drivers/net/mlx5/hws/mlx5dr_debug.c | 1 +
drivers/net/mlx5/hws/mlx5dr_internal.h | 5 +
drivers/net/mlx5/hws/mlx5dr_send.c | 5 -
drivers/net/mlx5/hws/mlx5dr_table.c | 8 +-
drivers/net/mlx5/mlx5.c | 1 +
drivers/net/mlx5/mlx5.h | 2 +
drivers/net/mlx5/mlx5_flow.c | 199 ++++
drivers/net/mlx5/mlx5_flow.h | 111 ++-
drivers/net/mlx5/mlx5_flow_hw.c | 1218 +++++++++++++++++++++---
15 files changed, 1907 insertions(+), 168 deletions(-)
--
v3: Add ACK to patches in the series.
v4: Squash reformat patches.
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v4 01/10] net/mlx5/hws: add support for reformat DevX object
2023-10-23 12:42 ` [PATCH v4 00/10] net/mlx5: support indirect actions list Gregory Etelson
@ 2023-10-23 12:42 ` Gregory Etelson
2023-10-23 12:42 ` [PATCH v4 02/10] net/mlx5/hws: support creating of dynamic forward table and FTE Gregory Etelson
` (8 subsequent siblings)
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-23 12:42 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Hamdan Igbaria, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Hamdan Igbaria <hamdani@nvidia.com>
Add support for creation of packet reformat object,
via the ALLOC_PACKET_REFORMAT_CONTEXT command.
Signed-off-by: Hamdan Igbaria <hamdani@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/common/mlx5/mlx5_prm.h | 39 +++++++++++++++++
drivers/net/mlx5/hws/mlx5dr_cmd.c | 60 ++++++++++++++++++++++++++
drivers/net/mlx5/hws/mlx5dr_cmd.h | 11 +++++
drivers/net/mlx5/hws/mlx5dr_internal.h | 5 +++
drivers/net/mlx5/hws/mlx5dr_send.c | 5 ---
5 files changed, 115 insertions(+), 5 deletions(-)
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index 6e181a0eca..4192fff55b 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -1218,6 +1218,8 @@ enum {
MLX5_CMD_OP_CREATE_FLOW_GROUP = 0x933,
MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936,
MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
+ MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT = 0x93d,
+ MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT = 0x93e,
MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939,
MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
MLX5_CMD_OP_CREATE_GENERAL_OBJECT = 0xa00,
@@ -5191,6 +5193,43 @@ struct mlx5_ifc_modify_flow_table_out_bits {
u8 reserved_at_40[0x60];
};
+struct mlx5_ifc_packet_reformat_context_in_bits {
+ u8 reformat_type[0x8];
+ u8 reserved_at_8[0x4];
+ u8 reformat_param_0[0x4];
+ u8 reserved_at_16[0x6];
+ u8 reformat_data_size[0xa];
+
+ u8 reformat_param_1[0x8];
+ u8 reserved_at_40[0x8];
+ u8 reformat_data[6][0x8];
+
+ u8 more_reformat_data[][0x8];
+};
+
+struct mlx5_ifc_alloc_packet_reformat_context_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0xa0];
+
+ u8 packet_reformat_context[];
+};
+
+struct mlx5_ifc_alloc_packet_reformat_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 packet_reformat_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
/* CQE format mask. */
#define MLX5E_CQE_FORMAT_MASK 0xc
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c
index 594c59aee3..0ccbaee961 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.c
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c
@@ -780,6 +780,66 @@ mlx5dr_cmd_sq_create(struct ibv_context *ctx,
return devx_obj;
}
+struct mlx5dr_devx_obj *
+mlx5dr_cmd_packet_reformat_create(struct ibv_context *ctx,
+ struct mlx5dr_cmd_packet_reformat_create_attr *attr)
+{
+ uint32_t out[MLX5_ST_SZ_DW(alloc_packet_reformat_out)] = {0};
+ size_t insz, cmd_data_sz, cmd_total_sz;
+ struct mlx5dr_devx_obj *devx_obj;
+ void *prctx;
+ void *pdata;
+ void *in;
+
+ cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
+ cmd_total_sz += MLX5_ST_SZ_BYTES(packet_reformat_context_in);
+ cmd_data_sz = MLX5_FLD_SZ_BYTES(packet_reformat_context_in, reformat_data);
+ insz = align(cmd_total_sz + attr->data_sz - cmd_data_sz, DW_SIZE);
+ in = simple_calloc(1, insz);
+ if (!in) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
+
+ prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in,
+ packet_reformat_context);
+ pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);
+
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_type, attr->type);
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, attr->reformat_param_0);
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, attr->data_sz);
+ memcpy(pdata, attr->data, attr->data_sz);
+
+ devx_obj = simple_malloc(sizeof(*devx_obj));
+ if (!devx_obj) {
+ DR_LOG(ERR, "Failed to allocate memory for packet reformat object");
+ rte_errno = ENOMEM;
+ goto out_free_in;
+ }
+
+ devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, insz, out, sizeof(out));
+ if (!devx_obj->obj) {
+ DR_LOG(ERR, "Failed to create packet reformat");
+ rte_errno = errno;
+ goto out_free_devx;
+ }
+
+ devx_obj->id = MLX5_GET(alloc_packet_reformat_out, out, packet_reformat_id);
+
+ simple_free(in);
+
+ return devx_obj;
+
+out_free_devx:
+ simple_free(devx_obj);
+out_free_in:
+ simple_free(in);
+ return NULL;
+}
+
int mlx5dr_cmd_sq_modify_rdy(struct mlx5dr_devx_obj *devx_obj)
{
uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h
index 8a495db9b3..f45b6c6b07 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.h
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h
@@ -155,6 +155,13 @@ struct mlx5dr_cmd_allow_other_vhca_access_attr {
uint8_t access_key[ACCESS_KEY_LEN];
};
+struct mlx5dr_cmd_packet_reformat_create_attr {
+ uint8_t type;
+ size_t data_sz;
+ void *data;
+ uint8_t reformat_param_0;
+};
+
struct mlx5dr_cmd_query_ft_caps {
uint8_t max_level;
uint8_t reparse;
@@ -285,6 +292,10 @@ mlx5dr_cmd_forward_tbl_create(struct ibv_context *ctx,
void mlx5dr_cmd_forward_tbl_destroy(struct mlx5dr_cmd_forward_tbl *tbl);
+struct mlx5dr_devx_obj *
+mlx5dr_cmd_packet_reformat_create(struct ibv_context *ctx,
+ struct mlx5dr_cmd_packet_reformat_create_attr *attr);
+
struct mlx5dr_devx_obj *
mlx5dr_cmd_alias_obj_create(struct ibv_context *ctx,
struct mlx5dr_cmd_alias_obj_create_attr *alias_attr);
diff --git a/drivers/net/mlx5/hws/mlx5dr_internal.h b/drivers/net/mlx5/hws/mlx5dr_internal.h
index c3c077667d..3770d28e62 100644
--- a/drivers/net/mlx5/hws/mlx5dr_internal.h
+++ b/drivers/net/mlx5/hws/mlx5dr_internal.h
@@ -91,4 +91,9 @@ static inline uint64_t roundup_pow_of_two(uint64_t n)
return n == 1 ? 1 : 1ULL << log2above(n);
}
+static inline unsigned long align(unsigned long val, unsigned long align)
+{
+ return (val + align - 1) & ~(align - 1);
+}
+
#endif /* MLX5DR_INTERNAL_H_ */
diff --git a/drivers/net/mlx5/hws/mlx5dr_send.c b/drivers/net/mlx5/hws/mlx5dr_send.c
index e58fdeb117..622d574bfa 100644
--- a/drivers/net/mlx5/hws/mlx5dr_send.c
+++ b/drivers/net/mlx5/hws/mlx5dr_send.c
@@ -668,11 +668,6 @@ static int mlx5dr_send_ring_create_sq_obj(struct mlx5dr_context *ctx,
return err;
}
-static inline unsigned long align(unsigned long val, unsigned long align)
-{
- return (val + align - 1) & ~(align - 1);
-}
-
static int mlx5dr_send_ring_open_sq(struct mlx5dr_context *ctx,
struct mlx5dr_send_engine *queue,
struct mlx5dr_send_ring_sq *sq,
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v4 02/10] net/mlx5/hws: support creating of dynamic forward table and FTE
2023-10-23 12:42 ` [PATCH v4 00/10] net/mlx5: support indirect actions list Gregory Etelson
2023-10-23 12:42 ` [PATCH v4 01/10] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
@ 2023-10-23 12:42 ` Gregory Etelson
2023-10-23 12:42 ` [PATCH v4 03/10] net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action Gregory Etelson
` (7 subsequent siblings)
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-23 12:42 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Hamdan Igbaria, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Hamdan Igbaria <hamdani@nvidia.com>
Add the ability to create forward table and FTE.
Signed-off-by: Hamdan Igbaria <hamdani@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/common/mlx5/mlx5_prm.h | 4 ++++
drivers/net/mlx5/hws/mlx5dr_cmd.c | 13 +++++++++++++
drivers/net/mlx5/hws/mlx5dr_cmd.h | 19 +++++++++++++++++++
3 files changed, 36 insertions(+)
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index 4192fff55b..df621b19af 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -5048,7 +5048,11 @@ enum mlx5_flow_destination_type {
};
enum mlx5_flow_context_action {
+ MLX5_FLOW_CONTEXT_ACTION_DROP = 1 << 1,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 1 << 2,
+ MLX5_FLOW_CONTEXT_ACTION_REFORMAT = 1 << 4,
+ MLX5_FLOW_CONTEXT_ACTION_DECRYPT = 1 << 12,
+ MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 13,
};
enum mlx5_flow_context_flow_source {
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c
index 0ccbaee961..8f407f9bce 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.c
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c
@@ -42,6 +42,7 @@ mlx5dr_cmd_flow_table_create(struct ibv_context *ctx,
ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level);
MLX5_SET(flow_table_context, ft_ctx, rtc_valid, ft_attr->rtc_valid);
+ MLX5_SET(flow_table_context, ft_ctx, reformat_en, ft_attr->reformat_en);
devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
if (!devx_obj->obj) {
@@ -182,12 +183,24 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
action_flags = fte_attr->action_flags;
MLX5_SET(flow_context, in_flow_context, action, action_flags);
+ if (action_flags & MLX5_FLOW_CONTEXT_ACTION_REFORMAT)
+ MLX5_SET(flow_context, in_flow_context,
+ packet_reformat_id, fte_attr->packet_reformat_id);
+
+ if (action_flags & (MLX5_FLOW_CONTEXT_ACTION_DECRYPT | MLX5_FLOW_CONTEXT_ACTION_ENCRYPT)) {
+ MLX5_SET(flow_context, in_flow_context,
+ encrypt_decrypt_type, fte_attr->encrypt_decrypt_type);
+ MLX5_SET(flow_context, in_flow_context,
+ encrypt_decrypt_obj_id, fte_attr->encrypt_decrypt_obj_id);
+ }
+
if (action_flags & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
/* Only destination_list_size of size 1 is supported */
MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
MLX5_SET(dest_format, in_dests, destination_type, fte_attr->destination_type);
MLX5_SET(dest_format, in_dests, destination_id, fte_attr->destination_id);
+ MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
}
devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h
index f45b6c6b07..bf3a362300 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.h
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h
@@ -7,8 +7,12 @@
struct mlx5dr_cmd_set_fte_attr {
uint32_t action_flags;
+ uint8_t encrypt_decrypt_type;
+ uint32_t encrypt_decrypt_obj_id;
+ uint32_t packet_reformat_id;
uint8_t destination_type;
uint32_t destination_id;
+ uint8_t ignore_flow_level;
uint8_t flow_source;
};
@@ -16,6 +20,7 @@ struct mlx5dr_cmd_ft_create_attr {
uint8_t type;
uint8_t level;
bool rtc_valid;
+ uint8_t reformat_en;
};
#define ACCESS_KEY_LEN 32
@@ -296,6 +301,20 @@ struct mlx5dr_devx_obj *
mlx5dr_cmd_packet_reformat_create(struct ibv_context *ctx,
struct mlx5dr_cmd_packet_reformat_create_attr *attr);
+struct mlx5dr_devx_obj *
+mlx5dr_cmd_set_fte(struct ibv_context *ctx,
+ uint32_t table_type,
+ uint32_t table_id,
+ uint32_t group_id,
+ struct mlx5dr_cmd_set_fte_attr *fte_attr);
+
+struct mlx5dr_cmd_forward_tbl *
+mlx5dr_cmd_forward_tbl_create(struct ibv_context *ctx,
+ struct mlx5dr_cmd_ft_create_attr *ft_attr,
+ struct mlx5dr_cmd_set_fte_attr *fte_attr);
+
+void mlx5dr_cmd_forward_tbl_destroy(struct mlx5dr_cmd_forward_tbl *tbl);
+
struct mlx5dr_devx_obj *
mlx5dr_cmd_alias_obj_create(struct ibv_context *ctx,
struct mlx5dr_cmd_alias_obj_create_attr *alias_attr);
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v4 03/10] net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action
2023-10-23 12:42 ` [PATCH v4 00/10] net/mlx5: support indirect actions list Gregory Etelson
2023-10-23 12:42 ` [PATCH v4 01/10] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
2023-10-23 12:42 ` [PATCH v4 02/10] net/mlx5/hws: support creating of dynamic forward table and FTE Gregory Etelson
@ 2023-10-23 12:42 ` Gregory Etelson
2023-10-23 12:42 ` [PATCH v4 04/10] net/mlx5/hws: add support for mirroring Gregory Etelson
` (6 subsequent siblings)
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-23 12:42 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Hamdan Igbaria, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Hamdan Igbaria <hamdani@nvidia.com>
Add an mlx5dr_devx_obj struct to mlx5dr_action, so we can hold
the FT object in the dest table action.
Signed-off-by: Hamdan Igbaria <hamdani@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/hws/mlx5dr_action.c | 4 ++++
drivers/net/mlx5/hws/mlx5dr_action.h | 3 +++
drivers/net/mlx5/hws/mlx5dr_table.c | 1 -
3 files changed, 7 insertions(+), 1 deletion(-)
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c
index ea9fc23732..55ec4f71c9 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.c
+++ b/drivers/net/mlx5/hws/mlx5dr_action.c
@@ -787,6 +787,8 @@ mlx5dr_action_create_dest_table(struct mlx5dr_context *ctx,
ret = mlx5dr_action_create_stcs(action, tbl->ft);
if (ret)
goto free_action;
+
+ action->devx_dest.devx_obj = tbl->ft;
}
return action;
@@ -864,6 +866,8 @@ mlx5dr_action_create_dest_tir(struct mlx5dr_context *ctx,
ret = mlx5dr_action_create_stcs(action, cur_obj);
if (ret)
goto clean_obj;
+
+ action->devx_dest.devx_obj = cur_obj;
}
return action;
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.h b/drivers/net/mlx5/hws/mlx5dr_action.h
index 314e289780..104c6880c1 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.h
+++ b/drivers/net/mlx5/hws/mlx5dr_action.h
@@ -148,6 +148,9 @@ struct mlx5dr_action {
struct {
struct mlx5dv_steering_anchor *sa;
} root_tbl;
+ struct {
+ struct mlx5dr_devx_obj *devx_obj;
+ } devx_dest;
};
};
diff --git a/drivers/net/mlx5/hws/mlx5dr_table.c b/drivers/net/mlx5/hws/mlx5dr_table.c
index e1150cd75d..91eb92db78 100644
--- a/drivers/net/mlx5/hws/mlx5dr_table.c
+++ b/drivers/net/mlx5/hws/mlx5dr_table.c
@@ -68,7 +68,6 @@ static void mlx5dr_table_down_default_fdb_miss_tbl(struct mlx5dr_table *tbl)
return;
mlx5dr_cmd_forward_tbl_destroy(default_miss);
-
ctx->common_res[tbl_type].default_miss = NULL;
}
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v4 04/10] net/mlx5/hws: add support for mirroring
2023-10-23 12:42 ` [PATCH v4 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (2 preceding siblings ...)
2023-10-23 12:42 ` [PATCH v4 03/10] net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action Gregory Etelson
@ 2023-10-23 12:42 ` Gregory Etelson
2023-10-23 12:42 ` [PATCH v4 05/10] net/mlx5/hws: allow destination into default miss FT Gregory Etelson
` (5 subsequent siblings)
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-23 12:42 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Shun Hao, Alex Vesker, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Shun Hao <shunh@nvidia.com>
This patch supports mirroring by adding a dest_array action. The action
accepts a list containing multiple destination actions, and can duplicate
packets and forward to each destination in the list.
Signed-off-by: Shun Hao <shunh@nvidia.com>
Acked-by: Alex Vesker <valex@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/common/mlx5/mlx5_prm.h | 23 ++++-
drivers/net/mlx5/hws/mlx5dr.h | 34 +++++++
drivers/net/mlx5/hws/mlx5dr_action.c | 130 ++++++++++++++++++++++++++-
drivers/net/mlx5/hws/mlx5dr_action.h | 3 +
drivers/net/mlx5/hws/mlx5dr_cmd.c | 64 ++++++++++---
drivers/net/mlx5/hws/mlx5dr_cmd.h | 21 ++++-
drivers/net/mlx5/hws/mlx5dr_debug.c | 1 +
drivers/net/mlx5/hws/mlx5dr_table.c | 7 +-
8 files changed, 262 insertions(+), 21 deletions(-)
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index df621b19af..aa0b622ca2 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -2320,7 +2320,11 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
};
struct mlx5_ifc_esw_cap_bits {
- u8 reserved_at_0[0x60];
+ u8 reserved_at_0[0x1d];
+ u8 merged_eswitch[0x1];
+ u8 reserved_at_1e[0x2];
+
+ u8 reserved_at_20[0x40];
u8 esw_manager_vport_number_valid[0x1];
u8 reserved_at_61[0xf];
@@ -5045,6 +5049,7 @@ struct mlx5_ifc_query_flow_table_out_bits {
enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0,
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
+ MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
};
enum mlx5_flow_context_action {
@@ -5088,6 +5093,19 @@ union mlx5_ifc_dest_format_flow_counter_list_auto_bits {
u8 reserved_at_0[0x40];
};
+struct mlx5_ifc_extended_dest_format_bits {
+ struct mlx5_ifc_dest_format_bits destination_entry;
+
+ u8 packet_reformat_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+#define MLX5_IFC_MULTI_PATH_FT_MAX_LEVEL 64
+
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
struct mlx5_ifc_flow_context_bits {
u8 reserved_at_00[0x20];
u8 group_id[0x20];
@@ -5106,8 +5124,7 @@ struct mlx5_ifc_flow_context_bits {
u8 reserved_at_e0[0x40];
u8 encrypt_decrypt_obj_id[0x20];
u8 reserved_at_140[0x16c0];
- /* Currently only one destnation */
- union mlx5_ifc_dest_format_flow_counter_list_auto_bits destination[1];
+ union mlx5_ifc_dest_format_flow_counter_list_auto_bits destination[0];
};
struct mlx5_ifc_set_fte_in_bits {
diff --git a/drivers/net/mlx5/hws/mlx5dr.h b/drivers/net/mlx5/hws/mlx5dr.h
index ea8bf683f3..1995c55132 100644
--- a/drivers/net/mlx5/hws/mlx5dr.h
+++ b/drivers/net/mlx5/hws/mlx5dr.h
@@ -46,6 +46,7 @@ enum mlx5dr_action_type {
MLX5DR_ACTION_TYP_ASO_METER,
MLX5DR_ACTION_TYP_ASO_CT,
MLX5DR_ACTION_TYP_DEST_ROOT,
+ MLX5DR_ACTION_TYP_DEST_ARRAY,
MLX5DR_ACTION_TYP_MAX,
};
@@ -213,6 +214,20 @@ struct mlx5dr_rule_action {
};
};
+struct mlx5dr_action_dest_attr {
+ /* Required action combination */
+ enum mlx5dr_action_type *action_type;
+
+ /* Required destination action to forward the packet */
+ struct mlx5dr_action *dest;
+
+ /* Optional reformat data */
+ struct {
+ size_t reformat_data_sz;
+ void *reformat_data;
+ } reformat;
+};
+
/* Open a context used for direct rule insertion using hardware steering.
* Each context can contain multiple tables of different types.
*
@@ -616,6 +631,25 @@ mlx5dr_action_create_pop_vlan(struct mlx5dr_context *ctx, uint32_t flags);
struct mlx5dr_action *
mlx5dr_action_create_push_vlan(struct mlx5dr_context *ctx, uint32_t flags);
+/* Create a dest array action, this action can duplicate packets and forward to
+ * multiple destinations in the destination list.
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] num_dest
+ * The number of dests attributes.
+ * @param[in] dests
+ * The destination array. Each contains a destination action and can have
+ * additional actions.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5dr_action_flags)
+ * @return pointer to mlx5dr_action on success NULL otherwise.
+ */
+struct mlx5dr_action *
+mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
+ size_t num_dest,
+ struct mlx5dr_action_dest_attr *dests,
+ uint32_t flags);
+
/* Create dest root table, this action will jump to root table according
* the given priority.
* @param[in] ctx
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c
index 55ec4f71c9..f068bc7e9c 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.c
+++ b/drivers/net/mlx5/hws/mlx5dr_action.c
@@ -34,7 +34,8 @@ static const uint32_t action_order_arr[MLX5DR_TABLE_TYPE_MAX][MLX5DR_ACTION_TYP_
BIT(MLX5DR_ACTION_TYP_MISS) |
BIT(MLX5DR_ACTION_TYP_TIR) |
BIT(MLX5DR_ACTION_TYP_DROP) |
- BIT(MLX5DR_ACTION_TYP_DEST_ROOT),
+ BIT(MLX5DR_ACTION_TYP_DEST_ROOT) |
+ BIT(MLX5DR_ACTION_TYP_DEST_ARRAY),
BIT(MLX5DR_ACTION_TYP_LAST),
},
[MLX5DR_TABLE_TYPE_NIC_TX] = {
@@ -71,7 +72,8 @@ static const uint32_t action_order_arr[MLX5DR_TABLE_TYPE_MAX][MLX5DR_ACTION_TYP_
BIT(MLX5DR_ACTION_TYP_MISS) |
BIT(MLX5DR_ACTION_TYP_VPORT) |
BIT(MLX5DR_ACTION_TYP_DROP) |
- BIT(MLX5DR_ACTION_TYP_DEST_ROOT),
+ BIT(MLX5DR_ACTION_TYP_DEST_ROOT) |
+ BIT(MLX5DR_ACTION_TYP_DEST_ARRAY),
BIT(MLX5DR_ACTION_TYP_LAST),
},
};
@@ -535,6 +537,7 @@ static void mlx5dr_action_fill_stc_attr(struct mlx5dr_action *action,
}
break;
case MLX5DR_ACTION_TYP_TBL:
+ case MLX5DR_ACTION_TYP_DEST_ARRAY:
attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT;
attr->action_offset = MLX5DR_ACTION_OFFSET_HIT;
attr->dest_table_id = obj->id;
@@ -1700,6 +1703,124 @@ mlx5dr_action_create_modify_header(struct mlx5dr_context *ctx,
return NULL;
}
+struct mlx5dr_action *
+mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
+ size_t num_dest,
+ struct mlx5dr_action_dest_attr *dests,
+ uint32_t flags)
+{
+ struct mlx5dr_cmd_set_fte_dest *dest_list = NULL;
+ struct mlx5dr_cmd_ft_create_attr ft_attr = {0};
+ struct mlx5dr_cmd_set_fte_attr fte_attr = {0};
+ struct mlx5dr_cmd_forward_tbl *fw_island;
+ enum mlx5dr_table_type table_type;
+ struct mlx5dr_action *action;
+ uint32_t i;
+ int ret;
+
+ if (num_dest <= 1) {
+ rte_errno = EINVAL;
+ DR_LOG(ERR, "Action must have multiple dests");
+ return NULL;
+ }
+
+ if (flags == (MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_SHARED)) {
+ ft_attr.type = FS_FT_NIC_RX;
+ ft_attr.level = MLX5_IFC_MULTI_PATH_FT_MAX_LEVEL - 1;
+ table_type = MLX5DR_TABLE_TYPE_NIC_RX;
+ } else if (flags == (MLX5DR_ACTION_FLAG_HWS_FDB | MLX5DR_ACTION_FLAG_SHARED)) {
+ ft_attr.type = FS_FT_FDB;
+ ft_attr.level = ctx->caps->fdb_ft.max_level - 1;
+ table_type = MLX5DR_TABLE_TYPE_FDB;
+ } else {
+ DR_LOG(ERR, "Action flags not supported");
+ rte_errno = ENOTSUP;
+ return NULL;
+ }
+
+ if (mlx5dr_context_shared_gvmi_used(ctx)) {
+ DR_LOG(ERR, "Cannot use this action in shared GVMI context");
+ rte_errno = ENOTSUP;
+ return NULL;
+ }
+
+ dest_list = simple_calloc(num_dest, sizeof(*dest_list));
+ if (!dest_list) {
+ DR_LOG(ERR, "Failed to allocate memory for destinations");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ for (i = 0; i < num_dest; i++) {
+ enum mlx5dr_action_type *action_type = dests[i].action_type;
+
+ if (!mlx5dr_action_check_combo(dests[i].action_type, table_type)) {
+ DR_LOG(ERR, "Invalid combination of actions");
+ rte_errno = EINVAL;
+ goto free_dest_list;
+ }
+
+ for (; *action_type != MLX5DR_ACTION_TYP_LAST; action_type++) {
+ switch (*action_type) {
+ case MLX5DR_ACTION_TYP_TBL:
+ dest_list[i].destination_type =
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest_list[i].destination_id = dests[i].dest->devx_dest.devx_obj->id;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ fte_attr.ignore_flow_level = 1;
+ break;
+ case MLX5DR_ACTION_TYP_VPORT:
+ dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest_list[i].destination_id = dests[i].dest->vport.vport_num;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ if (ctx->caps->merged_eswitch) {
+ dest_list[i].ext_flags |=
+ MLX5DR_CMD_EXT_DEST_ESW_OWNER_VHCA_ID;
+ dest_list[i].esw_owner_vhca_id =
+ dests[i].dest->vport.esw_owner_vhca_id;
+ }
+ break;
+ case MLX5DR_ACTION_TYP_TIR:
+ dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest_list[i].destination_id = dests[i].dest->devx_dest.devx_obj->id;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ break;
+ default:
+ DR_LOG(ERR, "Unsupported action in dest_array");
+ rte_errno = ENOTSUP;
+ goto free_dest_list;
+ }
+ }
+ }
+ fte_attr.dests_num = num_dest;
+ fte_attr.dests = dest_list;
+
+ fw_island = mlx5dr_cmd_forward_tbl_create(ctx->ibv_ctx, &ft_attr, &fte_attr);
+ if (!fw_island)
+ goto free_dest_list;
+
+ action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_DEST_ARRAY);
+ if (!action)
+ goto destroy_fw_island;
+
+ ret = mlx5dr_action_create_stcs(action, fw_island->ft);
+ if (ret)
+ goto free_action;
+
+ action->dest_array.fw_island = fw_island;
+
+ simple_free(dest_list);
+ return action;
+
+free_action:
+ simple_free(action);
+destroy_fw_island:
+ mlx5dr_cmd_forward_tbl_destroy(fw_island);
+free_dest_list:
+ simple_free(dest_list);
+ return NULL;
+}
+
struct mlx5dr_action *
mlx5dr_action_create_dest_root(struct mlx5dr_context *ctx,
uint16_t priority,
@@ -1782,6 +1903,10 @@ static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action)
mlx5dr_action_destroy_stcs(action);
mlx5dr_action_put_shared_stc(action, MLX5DR_CONTEXT_SHARED_STC_POP);
break;
+ case MLX5DR_ACTION_TYP_DEST_ARRAY:
+ mlx5dr_action_destroy_stcs(action);
+ mlx5dr_cmd_forward_tbl_destroy(action->dest_array.fw_island);
+ break;
case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
case MLX5DR_ACTION_TYP_MODIFY_HDR:
for (i = 0; i < action->modify_header.num_of_patterns; i++) {
@@ -2291,6 +2416,7 @@ int mlx5dr_action_template_process(struct mlx5dr_action_template *at)
case MLX5DR_ACTION_TYP_TIR:
case MLX5DR_ACTION_TYP_TBL:
case MLX5DR_ACTION_TYP_DEST_ROOT:
+ case MLX5DR_ACTION_TYP_DEST_ARRAY:
case MLX5DR_ACTION_TYP_VPORT:
case MLX5DR_ACTION_TYP_MISS:
/* Hit action */
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.h b/drivers/net/mlx5/hws/mlx5dr_action.h
index 104c6880c1..efe07c9d47 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.h
+++ b/drivers/net/mlx5/hws/mlx5dr_action.h
@@ -151,6 +151,9 @@ struct mlx5dr_action {
struct {
struct mlx5dr_devx_obj *devx_obj;
} devx_dest;
+ struct {
+ struct mlx5dr_cmd_forward_tbl *fw_island;
+ } dest_array;
};
};
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c
index 8f407f9bce..22f7c6b019 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.c
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c
@@ -158,18 +158,31 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
uint32_t group_id,
struct mlx5dr_cmd_set_fte_attr *fte_attr)
{
- uint32_t in[MLX5_ST_SZ_DW(set_fte_in) + MLX5_ST_SZ_DW(dest_format)] = {0};
uint32_t out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
struct mlx5dr_devx_obj *devx_obj;
+ uint32_t dest_entry_sz;
+ uint32_t total_dest_sz;
void *in_flow_context;
uint32_t action_flags;
- void *in_dests;
+ uint8_t *in_dests;
+ uint32_t inlen;
+ uint32_t *in;
+ uint32_t i;
+
+ dest_entry_sz = MLX5_ST_SZ_BYTES(dest_format);
+ total_dest_sz = dest_entry_sz * fte_attr->dests_num;
+ inlen = align((MLX5_ST_SZ_BYTES(set_fte_in) + total_dest_sz), DW_SIZE);
+ in = simple_calloc(1, inlen);
+ if (!in) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
devx_obj = simple_malloc(sizeof(*devx_obj));
if (!devx_obj) {
DR_LOG(ERR, "Failed to allocate memory for fte object");
rte_errno = ENOMEM;
- return NULL;
+ goto free_in;
}
MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
@@ -179,6 +192,7 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_source, fte_attr->flow_source);
+ MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
action_flags = fte_attr->action_flags;
MLX5_SET(flow_context, in_flow_context, action, action_flags);
@@ -195,15 +209,39 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
}
if (action_flags & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- /* Only destination_list_size of size 1 is supported */
- MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
- in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
- MLX5_SET(dest_format, in_dests, destination_type, fte_attr->destination_type);
- MLX5_SET(dest_format, in_dests, destination_id, fte_attr->destination_id);
- MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
+ in_dests = (uint8_t *)MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+
+ for (i = 0; i < fte_attr->dests_num; i++) {
+ struct mlx5dr_cmd_set_fte_dest *dest = &fte_attr->dests[i];
+
+ switch (dest->destination_type) {
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+ if (dest->ext_flags & MLX5DR_CMD_EXT_DEST_ESW_OWNER_VHCA_ID) {
+ MLX5_SET(dest_format, in_dests,
+ destination_eswitch_owner_vhca_id_valid, 1);
+ MLX5_SET(dest_format, in_dests,
+ destination_eswitch_owner_vhca_id,
+ dest->esw_owner_vhca_id);
+ }
+ /* Fall through */
+ case MLX5_FLOW_DESTINATION_TYPE_TIR:
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ MLX5_SET(dest_format, in_dests, destination_type,
+ dest->destination_type);
+ MLX5_SET(dest_format, in_dests, destination_id,
+ dest->destination_id);
+ break;
+ default:
+ rte_errno = EOPNOTSUPP;
+ goto free_devx;
+ }
+
+ in_dests = in_dests + dest_entry_sz;
+ }
+ MLX5_SET(flow_context, in_flow_context, destination_list_size, fte_attr->dests_num);
}
- devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
+ devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, inlen, out, sizeof(out));
if (!devx_obj->obj) {
DR_LOG(ERR, "Failed to create FTE (syndrome: %#x)",
mlx5dr_cmd_get_syndrome(out));
@@ -211,10 +249,13 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
goto free_devx;
}
+ simple_free(in);
return devx_obj;
free_devx:
simple_free(devx_obj);
+free_in:
+ simple_free(in);
return NULL;
}
@@ -1244,6 +1285,9 @@ int mlx5dr_cmd_query_caps(struct ibv_context *ctx,
caps->eswitch_manager_vport_number =
MLX5_GET(query_hca_cap_out, out,
capability.esw_cap.esw_manager_vport_number);
+
+ caps->merged_eswitch = MLX5_GET(query_hca_cap_out, out,
+ capability.esw_cap.merged_eswitch);
}
ret = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h
index bf3a362300..6f4aa3e320 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.h
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h
@@ -5,15 +5,27 @@
#ifndef MLX5DR_CMD_H_
#define MLX5DR_CMD_H_
+enum mlx5dr_cmd_ext_dest_flags {
+ MLX5DR_CMD_EXT_DEST_REFORMAT = 1 << 0,
+ MLX5DR_CMD_EXT_DEST_ESW_OWNER_VHCA_ID = 1 << 1,
+};
+
+struct mlx5dr_cmd_set_fte_dest {
+ uint8_t destination_type;
+ uint32_t destination_id;
+ enum mlx5dr_cmd_ext_dest_flags ext_flags;
+ uint16_t esw_owner_vhca_id;
+};
+
struct mlx5dr_cmd_set_fte_attr {
uint32_t action_flags;
+ uint8_t ignore_flow_level;
+ uint8_t flow_source;
uint8_t encrypt_decrypt_type;
uint32_t encrypt_decrypt_obj_id;
uint32_t packet_reformat_id;
- uint8_t destination_type;
- uint32_t destination_id;
- uint8_t ignore_flow_level;
- uint8_t flow_source;
+ uint32_t dests_num;
+ struct mlx5dr_cmd_set_fte_dest *dests;
};
struct mlx5dr_cmd_ft_create_attr {
@@ -216,6 +228,7 @@ struct mlx5dr_cmd_query_caps {
struct mlx5dr_cmd_query_ft_caps nic_ft;
struct mlx5dr_cmd_query_ft_caps fdb_ft;
bool eswitch_manager;
+ uint8_t merged_eswitch;
uint32_t eswitch_manager_vport_number;
uint8_t log_header_modify_argument_granularity;
uint8_t log_header_modify_argument_max_alloc;
diff --git a/drivers/net/mlx5/hws/mlx5dr_debug.c b/drivers/net/mlx5/hws/mlx5dr_debug.c
index 89529944a3..e7b1f2cc32 100644
--- a/drivers/net/mlx5/hws/mlx5dr_debug.c
+++ b/drivers/net/mlx5/hws/mlx5dr_debug.c
@@ -23,6 +23,7 @@ const char *mlx5dr_debug_action_type_str[] = {
[MLX5DR_ACTION_TYP_ASO_METER] = "ASO_METER",
[MLX5DR_ACTION_TYP_ASO_CT] = "ASO_CT",
[MLX5DR_ACTION_TYP_DEST_ROOT] = "DEST_ROOT",
+ [MLX5DR_ACTION_TYP_DEST_ARRAY] = "DEST_ARRAY",
};
static_assert(ARRAY_SIZE(mlx5dr_debug_action_type_str) == MLX5DR_ACTION_TYP_MAX,
diff --git a/drivers/net/mlx5/hws/mlx5dr_table.c b/drivers/net/mlx5/hws/mlx5dr_table.c
index 91eb92db78..55b9b20150 100644
--- a/drivers/net/mlx5/hws/mlx5dr_table.c
+++ b/drivers/net/mlx5/hws/mlx5dr_table.c
@@ -22,6 +22,7 @@ mlx5dr_table_up_default_fdb_miss_tbl(struct mlx5dr_table *tbl)
struct mlx5dr_cmd_ft_create_attr ft_attr = {0};
struct mlx5dr_cmd_set_fte_attr fte_attr = {0};
struct mlx5dr_cmd_forward_tbl *default_miss;
+ struct mlx5dr_cmd_set_fte_dest dest = {0};
struct mlx5dr_context *ctx = tbl->ctx;
uint8_t tbl_type = tbl->type;
@@ -37,9 +38,11 @@ mlx5dr_table_up_default_fdb_miss_tbl(struct mlx5dr_table *tbl)
ft_attr.level = tbl->ctx->caps->fdb_ft.max_level; /* The last level */
ft_attr.rtc_valid = false;
+ dest.destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.destination_id = ctx->caps->eswitch_manager_vport_number;
fte_attr.action_flags = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- fte_attr.destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- fte_attr.destination_id = ctx->caps->eswitch_manager_vport_number;
+ fte_attr.dests_num = 1;
+ fte_attr.dests = &dest;
default_miss = mlx5dr_cmd_forward_tbl_create(mlx5dr_context_get_local_ibv(ctx),
&ft_attr, &fte_attr);
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v4 05/10] net/mlx5/hws: allow destination into default miss FT
2023-10-23 12:42 ` [PATCH v4 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (3 preceding siblings ...)
2023-10-23 12:42 ` [PATCH v4 04/10] net/mlx5/hws: add support for mirroring Gregory Etelson
@ 2023-10-23 12:42 ` Gregory Etelson
2023-10-23 12:42 ` [PATCH v4 06/10] net/mlx5/hws: support reformat for hws mirror Gregory Etelson
` (4 subsequent siblings)
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-23 12:42 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Erez Shitrit, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Erez Shitrit <erezsh@nvidia.com>
In FDB it will direct the packet into the hypervisor vport.
That allows the user to mirror packets into the default-miss vport.
Signed-off-by: Erez Shitrit <erezsh@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/hws/mlx5dr_action.c | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c
index f068bc7e9c..6b62111593 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.c
+++ b/drivers/net/mlx5/hws/mlx5dr_action.c
@@ -1769,6 +1769,17 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
fte_attr.ignore_flow_level = 1;
break;
+ case MLX5DR_ACTION_TYP_MISS:
+ if (table_type != MLX5DR_TABLE_TYPE_FDB) {
+ DR_LOG(ERR, "Miss action supported for FDB only");
+ rte_errno = ENOTSUP;
+ goto free_dest_list;
+ }
+ dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest_list[i].destination_id =
+ ctx->caps->eswitch_manager_vport_number;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ break;
case MLX5DR_ACTION_TYP_VPORT:
dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest_list[i].destination_id = dests[i].dest->vport.vport_num;
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v4 06/10] net/mlx5/hws: support reformat for hws mirror
2023-10-23 12:42 ` [PATCH v4 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (4 preceding siblings ...)
2023-10-23 12:42 ` [PATCH v4 05/10] net/mlx5/hws: allow destination into default miss FT Gregory Etelson
@ 2023-10-23 12:42 ` Gregory Etelson
2023-10-23 12:42 ` [PATCH v4 07/10] net/mlx5: reformat HWS code for HWS mirror action Gregory Etelson
` (3 subsequent siblings)
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-23 12:42 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Haifei Luo, Shun Hao, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Haifei Luo <haifeil@nvidia.com>
In dest_array action, an optional reformat action can be applied to each
destination. This patch supports this by using the extended destination
entry.
Signed-off-by: Haifei Luo <haifeil@nvidia.com>
Signed-off-by: Shun Hao <shunh@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/common/mlx5/mlx5_prm.h | 15 +++++++
drivers/net/mlx5/hws/mlx5dr_action.c | 67 +++++++++++++++++++++++++++-
drivers/net/mlx5/hws/mlx5dr_action.h | 2 +
drivers/net/mlx5/hws/mlx5dr_cmd.c | 10 ++++-
drivers/net/mlx5/hws/mlx5dr_cmd.h | 2 +
5 files changed, 94 insertions(+), 2 deletions(-)
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index aa0b622ca2..bced5a59dd 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -5352,6 +5352,21 @@ enum mlx5_parse_graph_arc_node_index {
MLX5_GRAPH_ARC_NODE_PROGRAMMABLE = 0x1f,
};
+enum mlx5_packet_reformat_context_reformat_type {
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4 = 0x5,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L3_ESP_TUNNEL = 0x6,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4 = 0x7,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_DEL_ESP_TRANSPORT = 0x8,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L3_ESP_TUNNEL_TO_L2 = 0x9,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP = 0xA,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6 = 0xB,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6 = 0xC,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_NISP_TNL = 0xD,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_REMOVE_NISP_TNL = 0xE,
+};
+
#define MLX5_PARSE_GRAPH_FLOW_SAMPLE_MAX 8
#define MLX5_PARSE_GRAPH_IN_ARC_MAX 8
#define MLX5_PARSE_GRAPH_OUT_ARC_MAX 8
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c
index 6b62111593..11a7c58925 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.c
+++ b/drivers/net/mlx5/hws/mlx5dr_action.c
@@ -1703,6 +1703,44 @@ mlx5dr_action_create_modify_header(struct mlx5dr_context *ctx,
return NULL;
}
+static struct mlx5dr_devx_obj *
+mlx5dr_action_dest_array_process_reformat(struct mlx5dr_context *ctx,
+ enum mlx5dr_action_type type,
+ void *reformat_data,
+ size_t reformat_data_sz)
+{
+ struct mlx5dr_cmd_packet_reformat_create_attr pr_attr = {0};
+ struct mlx5dr_devx_obj *reformat_devx_obj;
+
+ if (!reformat_data || !reformat_data_sz) {
+ DR_LOG(ERR, "Empty reformat action or data");
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ switch (type) {
+ case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ pr_attr.type = MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L2_TUNNEL;
+ break;
+ case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ pr_attr.type = MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
+ break;
+ default:
+ DR_LOG(ERR, "Invalid value for reformat type");
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ pr_attr.reformat_param_0 = 0;
+ pr_attr.data_sz = reformat_data_sz;
+ pr_attr.data = reformat_data;
+
+ reformat_devx_obj = mlx5dr_cmd_packet_reformat_create(ctx->ibv_ctx, &pr_attr);
+ if (!reformat_devx_obj)
+ return NULL;
+
+ return reformat_devx_obj;
+}
+
struct mlx5dr_action *
mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
size_t num_dest,
@@ -1710,6 +1748,7 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
uint32_t flags)
{
struct mlx5dr_cmd_set_fte_dest *dest_list = NULL;
+ struct mlx5dr_devx_obj *packet_reformat = NULL;
struct mlx5dr_cmd_ft_create_attr ft_attr = {0};
struct mlx5dr_cmd_set_fte_attr fte_attr = {0};
struct mlx5dr_cmd_forward_tbl *fw_island;
@@ -1796,6 +1835,21 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
dest_list[i].destination_id = dests[i].dest->devx_dest.devx_obj->id;
fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
break;
+ case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ packet_reformat = mlx5dr_action_dest_array_process_reformat
+ (ctx,
+ *action_type,
+ dests[i].reformat.reformat_data,
+ dests[i].reformat.reformat_data_sz);
+ if (!packet_reformat)
+ goto free_dest_list;
+
+ dest_list[i].ext_flags |= MLX5DR_CMD_EXT_DEST_REFORMAT;
+ dest_list[i].ext_reformat = packet_reformat;
+ ft_attr.reformat_en = true;
+ fte_attr.extended_dest = 1;
+ break;
default:
DR_LOG(ERR, "Unsupported action in dest_array");
rte_errno = ENOTSUP;
@@ -1819,8 +1873,9 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
goto free_action;
action->dest_array.fw_island = fw_island;
+ action->dest_array.num_dest = num_dest;
+ action->dest_array.dest_list = dest_list;
- simple_free(dest_list);
return action;
free_action:
@@ -1828,6 +1883,10 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
destroy_fw_island:
mlx5dr_cmd_forward_tbl_destroy(fw_island);
free_dest_list:
+ for (i = 0; i < num_dest; i++) {
+ if (dest_list[i].ext_reformat)
+ mlx5dr_cmd_destroy_obj(dest_list[i].ext_reformat);
+ }
simple_free(dest_list);
return NULL;
}
@@ -1917,6 +1976,12 @@ static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action)
case MLX5DR_ACTION_TYP_DEST_ARRAY:
mlx5dr_action_destroy_stcs(action);
mlx5dr_cmd_forward_tbl_destroy(action->dest_array.fw_island);
+ for (i = 0; i < action->dest_array.num_dest; i++) {
+ if (action->dest_array.dest_list[i].ext_reformat)
+ mlx5dr_cmd_destroy_obj
+ (action->dest_array.dest_list[i].ext_reformat);
+ }
+ simple_free(action->dest_array.dest_list);
break;
case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
case MLX5DR_ACTION_TYP_MODIFY_HDR:
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.h b/drivers/net/mlx5/hws/mlx5dr_action.h
index efe07c9d47..582a38bebc 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.h
+++ b/drivers/net/mlx5/hws/mlx5dr_action.h
@@ -153,6 +153,8 @@ struct mlx5dr_action {
} devx_dest;
struct {
struct mlx5dr_cmd_forward_tbl *fw_island;
+ size_t num_dest;
+ struct mlx5dr_cmd_set_fte_dest *dest_list;
} dest_array;
};
};
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c
index 22f7c6b019..781de40c02 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.c
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c
@@ -169,7 +169,9 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
uint32_t *in;
uint32_t i;
- dest_entry_sz = MLX5_ST_SZ_BYTES(dest_format);
+ dest_entry_sz = fte_attr->extended_dest ?
+ MLX5_ST_SZ_BYTES(extended_dest_format) :
+ MLX5_ST_SZ_BYTES(dest_format);
total_dest_sz = dest_entry_sz * fte_attr->dests_num;
inlen = align((MLX5_ST_SZ_BYTES(set_fte_in) + total_dest_sz), DW_SIZE);
in = simple_calloc(1, inlen);
@@ -192,6 +194,7 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_source, fte_attr->flow_source);
+ MLX5_SET(flow_context, in_flow_context, extended_destination, fte_attr->extended_dest);
MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
action_flags = fte_attr->action_flags;
@@ -230,6 +233,11 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
dest->destination_type);
MLX5_SET(dest_format, in_dests, destination_id,
dest->destination_id);
+ if (dest->ext_flags & MLX5DR_CMD_EXT_DEST_REFORMAT) {
+ MLX5_SET(dest_format, in_dests, packet_reformat, 1);
+ MLX5_SET(extended_dest_format, in_dests, packet_reformat_id,
+ dest->ext_reformat->id);
+ }
break;
default:
rte_errno = EOPNOTSUPP;
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h
index 6f4aa3e320..28e5ea4726 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.h
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h
@@ -14,6 +14,7 @@ struct mlx5dr_cmd_set_fte_dest {
uint8_t destination_type;
uint32_t destination_id;
enum mlx5dr_cmd_ext_dest_flags ext_flags;
+ struct mlx5dr_devx_obj *ext_reformat;
uint16_t esw_owner_vhca_id;
};
@@ -21,6 +22,7 @@ struct mlx5dr_cmd_set_fte_attr {
uint32_t action_flags;
uint8_t ignore_flow_level;
uint8_t flow_source;
+ uint8_t extended_dest;
uint8_t encrypt_decrypt_type;
uint32_t encrypt_decrypt_obj_id;
uint32_t packet_reformat_id;
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v4 07/10] net/mlx5: reformat HWS code for HWS mirror action
2023-10-23 12:42 ` [PATCH v4 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (5 preceding siblings ...)
2023-10-23 12:42 ` [PATCH v4 06/10] net/mlx5/hws: support reformat for hws mirror Gregory Etelson
@ 2023-10-23 12:42 ` Gregory Etelson
2023-10-23 12:42 ` [PATCH v4 08/10] net/mlx5: support " Gregory Etelson
` (2 subsequent siblings)
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-23 12:42 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Suanming Mou, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/mlx5_flow_hw.c | 70 ++++++++++++++++++---------------
1 file changed, 39 insertions(+), 31 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 6fcf654e4a..b2215fb5cf 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -4548,6 +4548,17 @@ static enum mlx5dr_action_type mlx5_hw_dr_action_types[] = {
[RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL] = MLX5DR_ACTION_TYP_DEST_ROOT,
};
+static inline void
+action_template_set_type(struct rte_flow_actions_template *at,
+ enum mlx5dr_action_type *action_types,
+ unsigned int action_src, uint16_t *curr_off,
+ enum mlx5dr_action_type type)
+{
+ at->actions_off[action_src] = *curr_off;
+ action_types[*curr_off] = type;
+ *curr_off = *curr_off + 1;
+}
+
static int
flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
unsigned int action_src,
@@ -4565,9 +4576,8 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
type = mask->type;
switch (type) {
case RTE_FLOW_ACTION_TYPE_RSS:
- at->actions_off[action_src] = *curr_off;
- action_types[*curr_off] = MLX5DR_ACTION_TYP_TIR;
- *curr_off = *curr_off + 1;
+ action_template_set_type(at, action_types, action_src, curr_off,
+ MLX5DR_ACTION_TYP_TIR);
break;
case RTE_FLOW_ACTION_TYPE_AGE:
case RTE_FLOW_ACTION_TYPE_COUNT:
@@ -4575,23 +4585,20 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
* Both AGE and COUNT action need counter, the first one fills
* the action_types array, and the second only saves the offset.
*/
- if (*cnt_off == UINT16_MAX) {
- *cnt_off = *curr_off;
- action_types[*cnt_off] = MLX5DR_ACTION_TYP_CTR;
- *curr_off = *curr_off + 1;
- }
+ if (*cnt_off == UINT16_MAX)
+ action_template_set_type(at, action_types,
+ action_src, curr_off,
+ MLX5DR_ACTION_TYP_CTR);
at->actions_off[action_src] = *cnt_off;
break;
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
- at->actions_off[action_src] = *curr_off;
- action_types[*curr_off] = MLX5DR_ACTION_TYP_ASO_CT;
- *curr_off = *curr_off + 1;
+ action_template_set_type(at, action_types, action_src, curr_off,
+ MLX5DR_ACTION_TYP_ASO_CT);
break;
case RTE_FLOW_ACTION_TYPE_QUOTA:
case RTE_FLOW_ACTION_TYPE_METER_MARK:
- at->actions_off[action_src] = *curr_off;
- action_types[*curr_off] = MLX5DR_ACTION_TYP_ASO_METER;
- *curr_off = *curr_off + 1;
+ action_template_set_type(at, action_types, action_src, curr_off,
+ MLX5DR_ACTION_TYP_ASO_METER);
break;
default:
DRV_LOG(WARNING, "Unsupported shared action type: %d", type);
@@ -5101,31 +5108,32 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
at->reformat_off = UINT16_MAX;
at->mhdr_off = UINT16_MAX;
at->rx_cpy_pos = pos;
- /*
- * mlx5 PMD hacks indirect action index directly to the action conf.
- * The rte_flow_conv() function copies the content from conf pointer.
- * Need to restore the indirect action index from action conf here.
- */
for (i = 0; actions->type != RTE_FLOW_ACTION_TYPE_END;
actions++, masks++, i++) {
- if (actions->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
+ const struct rte_flow_action_modify_field *info;
+
+ switch (actions->type) {
+ /*
+ * mlx5 PMD hacks indirect action index directly to the action conf.
+ * The rte_flow_conv() function copies the content from conf pointer.
+ * Need to restore the indirect action index from action conf here.
+ */
+ case RTE_FLOW_ACTION_TYPE_INDIRECT:
at->actions[i].conf = actions->conf;
at->masks[i].conf = masks->conf;
- }
- if (actions->type == RTE_FLOW_ACTION_TYPE_MODIFY_FIELD) {
- const struct rte_flow_action_modify_field *info = actions->conf;
-
+ break;
+ case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
+ info = actions->conf;
if ((info->dst.field == RTE_FLOW_FIELD_FLEX_ITEM &&
flow_hw_flex_item_acquire(dev, info->dst.flex_handle,
&at->flex_item)) ||
- (info->src.field == RTE_FLOW_FIELD_FLEX_ITEM &&
- flow_hw_flex_item_acquire(dev, info->src.flex_handle,
- &at->flex_item))) {
- rte_flow_error_set(error, rte_errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "Failed to acquire flex item");
+ (info->src.field == RTE_FLOW_FIELD_FLEX_ITEM &&
+ flow_hw_flex_item_acquire(dev, info->src.flex_handle,
+ &at->flex_item)))
goto error;
- }
+ break;
+ default:
+ break;
}
}
at->tmpl = flow_hw_dr_actions_template_create(at);
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v4 08/10] net/mlx5: support HWS mirror action
2023-10-23 12:42 ` [PATCH v4 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (6 preceding siblings ...)
2023-10-23 12:42 ` [PATCH v4 07/10] net/mlx5: reformat HWS code for HWS mirror action Gregory Etelson
@ 2023-10-23 12:42 ` Gregory Etelson
2023-10-23 12:42 ` [PATCH v4 09/10] net/mlx5: reformat HWS code for indirect list actions Gregory Etelson
2023-10-23 12:42 ` [PATCH v4 10/10] net/mlx5: support indirect list METER_MARK action Gregory Etelson
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-23 12:42 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Suanming Mou, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam
HWS mirror clones original packet to one or two destinations and
proceeds with the original packet path.
The mirror has no dedicated RTE flow action type.
Mirror object is referenced by INDIRECT_LIST action.
INDIRECT_LIST for a mirror built from actions list:
SAMPLE [/ SAMPLE] / <Orig. packet destination> / END
Mirror SAMPLE action defines packet clone. It specifies the clone
destination and optional clone reformat action.
Destination action for both clone and original packet depends on HCA
domain:
- for NIC RX, destination is either RSS or QUEUE
- for FDB, destination is PORT
HWS mirror was implemented with the INDIRECT_LIST flow action.
MLX5 PMD defines general `struct mlx5_indirect_list` type for all.
INDIRECT_LIST handler objects:
struct mlx5_indirect_list {
enum mlx5_indirect_list_type type;
LIST_ENTRY(mlx5_indirect_list) chain;
char data[];
};
Specific INDIRECT_LIST type must overload `mlx5_indirect_list::data`
and provide unique `type` value.
PMD returns a pointer to `mlx5_indirect_list` object.
Existing non-masked actions template API cannot identify flow actions
in INDIRECT_LIST handler because INDIRECT_LIST handler can represent
several flow actions.
For example:
A: SAMPLE / JUMP
B: SAMPLE / SAMPLE / RSS
Actions template command
template indirect_list / end mask indirect_list 0 / end
does not provide any information to differentiate between flow
actions in A and B.
MLX5 PMD requires INDIRECT_LIST configuration parameter in the
template section:
Non-masked INDIRECT_LIST API:
=============================
template indirect_list X / end mask indirect_list 0 / end
PMD identifies the type of the X handler and will use the same type in
template creation. Actual parameters for actions in the list will
be extracted from the flow configuration.
Masked INDIRECT_LIST API:
=========================
template indirect_list X / end mask indirect_list -1UL / end
PMD creates action template from actions types and configurations
referenced by X.
INDIRECT_LIST action without configuration is invalid and will be
rejected by PMD.
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/mlx5.c | 1 +
drivers/net/mlx5/mlx5.h | 2 +
drivers/net/mlx5/mlx5_flow.c | 134 +++++++
drivers/net/mlx5/mlx5_flow.h | 69 +++-
drivers/net/mlx5/mlx5_flow_hw.c | 616 +++++++++++++++++++++++++++++++-
5 files changed, 817 insertions(+), 5 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 997df595d0..08b7b03365 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -2168,6 +2168,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
/* Free the eCPRI flex parser resource. */
mlx5_flex_parser_ecpri_release(dev);
mlx5_flex_item_port_cleanup(dev);
+ mlx5_indirect_list_handles_release(dev);
#ifdef HAVE_MLX5_HWS_SUPPORT
flow_hw_destroy_vport_action(dev);
flow_hw_resource_release(dev);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 0b709a1bda..f3b872f59c 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1791,6 +1791,8 @@ struct mlx5_priv {
LIST_HEAD(ind_tables, mlx5_ind_table_obj) ind_tbls;
/* Standalone indirect tables. */
LIST_HEAD(stdl_ind_tables, mlx5_ind_table_obj) standalone_ind_tbls;
+ /* Objects created with indirect list action */
+ LIST_HEAD(indirect_list, mlx5_indirect_list) indirect_list_head;
/* Pointer to next element. */
rte_rwlock_t ind_tbls_lock;
uint32_t refcnt; /**< Reference counter. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 8ad85e6027..693d1320e1 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -62,6 +62,30 @@ struct tunnel_default_miss_ctx {
};
};
+void
+mlx5_indirect_list_handles_release(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ while (!LIST_EMPTY(&priv->indirect_list_head)) {
+ struct mlx5_indirect_list *e =
+ LIST_FIRST(&priv->indirect_list_head);
+
+ LIST_REMOVE(e, entry);
+ switch (e->type) {
+#ifdef HAVE_MLX5_HWS_SUPPORT
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
+ mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)e, true);
+ break;
+#endif
+ default:
+ DRV_LOG(ERR, "invalid indirect list type");
+ MLX5_ASSERT(false);
+ break;
+ }
+ }
+}
+
static int
flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
struct rte_flow *flow,
@@ -1120,6 +1144,32 @@ mlx5_flow_async_action_handle_query_update
enum rte_flow_query_update_mode qu_mode,
void *user_data, struct rte_flow_error *error);
+static struct rte_flow_action_list_handle *
+mlx5_action_list_handle_create(struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error);
+
+static int
+mlx5_action_list_handle_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_action_list_handle *handle,
+ struct rte_flow_error *error);
+
+static struct rte_flow_action_list_handle *
+mlx5_flow_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct
+ rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data,
+ struct rte_flow_error *error);
+static int
+mlx5_flow_async_action_list_handle_destroy
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow_action_list_handle *action_handle,
+ void *user_data, struct rte_flow_error *error);
+
static const struct rte_flow_ops mlx5_flow_ops = {
.validate = mlx5_flow_validate,
.create = mlx5_flow_create,
@@ -1135,6 +1185,8 @@ static const struct rte_flow_ops mlx5_flow_ops = {
.action_handle_update = mlx5_action_handle_update,
.action_handle_query = mlx5_action_handle_query,
.action_handle_query_update = mlx5_action_handle_query_update,
+ .action_list_handle_create = mlx5_action_list_handle_create,
+ .action_list_handle_destroy = mlx5_action_list_handle_destroy,
.tunnel_decap_set = mlx5_flow_tunnel_decap_set,
.tunnel_match = mlx5_flow_tunnel_match,
.tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
@@ -1163,6 +1215,10 @@ static const struct rte_flow_ops mlx5_flow_ops = {
.async_action_handle_query = mlx5_flow_async_action_handle_query,
.async_action_handle_destroy = mlx5_flow_async_action_handle_destroy,
.async_actions_update = mlx5_flow_async_flow_update,
+ .async_action_list_handle_create =
+ mlx5_flow_async_action_list_handle_create,
+ .async_action_list_handle_destroy =
+ mlx5_flow_async_action_list_handle_destroy,
};
/* Tunnel information. */
@@ -10869,6 +10925,84 @@ mlx5_action_handle_query_update(struct rte_eth_dev *dev,
query, qu_mode, error);
}
+
+#define MLX5_DRV_FOPS_OR_ERR(dev, fops, drv_cb, ret) \
+{ \
+ struct rte_flow_attr attr = { .transfer = 0 }; \
+ enum mlx5_flow_drv_type drv_type = flow_get_drv_type((dev), &attr); \
+ if (drv_type == MLX5_FLOW_TYPE_MIN || \
+ drv_type == MLX5_FLOW_TYPE_MAX) { \
+ rte_flow_error_set(error, ENOTSUP, \
+ RTE_FLOW_ERROR_TYPE_ACTION, \
+ NULL, "invalid driver type"); \
+ return ret; \
+ } \
+ (fops) = flow_get_drv_ops(drv_type); \
+ if (!(fops) || !(fops)->drv_cb) { \
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, \
+ NULL, "no action_list handler"); \
+ return ret; \
+ } \
+}
+
+static struct rte_flow_action_list_handle *
+mlx5_action_list_handle_create(struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops, action_list_handle_create, NULL);
+ return fops->action_list_handle_create(dev, conf, actions, error);
+}
+
+static int
+mlx5_action_list_handle_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_action_list_handle *handle,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops, action_list_handle_destroy, ENOTSUP);
+ return fops->action_list_handle_destroy(dev, handle, error);
+}
+
+static struct rte_flow_action_list_handle *
+mlx5_flow_async_action_list_handle_create(struct rte_eth_dev *dev,
+ uint32_t queue_id,
+ const struct
+ rte_flow_op_attr *op_attr,
+ const struct
+ rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops, async_action_list_handle_create, NULL);
+ return fops->async_action_list_handle_create(dev, queue_id, op_attr,
+ conf, actions, user_data,
+ error);
+}
+
+static int
+mlx5_flow_async_action_list_handle_destroy
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow_action_list_handle *action_handle,
+ void *user_data, struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops,
+ async_action_list_handle_destroy, ENOTSUP);
+ return fops->async_action_list_handle_destroy(dev, queue_id, op_attr,
+ action_handle, user_data,
+ error);
+}
+
/**
* Destroy all indirect actions (shared RSS).
*
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 903ff66d72..f6a752475d 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -65,7 +65,7 @@ enum mlx5_rte_flow_field_id {
(((uint32_t)(uintptr_t)(handle)) & \
((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1))
-enum {
+enum mlx5_indirect_type{
MLX5_INDIRECT_ACTION_TYPE_RSS,
MLX5_INDIRECT_ACTION_TYPE_AGE,
MLX5_INDIRECT_ACTION_TYPE_COUNT,
@@ -97,6 +97,28 @@ enum {
#define MLX5_ACTION_CTX_CT_GET_OWNER MLX5_INDIRECT_ACT_CT_GET_OWNER
#define MLX5_ACTION_CTX_CT_GEN_IDX MLX5_INDIRECT_ACT_CT_GEN_IDX
+enum mlx5_indirect_list_type {
+ MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 1,
+};
+
+/*
+ * Base type for indirect list type.
+ * Actual indirect list type MUST override that type and put type spec data
+ * after the `chain`.
+ */
+struct mlx5_indirect_list {
+ /* type field MUST be the first */
+ enum mlx5_indirect_list_type type;
+ LIST_ENTRY(mlx5_indirect_list) entry;
+ /* put type specific data after chain */
+};
+
+static __rte_always_inline enum mlx5_indirect_list_type
+mlx5_get_indirect_list_type(const struct mlx5_indirect_list *obj)
+{
+ return obj->type;
+}
+
/* Matches on selected register. */
struct mlx5_rte_flow_item_tag {
enum modify_reg id;
@@ -1218,6 +1240,10 @@ struct rte_flow_hw {
#pragma GCC diagnostic error "-Wpedantic"
#endif
+struct mlx5dr_action;
+typedef struct mlx5dr_action *
+(*indirect_list_callback_t)(const struct rte_flow_action *);
+
/* rte flow action translate to DR action struct. */
struct mlx5_action_construct_data {
LIST_ENTRY(mlx5_action_construct_data) next;
@@ -1266,6 +1292,9 @@ struct mlx5_action_construct_data {
struct {
uint32_t id;
} shared_meter;
+ struct {
+ indirect_list_callback_t cb;
+ } indirect_list;
};
};
@@ -1776,6 +1805,17 @@ typedef int (*mlx5_flow_action_query_update_t)
const void *update, void *data,
enum rte_flow_query_update_mode qu_mode,
struct rte_flow_error *error);
+typedef struct rte_flow_action_list_handle *
+(*mlx5_flow_action_list_handle_create_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error);
+typedef int
+(*mlx5_flow_action_list_handle_destroy_t)
+ (struct rte_eth_dev *dev,
+ struct rte_flow_action_list_handle *handle,
+ struct rte_flow_error *error);
typedef int (*mlx5_flow_sync_domain_t)
(struct rte_eth_dev *dev,
uint32_t domains,
@@ -1964,6 +2004,20 @@ typedef int (*mlx5_flow_async_action_handle_destroy_t)
struct rte_flow_action_handle *handle,
void *user_data,
struct rte_flow_error *error);
+typedef struct rte_flow_action_list_handle *
+(*mlx5_flow_async_action_list_handle_create_t)
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data, struct rte_flow_error *error);
+typedef int
+(*mlx5_flow_async_action_list_handle_destroy_t)
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow_action_list_handle *action_handle,
+ void *user_data, struct rte_flow_error *error);
+
struct mlx5_flow_driver_ops {
mlx5_flow_validate_t validate;
@@ -1999,6 +2053,8 @@ struct mlx5_flow_driver_ops {
mlx5_flow_action_update_t action_update;
mlx5_flow_action_query_t action_query;
mlx5_flow_action_query_update_t action_query_update;
+ mlx5_flow_action_list_handle_create_t action_list_handle_create;
+ mlx5_flow_action_list_handle_destroy_t action_list_handle_destroy;
mlx5_flow_sync_domain_t sync_domain;
mlx5_flow_discover_priorities_t discover_priorities;
mlx5_flow_item_create_t item_create;
@@ -2025,6 +2081,10 @@ struct mlx5_flow_driver_ops {
mlx5_flow_async_action_handle_query_update_t async_action_query_update;
mlx5_flow_async_action_handle_query_t async_action_query;
mlx5_flow_async_action_handle_destroy_t async_action_destroy;
+ mlx5_flow_async_action_list_handle_create_t
+ async_action_list_handle_create;
+ mlx5_flow_async_action_list_handle_destroy_t
+ async_action_list_handle_destroy;
};
/* mlx5_flow.c */
@@ -2755,4 +2815,11 @@ flow_hw_get_srh_flex_parser_byte_off_from_ctx(void *dr_ctx __rte_unused)
#endif
return UINT32_MAX;
}
+void
+mlx5_indirect_list_handles_release(struct rte_eth_dev *dev);
+#ifdef HAVE_MLX5_HWS_SUPPORT
+struct mlx5_mirror;
+void
+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release);
+#endif
#endif /* RTE_PMD_MLX5_FLOW_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index b2215fb5cf..22a6508ae8 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -58,6 +58,24 @@
#define MLX5_HW_VLAN_PUSH_VID_IDX 1
#define MLX5_HW_VLAN_PUSH_PCP_IDX 2
+#define MLX5_MIRROR_MAX_CLONES_NUM 3
+#define MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN 4
+
+struct mlx5_mirror_clone {
+ enum rte_flow_action_type type;
+ void *action_ctx;
+};
+
+struct mlx5_mirror {
+ /* type field MUST be the first */
+ enum mlx5_indirect_list_type type;
+ LIST_ENTRY(mlx5_indirect_list) entry;
+
+ uint32_t clones_num;
+ struct mlx5dr_action *mirror_action;
+ struct mlx5_mirror_clone clone[MLX5_MIRROR_MAX_CLONES_NUM];
+};
+
static int flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev);
static int flow_hw_translate_group(struct rte_eth_dev *dev,
const struct mlx5_flow_template_table_cfg *cfg,
@@ -568,6 +586,22 @@ __flow_hw_act_data_general_append(struct mlx5_priv *priv,
return 0;
}
+static __rte_always_inline int
+flow_hw_act_data_indirect_list_append(struct mlx5_priv *priv,
+ struct mlx5_hw_actions *acts,
+ enum rte_flow_action_type type,
+ uint16_t action_src, uint16_t action_dst,
+ indirect_list_callback_t cb)
+{
+ struct mlx5_action_construct_data *act_data;
+
+ act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
+ if (!act_data)
+ return -1;
+ act_data->indirect_list.cb = cb;
+ LIST_INSERT_HEAD(&acts->act_list, act_data, next);
+ return 0;
+}
/**
* Append dynamic encap action to the dynamic action list.
*
@@ -1383,6 +1417,48 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev,
return 0;
}
+static struct mlx5dr_action *
+flow_hw_mirror_action(const struct rte_flow_action *action)
+{
+ struct mlx5_mirror *mirror = (void *)(uintptr_t)action->conf;
+
+ return mirror->mirror_action;
+}
+
+static int
+table_template_translate_indirect_list(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *mask,
+ struct mlx5_hw_actions *acts,
+ uint16_t action_src,
+ uint16_t action_dst)
+{
+ int ret;
+ bool is_masked = action->conf && mask->conf;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ enum mlx5_indirect_list_type type;
+
+ if (!action->conf)
+ return -EINVAL;
+ type = mlx5_get_indirect_list_type(action->conf);
+ switch (type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
+ if (is_masked) {
+ acts->rule_acts[action_dst].action = flow_hw_mirror_action(action);
+ } else {
+ ret = flow_hw_act_data_indirect_list_append
+ (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
+ action_src, action_dst, flow_hw_mirror_action);
+ if (ret)
+ return ret;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
/**
* Translate rte_flow actions to DR action.
*
@@ -1419,7 +1495,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
struct rte_flow_action *actions = at->actions;
struct rte_flow_action *action_start = actions;
struct rte_flow_action *masks = at->masks;
- enum mlx5dr_action_type refmt_type = 0;
+ enum mlx5dr_action_type refmt_type = MLX5DR_ACTION_TYP_LAST;
const struct rte_flow_action_raw_encap *raw_encap_data;
const struct rte_flow_item *enc_item = NULL, *enc_item_m = NULL;
uint16_t reformat_src = 0;
@@ -1433,7 +1509,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
uint16_t action_pos;
uint16_t jump_pos;
uint32_t ct_idx;
- int err;
+ int ret, err;
uint32_t target_grp = 0;
int table_type;
@@ -1445,7 +1521,20 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
else
type = MLX5DR_TABLE_TYPE_NIC_RX;
for (; !actions_end; actions++, masks++) {
- switch (actions->type) {
+ switch ((int)actions->type) {
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
+ action_pos = at->actions_off[actions - at->actions];
+ if (!attr->group) {
+ DRV_LOG(ERR, "Indirect action is not supported in root table.");
+ goto err;
+ }
+ ret = table_template_translate_indirect_list
+ (dev, actions, masks, acts,
+ actions - action_start,
+ action_pos);
+ if (ret)
+ goto err;
+ break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
action_pos = at->actions_off[actions - at->actions];
if (!attr->group) {
@@ -2301,7 +2390,11 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
MLX5_ASSERT(action->type ==
RTE_FLOW_ACTION_TYPE_INDIRECT ||
(int)action->type == act_data->type);
- switch (act_data->type) {
+ switch ((int)act_data->type) {
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
+ rule_acts[act_data->action_dst].action =
+ act_data->indirect_list.cb(action);
+ break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
if (flow_hw_shared_action_construct
(dev, queue, action, table, it_idx,
@@ -4366,6 +4459,8 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev,
switch (action->type) {
case RTE_FLOW_ACTION_TYPE_VOID:
break;
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
+ break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
ret = flow_hw_validate_action_indirect(dev, action,
mask,
@@ -4607,6 +4702,28 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
return 0;
}
+
+static int
+flow_hw_template_actions_list(struct rte_flow_actions_template *at,
+ unsigned int action_src,
+ enum mlx5dr_action_type *action_types,
+ uint16_t *curr_off)
+{
+ enum mlx5_indirect_list_type list_type;
+
+ list_type = mlx5_get_indirect_list_type(at->actions[action_src].conf);
+ switch (list_type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
+ action_template_set_type(at, action_types, action_src, curr_off,
+ MLX5DR_ACTION_TYP_DEST_ARRAY);
+ break;
+ default:
+ DRV_LOG(ERR, "Unsupported indirect list type");
+ return -EINVAL;
+ }
+ return 0;
+}
+
/**
* Create DR action template based on a provided sequence of flow actions.
*
@@ -4639,6 +4756,12 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
switch (at->actions[i].type) {
case RTE_FLOW_ACTION_TYPE_VOID:
break;
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
+ ret = flow_hw_template_actions_list(at, i, action_types,
+ &curr_off);
+ if (ret)
+ return NULL;
+ break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
ret = flow_hw_dr_actions_template_handle_shared
(&at->masks[i],
@@ -5119,6 +5242,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
* Need to restore the indirect action index from action conf here.
*/
case RTE_FLOW_ACTION_TYPE_INDIRECT:
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
at->actions[i].conf = actions->conf;
at->masks[i].conf = masks->conf;
break;
@@ -9354,6 +9478,484 @@ flow_hw_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
return flow_hw_get_q_aged_flows(dev, 0, contexts, nb_contexts, error);
}
+static void
+mlx5_mirror_destroy_clone(struct rte_eth_dev *dev,
+ struct mlx5_mirror_clone *clone)
+{
+ switch (clone->type) {
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ mlx5_hrxq_release(dev,
+ ((struct mlx5_hrxq *)(clone->action_ctx))->idx);
+ break;
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ flow_hw_jump_release(dev, clone->action_ctx);
+ break;
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ default:
+ break;
+ }
+}
+
+void
+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release)
+{
+ uint32_t i;
+
+ if (mirror->entry.le_prev)
+ LIST_REMOVE(mirror, entry);
+ for(i = 0; i < mirror->clones_num; i++)
+ mlx5_mirror_destroy_clone(dev, &mirror->clone[i]);
+ if (mirror->mirror_action)
+ mlx5dr_action_destroy(mirror->mirror_action);
+ if (release)
+ mlx5_free(mirror);
+}
+
+static inline enum mlx5dr_table_type
+get_mlx5dr_table_type(const struct rte_flow_attr *attr)
+{
+ enum mlx5dr_table_type type;
+
+ if (attr->transfer)
+ type = MLX5DR_TABLE_TYPE_FDB;
+ else if (attr->egress)
+ type = MLX5DR_TABLE_TYPE_NIC_TX;
+ else
+ type = MLX5DR_TABLE_TYPE_NIC_RX;
+ return type;
+}
+
+static __rte_always_inline bool
+mlx5_mirror_terminal_action(const struct rte_flow_action *action)
+{
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+static bool
+mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ switch(action->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ if (priv->sh->esw_mode)
+ return false;
+ break;
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ if (!priv->sh->esw_mode)
+ return false;
+ if (action[0].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP &&
+ action[1].type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
+ return false;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+/**
+ * Valid mirror actions list includes one or two SAMPLE actions
+ * followed by JUMP.
+ *
+ * @return
+ * Number of mirrors if the *actions* list was valid,
+ * -EINVAL otherwise.
+ */
+static int
+mlx5_hw_mirror_actions_list_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions)
+{
+ if (actions[0].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
+ int i = 1;
+ bool valid;
+ const struct rte_flow_action_sample *sample = actions[0].conf;
+ valid = mlx5_mirror_validate_sample_action(dev, sample->actions);
+ if (!valid)
+ return -EINVAL;
+ if (actions[1].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
+ i = 2;
+ sample = actions[1].conf;
+ valid = mlx5_mirror_validate_sample_action(dev, sample->actions);
+ if (!valid)
+ return -EINVAL;
+ }
+ return mlx5_mirror_terminal_action(actions + i) ? i + 1 : -EINVAL;
+ }
+ return -EINVAL;
+}
+
+static int
+mirror_format_tir(struct rte_eth_dev *dev,
+ struct mlx5_mirror_clone *clone,
+ const struct mlx5_flow_template_table_cfg *table_cfg,
+ const struct rte_flow_action *action,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ struct rte_flow_error *error)
+{
+ uint32_t hws_flags;
+ enum mlx5dr_table_type table_type;
+ struct mlx5_hrxq* tir_ctx;
+
+ table_type = get_mlx5dr_table_type(&table_cfg->attr.flow_attr);
+ hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
+ tir_ctx = flow_hw_tir_action_register(dev, hws_flags, action);
+ if (!tir_ctx)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action, "failed to create QUEUE action for mirror clone");
+ dest_attr->dest = tir_ctx->action;
+ clone->action_ctx = tir_ctx;
+ return 0;
+}
+
+static int
+mirror_format_jump(struct rte_eth_dev *dev,
+ struct mlx5_mirror_clone *clone,
+ const struct mlx5_flow_template_table_cfg *table_cfg,
+ const struct rte_flow_action *action,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_jump *jump_conf = action->conf;
+ struct mlx5_hw_jump_action *jump = flow_hw_jump_action_register
+ (dev, table_cfg,
+ jump_conf->group, error);
+
+ if (!jump)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action, "failed to create JUMP action for mirror clone");
+ dest_attr->dest = jump->hws_action;
+ clone->action_ctx = jump;
+ return 0;
+}
+
+static int
+mirror_format_port(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ struct rte_flow_error __rte_unused *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_ethdev *port_action = action->conf;
+
+ dest_attr->dest = priv->hw_vport[port_action->port_id];
+ return 0;
+}
+
+#define MLX5_CONST_ENCAP_ITEM(encap_type, ptr) \
+(((const struct encap_type *)(ptr))->definition)
+
+static int
+hw_mirror_clone_reformat(const struct rte_flow_action *actions,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ enum mlx5dr_action_type *action_type, bool decap)
+{
+ int ret;
+ uint8_t encap_buf[MLX5_ENCAP_MAX_LEN];
+ const struct rte_flow_item *encap_item = NULL;
+ const struct rte_flow_action_raw_encap *encap_conf = NULL;
+ typeof(dest_attr->reformat) *reformat = &dest_attr->reformat;
+
+ switch (actions[0].type) {
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ encap_conf = actions[0].conf;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ encap_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_vxlan_encap,
+ actions);
+ break;
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ encap_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_nvgre_encap,
+ actions);
+ break;
+ default:
+ return -EINVAL;
+ }
+ *action_type = decap ?
+ MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3 :
+ MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
+ if (encap_item) {
+ ret = flow_dv_convert_encap_data(encap_item, encap_buf,
+ &reformat->reformat_data_sz, NULL);
+ if (ret)
+ return -EINVAL;
+ reformat->reformat_data = (void *)(uintptr_t)encap_buf;
+ } else {
+ reformat->reformat_data = (void *)(uintptr_t)encap_conf->data;
+ reformat->reformat_data_sz = encap_conf->size;
+ }
+ return 0;
+}
+
+static int
+hw_mirror_format_clone(struct rte_eth_dev *dev,
+ struct mlx5_mirror_clone *clone,
+ const struct mlx5_flow_template_table_cfg *table_cfg,
+ const struct rte_flow_action *actions,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ struct rte_flow_error *error)
+{
+ int ret;
+ uint32_t i;
+ bool decap_seen = false;
+
+ for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
+ dest_attr->action_type[i] = mlx5_hw_dr_action_types[actions[i].type];
+ switch (actions[i].type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ ret = mirror_format_tir(dev, clone, table_cfg,
+ &actions[i], dest_attr, error);
+ if (ret)
+ return ret;
+ break;
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ ret = mirror_format_port(dev, &actions[i],
+ dest_attr, error);
+ if (ret)
+ return ret;
+ break;
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ ret = mirror_format_jump(dev, clone, table_cfg,
+ &actions[i], dest_attr, error);
+ if (ret)
+ return ret;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ decap_seen = true;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ ret = hw_mirror_clone_reformat(&actions[i], dest_attr,
+ &dest_attr->action_type[i],
+ decap_seen);
+ if (ret < 0)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ &actions[i],
+ "failed to create reformat action");
+ break;
+ default:
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ &actions[i], "unsupported sample action");
+ }
+ clone->type = actions->type;
+ }
+ dest_attr->action_type[i] = MLX5DR_ACTION_TYP_LAST;
+ return 0;
+}
+
+static struct rte_flow_action_list_handle *
+mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
+ const struct mlx5_flow_template_table_cfg *table_cfg,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ uint32_t hws_flags;
+ int ret = 0, i, clones_num;
+ struct mlx5_mirror *mirror;
+ enum mlx5dr_table_type table_type;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5dr_action_dest_attr mirror_attr[MLX5_MIRROR_MAX_CLONES_NUM + 1];
+ enum mlx5dr_action_type array_action_types[MLX5_MIRROR_MAX_CLONES_NUM + 1]
+ [MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN + 1];
+
+ memset(mirror_attr, 0, sizeof(mirror_attr));
+ memset(array_action_types, 0, sizeof(array_action_types));
+ table_type = get_mlx5dr_table_type(&table_cfg->attr.flow_attr);
+ hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
+ clones_num = mlx5_hw_mirror_actions_list_validate(dev, actions);
+ if (clones_num < 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Invalid mirror list format");
+ return NULL;
+ }
+ mirror = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mirror),
+ 0, SOCKET_ID_ANY);
+ if (!mirror) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Failed to allocate mirror context");
+ return NULL;
+ }
+ mirror->type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
+ mirror->clones_num = clones_num;
+ for (i = 0; i < clones_num; i++) {
+ const struct rte_flow_action *clone_actions;
+
+ mirror_attr[i].action_type = array_action_types[i];
+ if (actions[i].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
+ const struct rte_flow_action_sample *sample = actions[i].conf;
+
+ clone_actions = sample->actions;
+ } else {
+ clone_actions = &actions[i];
+ }
+ ret = hw_mirror_format_clone(dev, &mirror->clone[i], table_cfg,
+ clone_actions, &mirror_attr[i],
+ error);
+
+ if (ret)
+ goto error;
+
+ }
+ hws_flags |= MLX5DR_ACTION_FLAG_SHARED;
+ mirror->mirror_action = mlx5dr_action_create_dest_array(priv->dr_ctx,
+ clones_num,
+ mirror_attr,
+ hws_flags);
+ if (!mirror->mirror_action) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Failed to create HWS mirror action");
+ goto error;
+ }
+
+ LIST_INSERT_HEAD(&priv->indirect_list_head,
+ (struct mlx5_indirect_list *)mirror, entry);
+ return (struct rte_flow_action_list_handle *)mirror;
+
+error:
+ mlx5_hw_mirror_destroy(dev, mirror, true);
+ return NULL;
+}
+
+static struct rte_flow_action_list_handle *
+flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data,
+ struct rte_flow_error *error)
+{
+ struct mlx5_hw_q_job *job = NULL;
+ bool push = flow_hw_action_push(attr);
+ struct rte_flow_action_list_handle *handle;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct mlx5_flow_template_table_cfg table_cfg = {
+ .external = true,
+ .attr = {
+ .flow_attr = {
+ .ingress = conf->ingress,
+ .egress = conf->egress,
+ .transfer = conf->transfer
+ }
+ }
+ };
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "No action list");
+ return NULL;
+ }
+ if (attr) {
+ job = flow_hw_action_job_init(priv, queue, NULL, user_data,
+ NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
+ error);
+ if (!job)
+ return NULL;
+ }
+ switch (actions[0].type) {
+ case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ handle = mlx5_hw_mirror_handle_create(dev, &table_cfg,
+ actions, error);
+ break;
+ default:
+ handle = NULL;
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Invalid list");
+ }
+ if (job) {
+ job->action = handle;
+ flow_hw_action_finalize(dev, queue, job, push, false,
+ handle != NULL);
+ }
+ return handle;
+}
+
+static struct rte_flow_action_list_handle *
+flow_hw_action_list_handle_create(struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ return flow_hw_async_action_list_handle_create(dev, MLX5_HW_INV_QUEUE,
+ NULL, conf, actions,
+ NULL, error);
+}
+
+static int
+flow_hw_async_action_list_handle_destroy
+ (struct rte_eth_dev *dev, uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ struct rte_flow_action_list_handle *handle,
+ void *user_data, struct rte_flow_error *error)
+{
+ int ret = 0;
+ struct mlx5_hw_q_job *job = NULL;
+ bool push = flow_hw_action_push(attr);
+ struct mlx5_priv *priv = dev->data->dev_private;
+ enum mlx5_indirect_list_type type =
+ mlx5_get_indirect_list_type((void *)handle);
+
+ if (attr) {
+ job = flow_hw_action_job_init(priv, queue, NULL, user_data,
+ NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
+ error);
+ if (!job)
+ return rte_errno;
+ }
+ switch (type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
+ mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle, false);
+ break;
+ default:
+ handle = NULL;
+ ret = rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "Invalid indirect list handle");
+ }
+ if (job) {
+ job->action = handle;
+ flow_hw_action_finalize(dev, queue, job, push, false,
+ handle != NULL);
+ }
+ mlx5_free(handle);
+ return ret;
+}
+
+static int
+flow_hw_action_list_handle_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_action_list_handle *handle,
+ struct rte_flow_error *error)
+{
+ return flow_hw_async_action_list_handle_destroy(dev, MLX5_HW_INV_QUEUE,
+ NULL, handle, NULL,
+ error);
+}
+
const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.info_get = flow_hw_info_get,
.configure = flow_hw_configure,
@@ -9382,6 +9984,12 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.action_update = flow_hw_action_update,
.action_query = flow_hw_action_query,
.action_query_update = flow_hw_action_query_update,
+ .action_list_handle_create = flow_hw_action_list_handle_create,
+ .action_list_handle_destroy = flow_hw_action_list_handle_destroy,
+ .async_action_list_handle_create =
+ flow_hw_async_action_list_handle_create,
+ .async_action_list_handle_destroy =
+ flow_hw_async_action_list_handle_destroy,
.query = flow_hw_query,
.get_aged_flows = flow_hw_get_aged_flows,
.get_q_aged_flows = flow_hw_get_q_aged_flows,
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v4 09/10] net/mlx5: reformat HWS code for indirect list actions
2023-10-23 12:42 ` [PATCH v4 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (7 preceding siblings ...)
2023-10-23 12:42 ` [PATCH v4 08/10] net/mlx5: support " Gregory Etelson
@ 2023-10-23 12:42 ` Gregory Etelson
2023-10-23 12:42 ` [PATCH v4 10/10] net/mlx5: support indirect list METER_MARK action Gregory Etelson
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-23 12:42 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Suanming Mou, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/mlx5_flow.h | 4 +-
drivers/net/mlx5/mlx5_flow_hw.c | 252 +++++++++++++++++---------------
2 files changed, 140 insertions(+), 116 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index f6a752475d..19b26ad333 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1331,11 +1331,11 @@ struct rte_flow_actions_template {
uint64_t action_flags; /* Bit-map of all valid action in template. */
uint16_t dr_actions_num; /* Amount of DR rules actions. */
uint16_t actions_num; /* Amount of flow actions */
- uint16_t *actions_off; /* DR action offset for given rte action offset. */
+ uint16_t *dr_off; /* DR action offset for given rte action offset. */
+ uint16_t *src_off; /* RTE action displacement from app. template */
uint16_t reformat_off; /* Offset of DR reformat action. */
uint16_t mhdr_off; /* Offset of DR modify header action. */
uint32_t refcnt; /* Reference counter. */
- uint16_t rx_cpy_pos; /* Action position of Rx metadata to be copied. */
uint8_t flex_item; /* flex item index. */
};
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 22a6508ae8..e8544a4f2b 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -1015,11 +1015,11 @@ flow_hw_modify_field_init(struct mlx5_hw_modify_header_action *mhdr,
static __rte_always_inline int
flow_hw_modify_field_compile(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
- const struct rte_flow_action *action_start, /* Start of AT actions. */
const struct rte_flow_action *action, /* Current action from AT. */
const struct rte_flow_action *action_mask, /* Current mask from AT. */
struct mlx5_hw_actions *acts,
struct mlx5_hw_modify_header_action *mhdr,
+ uint16_t src_pos,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
@@ -1122,7 +1122,7 @@ flow_hw_modify_field_compile(struct rte_eth_dev *dev,
if (shared)
return 0;
ret = __flow_hw_act_data_hdr_modify_append(priv, acts, RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
- action - action_start, mhdr->pos,
+ src_pos, mhdr->pos,
cmds_start, cmds_end, shared,
field, dcopy, mask);
if (ret)
@@ -1181,11 +1181,10 @@ flow_hw_validate_compiled_modify_field(struct rte_eth_dev *dev,
static int
flow_hw_represented_port_compile(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
- const struct rte_flow_action *action_start,
const struct rte_flow_action *action,
const struct rte_flow_action *action_mask,
struct mlx5_hw_actions *acts,
- uint16_t action_dst,
+ uint16_t action_src, uint16_t action_dst,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
@@ -1241,7 +1240,7 @@ flow_hw_represented_port_compile(struct rte_eth_dev *dev,
} else {
ret = __flow_hw_act_data_general_append
(priv, acts, action->type,
- action - action_start, action_dst);
+ action_src, action_dst);
if (ret)
return rte_flow_error_set
(error, ENOMEM,
@@ -1493,7 +1492,6 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
const struct rte_flow_attr *attr = &table_attr->flow_attr;
struct rte_flow_action *actions = at->actions;
- struct rte_flow_action *action_start = actions;
struct rte_flow_action *masks = at->masks;
enum mlx5dr_action_type refmt_type = MLX5DR_ACTION_TYP_LAST;
const struct rte_flow_action_raw_encap *raw_encap_data;
@@ -1506,7 +1504,6 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
uint32_t type;
bool reformat_used = false;
unsigned int of_vlan_offset;
- uint16_t action_pos;
uint16_t jump_pos;
uint32_t ct_idx;
int ret, err;
@@ -1521,71 +1518,69 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
else
type = MLX5DR_TABLE_TYPE_NIC_RX;
for (; !actions_end; actions++, masks++) {
+ uint64_t pos = actions - at->actions;
+ uint16_t src_pos = pos - at->src_off[pos];
+ uint16_t dr_pos = at->dr_off[pos];
+
switch ((int)actions->type) {
case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
- action_pos = at->actions_off[actions - at->actions];
if (!attr->group) {
DRV_LOG(ERR, "Indirect action is not supported in root table.");
goto err;
}
ret = table_template_translate_indirect_list
- (dev, actions, masks, acts,
- actions - action_start,
- action_pos);
+ (dev, actions, masks, acts, src_pos, dr_pos);
if (ret)
goto err;
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
- action_pos = at->actions_off[actions - at->actions];
if (!attr->group) {
DRV_LOG(ERR, "Indirect action is not supported in root table.");
goto err;
}
if (actions->conf && masks->conf) {
if (flow_hw_shared_action_translate
- (dev, actions, acts, actions - action_start, action_pos))
+ (dev, actions, acts, src_pos, dr_pos))
goto err;
} else if (__flow_hw_act_data_general_append
- (priv, acts, actions->type,
- actions - action_start, action_pos)){
+ (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT,
+ src_pos, dr_pos)){
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_VOID:
break;
case RTE_FLOW_ACTION_TYPE_DROP:
- action_pos = at->actions_off[actions - at->actions];
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
priv->hw_drop[!!attr->group];
break;
case RTE_FLOW_ACTION_TYPE_MARK:
- action_pos = at->actions_off[actions - at->actions];
acts->mark = true;
if (masks->conf &&
((const struct rte_flow_action_mark *)
masks->conf)->id)
- acts->rule_acts[action_pos].tag.value =
+ acts->rule_acts[dr_pos].tag.value =
mlx5_flow_mark_set
(((const struct rte_flow_action_mark *)
(actions->conf))->id);
else if (__flow_hw_act_data_general_append(priv, acts,
- actions->type, actions - action_start, action_pos))
+ actions->type,
+ src_pos, dr_pos))
goto err;
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
priv->hw_tag[!!attr->group];
__atomic_fetch_add(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED);
flow_hw_rxq_flag_set(dev, true);
break;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
- action_pos = at->actions_off[actions - at->actions];
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
priv->hw_push_vlan[type];
if (is_template_masked_push_vlan(masks->conf))
- acts->rule_acts[action_pos].push_vlan.vlan_hdr =
+ acts->rule_acts[dr_pos].push_vlan.vlan_hdr =
vlan_hdr_to_be32(actions);
else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos))
+ src_pos, dr_pos))
goto err;
of_vlan_offset = is_of_vlan_pcp_present(actions) ?
MLX5_HW_VLAN_PUSH_PCP_IDX :
@@ -1594,12 +1589,10 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
masks += of_vlan_offset;
break;
case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
- action_pos = at->actions_off[actions - at->actions];
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
priv->hw_pop_vlan[type];
break;
case RTE_FLOW_ACTION_TYPE_JUMP:
- action_pos = at->actions_off[actions - at->actions];
if (masks->conf &&
((const struct rte_flow_action_jump *)
masks->conf)->group) {
@@ -1610,17 +1603,16 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
(dev, cfg, jump_group, error);
if (!acts->jump)
goto err;
- acts->rule_acts[action_pos].action = (!!attr->group) ?
- acts->jump->hws_action :
- acts->jump->root_action;
+ acts->rule_acts[dr_pos].action = (!!attr->group) ?
+ acts->jump->hws_action :
+ acts->jump->root_action;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)){
+ src_pos, dr_pos)){
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_QUEUE:
- action_pos = at->actions_off[actions - at->actions];
if (masks->conf &&
((const struct rte_flow_action_queue *)
masks->conf)->index) {
@@ -1630,16 +1622,15 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
actions);
if (!acts->tir)
goto err;
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
acts->tir->action;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)) {
+ src_pos, dr_pos)) {
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_RSS:
- action_pos = at->actions_off[actions - at->actions];
if (actions->conf && masks->conf) {
acts->tir = flow_hw_tir_action_register
(dev,
@@ -1647,11 +1638,11 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
actions);
if (!acts->tir)
goto err;
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
acts->tir->action;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)) {
+ src_pos, dr_pos)) {
goto err;
}
break;
@@ -1663,7 +1654,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
enc_item_m = ((const struct rte_flow_action_vxlan_encap *)
masks->conf)->definition;
reformat_used = true;
- reformat_src = actions - action_start;
+ reformat_src = src_pos;
refmt_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
break;
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
@@ -1674,7 +1665,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
enc_item_m = ((const struct rte_flow_action_nvgre_encap *)
masks->conf)->definition;
reformat_used = true;
- reformat_src = actions - action_start;
+ reformat_src = src_pos;
refmt_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
@@ -1704,7 +1695,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
refmt_type =
MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
}
- reformat_src = actions - action_start;
+ reformat_src = src_pos;
break;
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
reformat_used = true;
@@ -1720,34 +1711,22 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
NULL,
"Send to kernel action on root table is not supported in HW steering mode");
}
- action_pos = at->actions_off[actions - at->actions];
table_type = attr->ingress ? MLX5DR_TABLE_TYPE_NIC_RX :
((attr->egress) ? MLX5DR_TABLE_TYPE_NIC_TX :
- MLX5DR_TABLE_TYPE_FDB);
- acts->rule_acts[action_pos].action = priv->hw_send_to_kernel[table_type];
+ MLX5DR_TABLE_TYPE_FDB);
+ acts->rule_acts[dr_pos].action = priv->hw_send_to_kernel[table_type];
break;
case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
- err = flow_hw_modify_field_compile(dev, attr, action_start,
- actions, masks, acts, &mhdr,
- error);
+ err = flow_hw_modify_field_compile(dev, attr, actions,
+ masks, acts, &mhdr,
+ src_pos, error);
if (err)
goto err;
- /*
- * Adjust the action source position for the following.
- * ... / MODIFY_FIELD: rx_cpy_pos / (QUEUE|RSS) / ...
- * The next action will be Q/RSS, there will not be
- * another adjustment and the real source position of
- * the following actions will be decreased by 1.
- * No change of the total actions in the new template.
- */
- if ((actions - action_start) == at->rx_cpy_pos)
- action_start += 1;
break;
case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
- action_pos = at->actions_off[actions - at->actions];
if (flow_hw_represented_port_compile
- (dev, attr, action_start, actions,
- masks, acts, action_pos, error))
+ (dev, attr, actions,
+ masks, acts, src_pos, dr_pos, error))
goto err;
break;
case RTE_FLOW_ACTION_TYPE_METER:
@@ -1756,19 +1735,18 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
* Calculated DR offset is stored only for ASO_METER and FT
* is assumed to be the next action.
*/
- action_pos = at->actions_off[actions - at->actions];
- jump_pos = action_pos + 1;
+ jump_pos = dr_pos + 1;
if (actions->conf && masks->conf &&
((const struct rte_flow_action_meter *)
masks->conf)->mtr_id) {
err = flow_hw_meter_compile(dev, cfg,
- action_pos, jump_pos, actions, acts, error);
+ dr_pos, jump_pos, actions, acts, error);
if (err)
goto err;
} else if (__flow_hw_act_data_general_append(priv, acts,
- actions->type,
- actions - action_start,
- action_pos))
+ actions->type,
+ src_pos,
+ dr_pos))
goto err;
break;
case RTE_FLOW_ACTION_TYPE_AGE:
@@ -1781,11 +1759,10 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
NULL,
"Age action on root table is not supported in HW steering mode");
}
- action_pos = at->actions_off[actions - at->actions];
if (__flow_hw_act_data_general_append(priv, acts,
- actions->type,
- actions - action_start,
- action_pos))
+ actions->type,
+ src_pos,
+ dr_pos))
goto err;
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
@@ -1806,49 +1783,46 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
* counter.
*/
break;
- action_pos = at->actions_off[actions - at->actions];
if (masks->conf &&
((const struct rte_flow_action_count *)
masks->conf)->id) {
- err = flow_hw_cnt_compile(dev, action_pos, acts);
+ err = flow_hw_cnt_compile(dev, dr_pos, acts);
if (err)
goto err;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)) {
+ src_pos, dr_pos)) {
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
- action_pos = at->actions_off[actions - at->actions];
if (masks->conf) {
ct_idx = MLX5_ACTION_CTX_CT_GET_IDX
((uint32_t)(uintptr_t)actions->conf);
if (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE, ct_idx,
- &acts->rule_acts[action_pos]))
+ &acts->rule_acts[dr_pos]))
goto err;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)) {
+ src_pos, dr_pos)) {
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_METER_MARK:
- action_pos = at->actions_off[actions - at->actions];
if (actions->conf && masks->conf &&
((const struct rte_flow_action_meter_mark *)
masks->conf)->profile) {
err = flow_hw_meter_mark_compile(dev,
- action_pos, actions,
- acts->rule_acts,
- &acts->mtr_id,
- MLX5_HW_INV_QUEUE);
+ dr_pos, actions,
+ acts->rule_acts,
+ &acts->mtr_id,
+ MLX5_HW_INV_QUEUE);
if (err)
goto err;
} else if (__flow_hw_act_data_general_append(priv, acts,
- actions->type,
- actions - action_start,
- action_pos))
+ actions->type,
+ src_pos,
+ dr_pos))
goto err;
break;
case RTE_FLOW_ACTION_TYPE_END:
@@ -1931,7 +1905,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
if (shared_rfmt)
acts->rule_acts[at->reformat_off].reformat.offset = 0;
else if (__flow_hw_act_data_encap_append(priv, acts,
- (action_start + reformat_src)->type,
+ (at->actions + reformat_src)->type,
reformat_src, at->reformat_off, data_size))
goto err;
acts->encap_decap->shared = shared_rfmt;
@@ -4283,6 +4257,31 @@ flow_hw_validate_action_raw_encap(struct rte_eth_dev *dev __rte_unused,
return 0;
}
+/**
+ * Process `... / raw_decap / raw_encap / ...` actions sequence.
+ * The PMD handles the sequence as a single encap or decap reformat action,
+ * depending on the raw_encap configuration.
+ *
+ * The function assumes that the raw_decap / raw_encap location
+ * in actions template list complies with relative HWS actions order:
+ * for the required reformat configuration:
+ * ENCAP configuration must appear before [JUMP|DROP|PORT]
+ * DECAP configuration must appear at the template head.
+ */
+static uint64_t
+mlx5_decap_encap_reformat_type(const struct rte_flow_action *actions,
+ uint32_t encap_ind, uint64_t flags)
+{
+ const struct rte_flow_action_raw_encap *encap = actions[encap_ind].conf;
+
+ if ((flags & MLX5_FLOW_ACTION_DECAP) == 0)
+ return MLX5_FLOW_ACTION_ENCAP;
+ if (actions[encap_ind - 1].type != RTE_FLOW_ACTION_TYPE_RAW_DECAP)
+ return MLX5_FLOW_ACTION_ENCAP;
+ return encap->size >= MLX5_ENCAPSULATION_DECISION_SIZE ?
+ MLX5_FLOW_ACTION_ENCAP : MLX5_FLOW_ACTION_DECAP;
+}
+
static inline uint16_t
flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
struct rte_flow_action masks[],
@@ -4320,13 +4319,13 @@ flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
*/
for (i = act_num - 2; (int)i >= 0; i--) {
enum rte_flow_action_type type = actions[i].type;
+ uint64_t reformat_type;
if (type == RTE_FLOW_ACTION_TYPE_INDIRECT)
type = masks[i].type;
switch (type) {
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
- case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
case RTE_FLOW_ACTION_TYPE_DROP:
case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
case RTE_FLOW_ACTION_TYPE_JUMP:
@@ -4337,10 +4336,20 @@ flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
case RTE_FLOW_ACTION_TYPE_VOID:
case RTE_FLOW_ACTION_TYPE_END:
break;
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ reformat_type =
+ mlx5_decap_encap_reformat_type(actions, i,
+ flags);
+ if (reformat_type == MLX5_FLOW_ACTION_DECAP) {
+ i++;
+ goto insert;
+ }
+ if (actions[i - 1].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP)
+ i--;
+ break;
default:
i++; /* new MF inserted AFTER actions[i] */
goto insert;
- break;
}
}
i = 0;
@@ -4649,7 +4658,7 @@ action_template_set_type(struct rte_flow_actions_template *at,
unsigned int action_src, uint16_t *curr_off,
enum mlx5dr_action_type type)
{
- at->actions_off[action_src] = *curr_off;
+ at->dr_off[action_src] = *curr_off;
action_types[*curr_off] = type;
*curr_off = *curr_off + 1;
}
@@ -4680,11 +4689,13 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
* Both AGE and COUNT action need counter, the first one fills
* the action_types array, and the second only saves the offset.
*/
- if (*cnt_off == UINT16_MAX)
+ if (*cnt_off == UINT16_MAX) {
+ *cnt_off = *curr_off;
action_template_set_type(at, action_types,
action_src, curr_off,
MLX5DR_ACTION_TYP_CTR);
- at->actions_off[action_src] = *cnt_off;
+ }
+ at->dr_off[action_src] = *cnt_off;
break;
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
action_template_set_type(at, action_types, action_src, curr_off,
@@ -4804,7 +4815,7 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
}
break;
case RTE_FLOW_ACTION_TYPE_METER:
- at->actions_off[i] = curr_off;
+ at->dr_off[i] = curr_off;
action_types[curr_off++] = MLX5DR_ACTION_TYP_ASO_METER;
if (curr_off >= MLX5_HW_MAX_ACTS)
goto err_actions_num;
@@ -4812,14 +4823,14 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
break;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
type = mlx5_hw_dr_action_types[at->actions[i].type];
- at->actions_off[i] = curr_off;
+ at->dr_off[i] = curr_off;
action_types[curr_off++] = type;
i += is_of_vlan_pcp_present(at->actions + i) ?
MLX5_HW_VLAN_PUSH_PCP_IDX :
MLX5_HW_VLAN_PUSH_VID_IDX;
break;
case RTE_FLOW_ACTION_TYPE_METER_MARK:
- at->actions_off[i] = curr_off;
+ at->dr_off[i] = curr_off;
action_types[curr_off++] = MLX5DR_ACTION_TYP_ASO_METER;
if (curr_off >= MLX5_HW_MAX_ACTS)
goto err_actions_num;
@@ -4835,11 +4846,11 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
cnt_off = curr_off++;
action_types[cnt_off] = MLX5DR_ACTION_TYP_CTR;
}
- at->actions_off[i] = cnt_off;
+ at->dr_off[i] = cnt_off;
break;
default:
type = mlx5_hw_dr_action_types[at->actions[i].type];
- at->actions_off[i] = curr_off;
+ at->dr_off[i] = curr_off;
action_types[curr_off++] = type;
break;
}
@@ -5112,6 +5123,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
struct rte_flow_action mf_actions[MLX5_HW_MAX_ACTS];
struct rte_flow_action mf_masks[MLX5_HW_MAX_ACTS];
uint32_t expand_mf_num = 0;
+ uint16_t src_off[MLX5_HW_MAX_ACTS] = {0, };
if (mlx5_flow_hw_actions_validate(dev, attr, actions, masks,
&action_flags, error))
@@ -5190,6 +5202,8 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
act_num,
expand_mf_num);
act_num += expand_mf_num;
+ for (i = pos + expand_mf_num; i < act_num; i++)
+ src_off[i] += expand_mf_num;
action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
}
act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, ra, error);
@@ -5200,7 +5214,8 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
if (mask_len <= 0)
return NULL;
len += RTE_ALIGN(mask_len, 16);
- len += RTE_ALIGN(act_num * sizeof(*at->actions_off), 16);
+ len += RTE_ALIGN(act_num * sizeof(*at->dr_off), 16);
+ len += RTE_ALIGN(act_num * sizeof(*at->src_off), 16);
at = mlx5_malloc(MLX5_MEM_ZERO, len + sizeof(*at),
RTE_CACHE_LINE_SIZE, rte_socket_id());
if (!at) {
@@ -5224,13 +5239,15 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
if (mask_len <= 0)
goto error;
/* DR actions offsets in the third part. */
- at->actions_off = (uint16_t *)((uint8_t *)at->masks + mask_len);
+ at->dr_off = (uint16_t *)((uint8_t *)at->masks + mask_len);
+ at->src_off = RTE_PTR_ADD(at->dr_off,
+ RTE_ALIGN(act_num * sizeof(*at->dr_off), 16));
+ memcpy(at->src_off, src_off, act_num * sizeof(at->src_off[0]));
at->actions_num = act_num;
for (i = 0; i < at->actions_num; ++i)
- at->actions_off[i] = UINT16_MAX;
+ at->dr_off[i] = UINT16_MAX;
at->reformat_off = UINT16_MAX;
at->mhdr_off = UINT16_MAX;
- at->rx_cpy_pos = pos;
for (i = 0; actions->type != RTE_FLOW_ACTION_TYPE_END;
actions++, masks++, i++) {
const struct rte_flow_action_modify_field *info;
@@ -9547,14 +9564,15 @@ mlx5_mirror_terminal_action(const struct rte_flow_action *action)
static bool
mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
- const struct rte_flow_action *action)
+ const struct rte_flow_attr *flow_attr,
+ const struct rte_flow_action *action)
{
struct mlx5_priv *priv = dev->data->dev_private;
switch(action->type) {
case RTE_FLOW_ACTION_TYPE_QUEUE:
case RTE_FLOW_ACTION_TYPE_RSS:
- if (priv->sh->esw_mode)
+ if (flow_attr->transfer)
return false;
break;
case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
@@ -9562,7 +9580,7 @@ mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
- if (!priv->sh->esw_mode)
+ if (!priv->sh->esw_mode && !flow_attr->transfer)
return false;
if (action[0].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP &&
action[1].type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
@@ -9584,19 +9602,22 @@ mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
*/
static int
mlx5_hw_mirror_actions_list_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *flow_attr,
const struct rte_flow_action *actions)
{
if (actions[0].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
int i = 1;
bool valid;
const struct rte_flow_action_sample *sample = actions[0].conf;
- valid = mlx5_mirror_validate_sample_action(dev, sample->actions);
+ valid = mlx5_mirror_validate_sample_action(dev, flow_attr,
+ sample->actions);
if (!valid)
return -EINVAL;
if (actions[1].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
i = 2;
sample = actions[1].conf;
- valid = mlx5_mirror_validate_sample_action(dev, sample->actions);
+ valid = mlx5_mirror_validate_sample_action(dev, flow_attr,
+ sample->actions);
if (!valid)
return -EINVAL;
}
@@ -9669,11 +9690,11 @@ mirror_format_port(struct rte_eth_dev *dev,
static int
hw_mirror_clone_reformat(const struct rte_flow_action *actions,
- struct mlx5dr_action_dest_attr *dest_attr,
- enum mlx5dr_action_type *action_type, bool decap)
+ struct mlx5dr_action_dest_attr *dest_attr,
+ enum mlx5dr_action_type *action_type,
+ uint8_t *reformat_buf, bool decap)
{
int ret;
- uint8_t encap_buf[MLX5_ENCAP_MAX_LEN];
const struct rte_flow_item *encap_item = NULL;
const struct rte_flow_action_raw_encap *encap_conf = NULL;
typeof(dest_attr->reformat) *reformat = &dest_attr->reformat;
@@ -9697,11 +9718,11 @@ hw_mirror_clone_reformat(const struct rte_flow_action *actions,
MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3 :
MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
if (encap_item) {
- ret = flow_dv_convert_encap_data(encap_item, encap_buf,
+ ret = flow_dv_convert_encap_data(encap_item, reformat_buf,
&reformat->reformat_data_sz, NULL);
if (ret)
return -EINVAL;
- reformat->reformat_data = (void *)(uintptr_t)encap_buf;
+ reformat->reformat_data = reformat_buf;
} else {
reformat->reformat_data = (void *)(uintptr_t)encap_conf->data;
reformat->reformat_data_sz = encap_conf->size;
@@ -9715,7 +9736,7 @@ hw_mirror_format_clone(struct rte_eth_dev *dev,
const struct mlx5_flow_template_table_cfg *table_cfg,
const struct rte_flow_action *actions,
struct mlx5dr_action_dest_attr *dest_attr,
- struct rte_flow_error *error)
+ uint8_t *reformat_buf, struct rte_flow_error *error)
{
int ret;
uint32_t i;
@@ -9751,7 +9772,7 @@ hw_mirror_format_clone(struct rte_eth_dev *dev,
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
ret = hw_mirror_clone_reformat(&actions[i], dest_attr,
&dest_attr->action_type[i],
- decap_seen);
+ reformat_buf, decap_seen);
if (ret < 0)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -9780,15 +9801,18 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
struct mlx5_mirror *mirror;
enum mlx5dr_table_type table_type;
struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_attr *flow_attr = &table_cfg->attr.flow_attr;
+ uint8_t reformat_buf[MLX5_MIRROR_MAX_CLONES_NUM][MLX5_ENCAP_MAX_LEN];
struct mlx5dr_action_dest_attr mirror_attr[MLX5_MIRROR_MAX_CLONES_NUM + 1];
enum mlx5dr_action_type array_action_types[MLX5_MIRROR_MAX_CLONES_NUM + 1]
[MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN + 1];
memset(mirror_attr, 0, sizeof(mirror_attr));
memset(array_action_types, 0, sizeof(array_action_types));
- table_type = get_mlx5dr_table_type(&table_cfg->attr.flow_attr);
+ table_type = get_mlx5dr_table_type(flow_attr);
hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
- clones_num = mlx5_hw_mirror_actions_list_validate(dev, actions);
+ clones_num = mlx5_hw_mirror_actions_list_validate(dev, flow_attr,
+ actions);
if (clones_num < 0) {
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
actions, "Invalid mirror list format");
@@ -9816,7 +9840,7 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
}
ret = hw_mirror_format_clone(dev, &mirror->clone[i], table_cfg,
clone_actions, &mirror_attr[i],
- error);
+ reformat_buf[i], error);
if (ret)
goto error;
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v4 10/10] net/mlx5: support indirect list METER_MARK action
2023-10-23 12:42 ` [PATCH v4 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (8 preceding siblings ...)
2023-10-23 12:42 ` [PATCH v4 09/10] net/mlx5: reformat HWS code for indirect list actions Gregory Etelson
@ 2023-10-23 12:42 ` Gregory Etelson
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-23 12:42 UTC (permalink / raw)
To: dev
Cc: getelson,
rasland, Suanming Mou, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/mlx5_flow.c | 69 ++++-
drivers/net/mlx5/mlx5_flow.h | 70 ++++--
drivers/net/mlx5/mlx5_flow_hw.c | 430 +++++++++++++++++++++++++++-----
3 files changed, 484 insertions(+), 85 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 693d1320e1..16fce9c64e 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -75,8 +75,11 @@ mlx5_indirect_list_handles_release(struct rte_eth_dev *dev)
switch (e->type) {
#ifdef HAVE_MLX5_HWS_SUPPORT
case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
- mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)e, true);
+ mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)e);
break;
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
+ mlx5_destroy_legacy_indirect(dev, e);
+ break;
#endif
default:
DRV_LOG(ERR, "invalid indirect list type");
@@ -1169,7 +1172,24 @@ mlx5_flow_async_action_list_handle_destroy
const struct rte_flow_op_attr *op_attr,
struct rte_flow_action_list_handle *action_handle,
void *user_data, struct rte_flow_error *error);
-
+static int
+mlx5_flow_action_list_handle_query_update(struct rte_eth_dev *dev,
+ const
+ struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ struct rte_flow_error *error);
+static int
+mlx5_flow_async_action_list_handle_query_update(struct rte_eth_dev *dev,
+ uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct
+ rte_flow_action_list_handle *handle,
+ const void **update,
+ void **query,
+ enum rte_flow_query_update_mode mode,
+ void *user_data,
+ struct rte_flow_error *error);
static const struct rte_flow_ops mlx5_flow_ops = {
.validate = mlx5_flow_validate,
.create = mlx5_flow_create,
@@ -1219,6 +1239,10 @@ static const struct rte_flow_ops mlx5_flow_ops = {
mlx5_flow_async_action_list_handle_create,
.async_action_list_handle_destroy =
mlx5_flow_async_action_list_handle_destroy,
+ .action_list_handle_query_update =
+ mlx5_flow_action_list_handle_query_update,
+ .async_action_list_handle_query_update =
+ mlx5_flow_async_action_list_handle_query_update,
};
/* Tunnel information. */
@@ -11003,6 +11027,47 @@ mlx5_flow_async_action_list_handle_destroy
error);
}
+static int
+mlx5_flow_action_list_handle_query_update(struct rte_eth_dev *dev,
+ const
+ struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops,
+ action_list_handle_query_update, ENOTSUP);
+ return fops->action_list_handle_query_update(dev, handle, update, query,
+ mode, error);
+}
+
+static int
+mlx5_flow_async_action_list_handle_query_update(struct rte_eth_dev *dev,
+ uint32_t queue_id,
+ const
+ struct rte_flow_op_attr *op_attr,
+ const struct
+ rte_flow_action_list_handle *handle,
+ const void **update,
+ void **query,
+ enum
+ rte_flow_query_update_mode mode,
+ void *user_data,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops,
+ async_action_list_handle_query_update, ENOTSUP);
+ return fops->async_action_list_handle_query_update(dev, queue_id, op_attr,
+ handle, update,
+ query, mode,
+ user_data, error);
+}
+
+
/**
* Destroy all indirect actions (shared RSS).
*
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 19b26ad333..58e345057f 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -98,25 +98,40 @@ enum mlx5_indirect_type{
#define MLX5_ACTION_CTX_CT_GEN_IDX MLX5_INDIRECT_ACT_CT_GEN_IDX
enum mlx5_indirect_list_type {
- MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 1,
+ MLX5_INDIRECT_ACTION_LIST_TYPE_ERR = 0,
+ MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY = 1,
+ MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 2,
};
-/*
+/**
* Base type for indirect list type.
- * Actual indirect list type MUST override that type and put type spec data
- * after the `chain`.
*/
struct mlx5_indirect_list {
- /* type field MUST be the first */
+ /* Indirect list type. */
enum mlx5_indirect_list_type type;
+ /* Optional storage list entry */
LIST_ENTRY(mlx5_indirect_list) entry;
- /* put type specific data after chain */
};
+static __rte_always_inline void
+mlx5_indirect_list_add_entry(void *head, struct mlx5_indirect_list *elem)
+{
+ LIST_HEAD(, mlx5_indirect_list) *h = head;
+
+ LIST_INSERT_HEAD(h, elem, entry);
+}
+
+static __rte_always_inline void
+mlx5_indirect_list_remove_entry(struct mlx5_indirect_list *elem)
+{
+ if (elem->entry.le_prev)
+ LIST_REMOVE(elem, entry);
+}
+
static __rte_always_inline enum mlx5_indirect_list_type
-mlx5_get_indirect_list_type(const struct mlx5_indirect_list *obj)
+mlx5_get_indirect_list_type(const struct rte_flow_action_list_handle *obj)
{
- return obj->type;
+ return ((const struct mlx5_indirect_list *)obj)->type;
}
/* Matches on selected register. */
@@ -1240,9 +1255,12 @@ struct rte_flow_hw {
#pragma GCC diagnostic error "-Wpedantic"
#endif
-struct mlx5dr_action;
-typedef struct mlx5dr_action *
-(*indirect_list_callback_t)(const struct rte_flow_action *);
+struct mlx5_action_construct_data;
+typedef int
+(*indirect_list_callback_t)(struct rte_eth_dev *,
+ const struct mlx5_action_construct_data *,
+ const struct rte_flow_action *,
+ struct mlx5dr_rule_action *);
/* rte flow action translate to DR action struct. */
struct mlx5_action_construct_data {
@@ -1252,6 +1270,7 @@ struct mlx5_action_construct_data {
uint32_t idx; /* Data index. */
uint16_t action_src; /* rte_flow_action src offset. */
uint16_t action_dst; /* mlx5dr_rule_action dst offset. */
+ indirect_list_callback_t indirect_list_cb;
union {
struct {
/* encap data len. */
@@ -1291,10 +1310,8 @@ struct mlx5_action_construct_data {
} shared_counter;
struct {
uint32_t id;
+ uint32_t conf_masked:1;
} shared_meter;
- struct {
- indirect_list_callback_t cb;
- } indirect_list;
};
};
@@ -2017,7 +2034,21 @@ typedef int
const struct rte_flow_op_attr *op_attr,
struct rte_flow_action_list_handle *action_handle,
void *user_data, struct rte_flow_error *error);
-
+typedef int
+(*mlx5_flow_action_list_handle_query_update_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ struct rte_flow_error *error);
+typedef int
+(*mlx5_flow_async_action_list_handle_query_update_t)
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ void *user_data, struct rte_flow_error *error);
struct mlx5_flow_driver_ops {
mlx5_flow_validate_t validate;
@@ -2085,6 +2116,10 @@ struct mlx5_flow_driver_ops {
async_action_list_handle_create;
mlx5_flow_async_action_list_handle_destroy_t
async_action_list_handle_destroy;
+ mlx5_flow_action_list_handle_query_update_t
+ action_list_handle_query_update;
+ mlx5_flow_async_action_list_handle_query_update_t
+ async_action_list_handle_query_update;
};
/* mlx5_flow.c */
@@ -2820,6 +2855,9 @@ mlx5_indirect_list_handles_release(struct rte_eth_dev *dev);
#ifdef HAVE_MLX5_HWS_SUPPORT
struct mlx5_mirror;
void
-mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release);
+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror);
+void
+mlx5_destroy_legacy_indirect(struct rte_eth_dev *dev,
+ struct mlx5_indirect_list *ptr);
#endif
#endif /* RTE_PMD_MLX5_FLOW_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index e8544a4f2b..5daec3524d 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -61,16 +61,23 @@
#define MLX5_MIRROR_MAX_CLONES_NUM 3
#define MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN 4
+#define MLX5_HW_PORT_IS_PROXY(priv) \
+ (!!((priv)->sh->esw_mode && (priv)->master))
+
+
+struct mlx5_indlst_legacy {
+ struct mlx5_indirect_list indirect;
+ struct rte_flow_action_handle *handle;
+ enum rte_flow_action_type legacy_type;
+};
+
struct mlx5_mirror_clone {
enum rte_flow_action_type type;
void *action_ctx;
};
struct mlx5_mirror {
- /* type field MUST be the first */
- enum mlx5_indirect_list_type type;
- LIST_ENTRY(mlx5_indirect_list) entry;
-
+ struct mlx5_indirect_list indirect;
uint32_t clones_num;
struct mlx5dr_action *mirror_action;
struct mlx5_mirror_clone clone[MLX5_MIRROR_MAX_CLONES_NUM];
@@ -598,7 +605,7 @@ flow_hw_act_data_indirect_list_append(struct mlx5_priv *priv,
act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
if (!act_data)
return -1;
- act_data->indirect_list.cb = cb;
+ act_data->indirect_list_cb = cb;
LIST_INSERT_HEAD(&acts->act_list, act_data, next);
return 0;
}
@@ -1416,46 +1423,211 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev,
return 0;
}
-static struct mlx5dr_action *
-flow_hw_mirror_action(const struct rte_flow_action *action)
+static int
+flow_hw_translate_indirect_mirror(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused const struct mlx5_action_construct_data *act_data,
+ const struct rte_flow_action *action,
+ struct mlx5dr_rule_action *dr_rule)
+{
+ const struct rte_flow_action_indirect_list *list_conf = action->conf;
+ const struct mlx5_mirror *mirror = (typeof(mirror))list_conf->handle;
+
+ dr_rule->action = mirror->mirror_action;
+ return 0;
+}
+
+/**
+ * HWS mirror implemented as FW island.
+ * The action does not support indirect list flow configuration.
+ * If template handle was masked, use handle mirror action in flow rules.
+ * Otherwise let flow rule specify mirror handle.
+ */
+static int
+hws_table_tmpl_translate_indirect_mirror(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *mask,
+ struct mlx5_hw_actions *acts,
+ uint16_t action_src, uint16_t action_dst)
+{
+ int ret = 0;
+ const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
+
+ if (mask_conf && mask_conf->handle) {
+ /**
+ * If mirror handle was masked, assign fixed DR5 mirror action.
+ */
+ flow_hw_translate_indirect_mirror(dev, NULL, action,
+ &acts->rule_acts[action_dst]);
+ } else {
+ struct mlx5_priv *priv = dev->data->dev_private;
+ ret = flow_hw_act_data_indirect_list_append
+ (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
+ action_src, action_dst,
+ flow_hw_translate_indirect_mirror);
+ }
+ return ret;
+}
+
+static int
+flow_dr_set_meter(struct mlx5_priv *priv,
+ struct mlx5dr_rule_action *dr_rule,
+ const struct rte_flow_action_indirect_list *action_conf)
+{
+ const struct mlx5_indlst_legacy *legacy_obj =
+ (typeof(legacy_obj))action_conf->handle;
+ struct mlx5_aso_mtr_pool *mtr_pool = priv->hws_mpool;
+ uint32_t act_idx = (uint32_t)(uintptr_t)legacy_obj->handle;
+ uint32_t mtr_id = act_idx & (RTE_BIT32(MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
+ struct mlx5_aso_mtr *aso_mtr = mlx5_ipool_get(mtr_pool->idx_pool, mtr_id);
+
+ if (!aso_mtr)
+ return -EINVAL;
+ dr_rule->action = mtr_pool->action;
+ dr_rule->aso_meter.offset = aso_mtr->offset;
+ return 0;
+}
+
+__rte_always_inline static void
+flow_dr_mtr_flow_color(struct mlx5dr_rule_action *dr_rule, enum rte_color init_color)
+{
+ dr_rule->aso_meter.init_color =
+ (enum mlx5dr_action_aso_meter_color)rte_col_2_mlx5_col(init_color);
+}
+
+static int
+flow_hw_translate_indirect_meter(struct rte_eth_dev *dev,
+ const struct mlx5_action_construct_data *act_data,
+ const struct rte_flow_action *action,
+ struct mlx5dr_rule_action *dr_rule)
+{
+ int ret;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_indirect_list *action_conf = action->conf;
+ const struct rte_flow_indirect_update_flow_meter_mark **flow_conf =
+ (typeof(flow_conf))action_conf->conf;
+
+ /*
+ * Masked indirect handle set dr5 action during template table
+ * translation.
+ */
+ if (!dr_rule->action) {
+ ret = flow_dr_set_meter(priv, dr_rule, action_conf);
+ if (ret)
+ return ret;
+ }
+ if (!act_data->shared_meter.conf_masked) {
+ if (flow_conf && flow_conf[0] && flow_conf[0]->init_color < RTE_COLORS)
+ flow_dr_mtr_flow_color(dr_rule, flow_conf[0]->init_color);
+ }
+ return 0;
+}
+
+static int
+hws_table_tmpl_translate_indirect_meter(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *mask,
+ struct mlx5_hw_actions *acts,
+ uint16_t action_src, uint16_t action_dst)
+{
+ int ret;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_indirect_list *action_conf = action->conf;
+ const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
+ bool is_handle_masked = mask_conf && mask_conf->handle;
+ bool is_conf_masked = mask_conf && mask_conf->conf && mask_conf->conf[0];
+ struct mlx5dr_rule_action *dr_rule = &acts->rule_acts[action_dst];
+
+ if (is_handle_masked) {
+ ret = flow_dr_set_meter(priv, dr_rule, action->conf);
+ if (ret)
+ return ret;
+ }
+ if (is_conf_masked) {
+ const struct
+ rte_flow_indirect_update_flow_meter_mark **flow_conf =
+ (typeof(flow_conf))action_conf->conf;
+ flow_dr_mtr_flow_color(dr_rule,
+ flow_conf[0]->init_color);
+ }
+ if (!is_handle_masked || !is_conf_masked) {
+ struct mlx5_action_construct_data *act_data;
+
+ ret = flow_hw_act_data_indirect_list_append
+ (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
+ action_src, action_dst, flow_hw_translate_indirect_meter);
+ if (ret)
+ return ret;
+ act_data = LIST_FIRST(&acts->act_list);
+ act_data->shared_meter.conf_masked = is_conf_masked;
+ }
+ return 0;
+}
+
+static int
+hws_table_tmpl_translate_indirect_legacy(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *mask,
+ struct mlx5_hw_actions *acts,
+ uint16_t action_src, uint16_t action_dst)
{
- struct mlx5_mirror *mirror = (void *)(uintptr_t)action->conf;
+ int ret;
+ const struct rte_flow_action_indirect_list *indlst_conf = action->conf;
+ struct mlx5_indlst_legacy *indlst_obj = (typeof(indlst_obj))indlst_conf->handle;
+ uint32_t act_idx = (uint32_t)(uintptr_t)indlst_obj->handle;
+ uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
- return mirror->mirror_action;
+ switch (type) {
+ case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
+ ret = hws_table_tmpl_translate_indirect_meter(dev, action, mask,
+ acts, action_src,
+ action_dst);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
}
+/*
+ * template .. indirect_list handle Ht conf Ct ..
+ * mask .. indirect_list handle Hm conf Cm ..
+ *
+ * PMD requires Ht != 0 to resolve handle type.
+ * If Ht was masked (Hm != 0) DR5 action will be set according to Ht and will
+ * not change. Otherwise, DR5 action will be resolved during flow rule build.
+ * If Ct was masked (Cm != 0), table template processing updates base
+ * indirect action configuration with Ct parameters.
+ */
static int
table_template_translate_indirect_list(struct rte_eth_dev *dev,
const struct rte_flow_action *action,
const struct rte_flow_action *mask,
struct mlx5_hw_actions *acts,
- uint16_t action_src,
- uint16_t action_dst)
+ uint16_t action_src, uint16_t action_dst)
{
- int ret;
- bool is_masked = action->conf && mask->conf;
- struct mlx5_priv *priv = dev->data->dev_private;
+ int ret = 0;
enum mlx5_indirect_list_type type;
+ const struct rte_flow_action_indirect_list *list_conf = action->conf;
- if (!action->conf)
+ if (!list_conf || !list_conf->handle)
return -EINVAL;
- type = mlx5_get_indirect_list_type(action->conf);
+ type = mlx5_get_indirect_list_type(list_conf->handle);
switch (type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
+ ret = hws_table_tmpl_translate_indirect_legacy(dev, action, mask,
+ acts, action_src,
+ action_dst);
+ break;
case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
- if (is_masked) {
- acts->rule_acts[action_dst].action = flow_hw_mirror_action(action);
- } else {
- ret = flow_hw_act_data_indirect_list_append
- (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
- action_src, action_dst, flow_hw_mirror_action);
- if (ret)
- return ret;
- }
+ ret = hws_table_tmpl_translate_indirect_mirror(dev, action, mask,
+ acts, action_src,
+ action_dst);
break;
default:
return -EINVAL;
}
- return 0;
+ return ret;
}
/**
@@ -2366,8 +2538,8 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
(int)action->type == act_data->type);
switch ((int)act_data->type) {
case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
- rule_acts[act_data->action_dst].action =
- act_data->indirect_list.cb(action);
+ act_data->indirect_list_cb(dev, act_data, actions,
+ &rule_acts[act_data->action_dst]);
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
if (flow_hw_shared_action_construct
@@ -4664,20 +4836,11 @@ action_template_set_type(struct rte_flow_actions_template *at,
}
static int
-flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
- unsigned int action_src,
+flow_hw_dr_actions_template_handle_shared(int type, uint32_t action_src,
enum mlx5dr_action_type *action_types,
uint16_t *curr_off, uint16_t *cnt_off,
struct rte_flow_actions_template *at)
{
- uint32_t type;
-
- if (!mask) {
- DRV_LOG(WARNING, "Unable to determine indirect action type "
- "without a mask specified");
- return -EINVAL;
- }
- type = mask->type;
switch (type) {
case RTE_FLOW_ACTION_TYPE_RSS:
action_template_set_type(at, action_types, action_src, curr_off,
@@ -4718,12 +4881,24 @@ static int
flow_hw_template_actions_list(struct rte_flow_actions_template *at,
unsigned int action_src,
enum mlx5dr_action_type *action_types,
- uint16_t *curr_off)
+ uint16_t *curr_off, uint16_t *cnt_off)
{
- enum mlx5_indirect_list_type list_type;
+ int ret;
+ const struct rte_flow_action_indirect_list *indlst_conf = at->actions[action_src].conf;
+ enum mlx5_indirect_list_type list_type = mlx5_get_indirect_list_type(indlst_conf->handle);
+ const union {
+ struct mlx5_indlst_legacy *legacy;
+ struct rte_flow_action_list_handle *handle;
+ } indlst_obj = { .handle = indlst_conf->handle };
- list_type = mlx5_get_indirect_list_type(at->actions[action_src].conf);
switch (list_type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
+ ret = flow_hw_dr_actions_template_handle_shared
+ (indlst_obj.legacy->legacy_type, action_src,
+ action_types, curr_off, cnt_off, at);
+ if (ret)
+ return ret;
+ break;
case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
action_template_set_type(at, action_types, action_src, curr_off,
MLX5DR_ACTION_TYP_DEST_ARRAY);
@@ -4769,17 +4944,14 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
ret = flow_hw_template_actions_list(at, i, action_types,
- &curr_off);
+ &curr_off, &cnt_off);
if (ret)
return NULL;
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
ret = flow_hw_dr_actions_template_handle_shared
- (&at->masks[i],
- i,
- action_types,
- &curr_off,
- &cnt_off, at);
+ (at->masks[i].type, i, action_types,
+ &curr_off, &cnt_off, at);
if (ret)
return NULL;
break;
@@ -5259,9 +5431,8 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
* Need to restore the indirect action index from action conf here.
*/
case RTE_FLOW_ACTION_TYPE_INDIRECT:
- case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
- at->actions[i].conf = actions->conf;
- at->masks[i].conf = masks->conf;
+ at->actions[i].conf = ra[i].conf;
+ at->masks[i].conf = rm[i].conf;
break;
case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
info = actions->conf;
@@ -9519,18 +9690,16 @@ mlx5_mirror_destroy_clone(struct rte_eth_dev *dev,
}
void
-mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release)
+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror)
{
uint32_t i;
- if (mirror->entry.le_prev)
- LIST_REMOVE(mirror, entry);
+ mlx5_indirect_list_remove_entry(&mirror->indirect);
for(i = 0; i < mirror->clones_num; i++)
mlx5_mirror_destroy_clone(dev, &mirror->clone[i]);
if (mirror->mirror_action)
mlx5dr_action_destroy(mirror->mirror_action);
- if (release)
- mlx5_free(mirror);
+ mlx5_free(mirror);
}
static inline enum mlx5dr_table_type
@@ -9825,7 +9994,8 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
actions, "Failed to allocate mirror context");
return NULL;
}
- mirror->type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
+
+ mirror->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
mirror->clones_num = clones_num;
for (i = 0; i < clones_num; i++) {
const struct rte_flow_action *clone_actions;
@@ -9857,15 +10027,72 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
goto error;
}
- LIST_INSERT_HEAD(&priv->indirect_list_head,
- (struct mlx5_indirect_list *)mirror, entry);
+ mlx5_indirect_list_add_entry(&priv->indirect_list_head, &mirror->indirect);
return (struct rte_flow_action_list_handle *)mirror;
error:
- mlx5_hw_mirror_destroy(dev, mirror, true);
+ mlx5_hw_mirror_destroy(dev, mirror);
return NULL;
}
+void
+mlx5_destroy_legacy_indirect(__rte_unused struct rte_eth_dev *dev,
+ struct mlx5_indirect_list *ptr)
+{
+ struct mlx5_indlst_legacy *obj = (typeof(obj))ptr;
+
+ switch (obj->legacy_type) {
+ case RTE_FLOW_ACTION_TYPE_METER_MARK:
+ break; /* ASO meters were released in mlx5_flow_meter_flush() */
+ default:
+ break;
+ }
+ mlx5_free(obj);
+}
+
+static struct rte_flow_action_list_handle *
+mlx5_create_legacy_indlst(struct rte_eth_dev *dev, uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data, struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_indlst_legacy *indlst_obj = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(*indlst_obj),
+ 0, SOCKET_ID_ANY);
+
+ if (!indlst_obj)
+ return NULL;
+ indlst_obj->handle = flow_hw_action_handle_create(dev, queue, attr, conf,
+ actions, user_data,
+ error);
+ if (!indlst_obj->handle) {
+ mlx5_free(indlst_obj);
+ return NULL;
+ }
+ indlst_obj->legacy_type = actions[0].type;
+ indlst_obj->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY;
+ mlx5_indirect_list_add_entry(&priv->indirect_list_head, &indlst_obj->indirect);
+ return (struct rte_flow_action_list_handle *)indlst_obj;
+}
+
+static __rte_always_inline enum mlx5_indirect_list_type
+flow_hw_inlist_type_get(const struct rte_flow_action *actions)
+{
+ switch (actions[0].type) {
+ case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ return MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
+ case RTE_FLOW_ACTION_TYPE_METER_MARK:
+ return actions[1].type == RTE_FLOW_ACTION_TYPE_END ?
+ MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY :
+ MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;
+ default:
+ break;
+ }
+ return MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;
+}
+
static struct rte_flow_action_list_handle *
flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
const struct rte_flow_op_attr *attr,
@@ -9876,6 +10103,7 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
{
struct mlx5_hw_q_job *job = NULL;
bool push = flow_hw_action_push(attr);
+ enum mlx5_indirect_list_type list_type;
struct rte_flow_action_list_handle *handle;
struct mlx5_priv *priv = dev->data->dev_private;
const struct mlx5_flow_template_table_cfg table_cfg = {
@@ -9894,6 +10122,16 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
NULL, "No action list");
return NULL;
}
+ list_type = flow_hw_inlist_type_get(actions);
+ if (list_type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
+ /*
+ * Legacy indirect actions already have
+ * async resources management. No need to do it twice.
+ */
+ handle = mlx5_create_legacy_indlst(dev, queue, attr, conf,
+ actions, user_data, error);
+ goto end;
+ }
if (attr) {
job = flow_hw_action_job_init(priv, queue, NULL, user_data,
NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
@@ -9901,8 +10139,8 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
if (!job)
return NULL;
}
- switch (actions[0].type) {
- case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ switch (list_type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
handle = mlx5_hw_mirror_handle_create(dev, &table_cfg,
actions, error);
break;
@@ -9916,6 +10154,7 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
flow_hw_action_finalize(dev, queue, job, push, false,
handle != NULL);
}
+end:
return handle;
}
@@ -9944,6 +10183,15 @@ flow_hw_async_action_list_handle_destroy
enum mlx5_indirect_list_type type =
mlx5_get_indirect_list_type((void *)handle);
+ if (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
+ struct mlx5_indlst_legacy *legacy = (typeof(legacy))handle;
+
+ ret = flow_hw_action_handle_destroy(dev, queue, attr,
+ legacy->handle,
+ user_data, error);
+ mlx5_indirect_list_remove_entry(&legacy->indirect);
+ goto end;
+ }
if (attr) {
job = flow_hw_action_job_init(priv, queue, NULL, user_data,
NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
@@ -9953,20 +10201,17 @@ flow_hw_async_action_list_handle_destroy
}
switch (type) {
case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
- mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle, false);
+ mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle);
break;
default:
- handle = NULL;
ret = rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "Invalid indirect list handle");
}
if (job) {
- job->action = handle;
- flow_hw_action_finalize(dev, queue, job, push, false,
- handle != NULL);
+ flow_hw_action_finalize(dev, queue, job, push, false, true);
}
- mlx5_free(handle);
+end:
return ret;
}
@@ -9980,6 +10225,53 @@ flow_hw_action_list_handle_destroy(struct rte_eth_dev *dev,
error);
}
+static int
+flow_hw_async_action_list_handle_query_update
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ void *user_data, struct rte_flow_error *error)
+{
+ enum mlx5_indirect_list_type type =
+ mlx5_get_indirect_list_type((const void *)handle);
+
+ if (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
+ struct mlx5_indlst_legacy *legacy = (void *)(uintptr_t)handle;
+
+ if (update && query)
+ return flow_hw_async_action_handle_query_update
+ (dev, queue_id, attr, legacy->handle,
+ update, query, mode, user_data, error);
+ else if (update && update[0])
+ return flow_hw_action_handle_update(dev, queue_id, attr,
+ legacy->handle, update[0],
+ user_data, error);
+ else if (query && query[0])
+ return flow_hw_action_handle_query(dev, queue_id, attr,
+ legacy->handle, query[0],
+ user_data, error);
+ else
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "invalid legacy handle query_update parameters");
+ }
+ return -ENOTSUP;
+}
+
+static int
+flow_hw_action_list_handle_query_update(struct rte_eth_dev *dev,
+ const struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ struct rte_flow_error *error)
+{
+ return flow_hw_async_action_list_handle_query_update
+ (dev, MLX5_HW_INV_QUEUE, NULL, handle,
+ update, query, mode, NULL, error);
+}
+
const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.info_get = flow_hw_info_get,
.configure = flow_hw_configure,
@@ -10010,10 +10302,14 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.action_query_update = flow_hw_action_query_update,
.action_list_handle_create = flow_hw_action_list_handle_create,
.action_list_handle_destroy = flow_hw_action_list_handle_destroy,
+ .action_list_handle_query_update =
+ flow_hw_action_list_handle_query_update,
.async_action_list_handle_create =
flow_hw_async_action_list_handle_create,
.async_action_list_handle_destroy =
flow_hw_async_action_list_handle_destroy,
+ .async_action_list_handle_query_update =
+ flow_hw_async_action_list_handle_query_update,
.query = flow_hw_query,
.get_aged_flows = flow_hw_get_aged_flows,
.get_q_aged_flows = flow_hw_get_q_aged_flows,
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v5 00/10] net/mlx5: support indirect actions list
2023-10-17 8:09 ` [PATCH v3 " Gregory Etelson
` (16 preceding siblings ...)
2023-10-23 12:42 ` [PATCH v4 00/10] net/mlx5: support indirect actions list Gregory Etelson
@ 2023-10-25 10:27 ` Gregory Etelson
2023-10-25 10:27 ` [PATCH v5 01/10] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
` (9 more replies)
2023-10-25 11:22 ` [PATCH v6 00/10] net/mlx5: support indirect actions list Gregory Etelson
2023-10-26 7:12 ` [PATCH v7 00/10] net/mlx5: support indirect actions list Gregory Etelson
19 siblings, 10 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-25 10:27 UTC (permalink / raw)
To: dev; +Cc: getelson, , rasland
Add MLX5 PMD support for indirect actions list.
Erez Shitrit (1):
net/mlx5/hws: allow destination into default miss FT
Gregory Etelson (4):
net/mlx5: reformat HWS code for HWS mirror action
net/mlx5: support HWS mirror action
net/mlx5: reformat HWS code for indirect list actions
net/mlx5: support indirect list METER_MARK action
Haifei Luo (1):
net/mlx5/hws: support reformat for hws mirror
Hamdan Igbaria (3):
net/mlx5/hws: add support for reformat DevX object
net/mlx5/hws: support creating of dynamic forward table and FTE
net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action
Shun Hao (1):
net/mlx5/hws: add support for mirroring
doc/guides/nics/features/mlx5.ini | 1 +
doc/guides/rel_notes/release_23_11.rst | 1 +
drivers/common/mlx5/mlx5_prm.h | 81 +-
drivers/net/mlx5/hws/mlx5dr.h | 34 +
drivers/net/mlx5/hws/mlx5dr_action.c | 210 +++-
drivers/net/mlx5/hws/mlx5dr_action.h | 8 +
drivers/net/mlx5/hws/mlx5dr_cmd.c | 143 ++-
drivers/net/mlx5/hws/mlx5dr_cmd.h | 49 +-
drivers/net/mlx5/hws/mlx5dr_debug.c | 1 +
drivers/net/mlx5/hws/mlx5dr_internal.h | 5 +
drivers/net/mlx5/hws/mlx5dr_send.c | 5 -
drivers/net/mlx5/hws/mlx5dr_table.c | 8 +-
drivers/net/mlx5/mlx5.c | 1 +
drivers/net/mlx5/mlx5.h | 2 +
drivers/net/mlx5/mlx5_flow.c | 199 ++++
drivers/net/mlx5/mlx5_flow.h | 111 ++-
drivers/net/mlx5/mlx5_flow_hw.c | 1217 +++++++++++++++++++++---
17 files changed, 1908 insertions(+), 168 deletions(-)
--
v3: Add ACK to patches in the series.
v4: Squash reformat patches.
v5: Update release notes.
Fix code style.
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v5 01/10] net/mlx5/hws: add support for reformat DevX object
2023-10-25 10:27 ` [PATCH v5 00/10] net/mlx5: support indirect actions list Gregory Etelson
@ 2023-10-25 10:27 ` Gregory Etelson
2023-10-25 10:27 ` [PATCH v5 02/10] net/mlx5/hws: support creating of dynamic forward table and FTE Gregory Etelson
` (8 subsequent siblings)
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-25 10:27 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Hamdan Igbaria, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Hamdan Igbaria <hamdani@nvidia.com>
Add support for creating a packet reformat object
via the ALLOC_PACKET_REFORMAT_CONTEXT command.
Signed-off-by: Hamdan Igbaria <hamdani@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/common/mlx5/mlx5_prm.h | 39 +++++++++++++++++
drivers/net/mlx5/hws/mlx5dr_cmd.c | 60 ++++++++++++++++++++++++++
drivers/net/mlx5/hws/mlx5dr_cmd.h | 11 +++++
drivers/net/mlx5/hws/mlx5dr_internal.h | 5 +++
drivers/net/mlx5/hws/mlx5dr_send.c | 5 ---
5 files changed, 115 insertions(+), 5 deletions(-)
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index 6e181a0eca..4192fff55b 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -1218,6 +1218,8 @@ enum {
MLX5_CMD_OP_CREATE_FLOW_GROUP = 0x933,
MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936,
MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
+ MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT = 0x93d,
+ MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT = 0x93e,
MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939,
MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
MLX5_CMD_OP_CREATE_GENERAL_OBJECT = 0xa00,
@@ -5191,6 +5193,43 @@ struct mlx5_ifc_modify_flow_table_out_bits {
u8 reserved_at_40[0x60];
};
+struct mlx5_ifc_packet_reformat_context_in_bits {
+ u8 reformat_type[0x8];
+ u8 reserved_at_8[0x4];
+ u8 reformat_param_0[0x4];
+ u8 reserved_at_16[0x6];
+ u8 reformat_data_size[0xa];
+
+ u8 reformat_param_1[0x8];
+ u8 reserved_at_40[0x8];
+ u8 reformat_data[6][0x8];
+
+ u8 more_reformat_data[][0x8];
+};
+
+struct mlx5_ifc_alloc_packet_reformat_context_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0xa0];
+
+ u8 packet_reformat_context[];
+};
+
+struct mlx5_ifc_alloc_packet_reformat_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 packet_reformat_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
/* CQE format mask. */
#define MLX5E_CQE_FORMAT_MASK 0xc
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c
index 594c59aee3..0ccbaee961 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.c
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c
@@ -780,6 +780,66 @@ mlx5dr_cmd_sq_create(struct ibv_context *ctx,
return devx_obj;
}
+struct mlx5dr_devx_obj *
+mlx5dr_cmd_packet_reformat_create(struct ibv_context *ctx,
+ struct mlx5dr_cmd_packet_reformat_create_attr *attr)
+{
+ uint32_t out[MLX5_ST_SZ_DW(alloc_packet_reformat_out)] = {0};
+ size_t insz, cmd_data_sz, cmd_total_sz;
+ struct mlx5dr_devx_obj *devx_obj;
+ void *prctx;
+ void *pdata;
+ void *in;
+
+ cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
+ cmd_total_sz += MLX5_ST_SZ_BYTES(packet_reformat_context_in);
+ cmd_data_sz = MLX5_FLD_SZ_BYTES(packet_reformat_context_in, reformat_data);
+ insz = align(cmd_total_sz + attr->data_sz - cmd_data_sz, DW_SIZE);
+ in = simple_calloc(1, insz);
+ if (!in) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
+
+ prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in,
+ packet_reformat_context);
+ pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);
+
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_type, attr->type);
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, attr->reformat_param_0);
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, attr->data_sz);
+ memcpy(pdata, attr->data, attr->data_sz);
+
+ devx_obj = simple_malloc(sizeof(*devx_obj));
+ if (!devx_obj) {
+ DR_LOG(ERR, "Failed to allocate memory for packet reformat object");
+ rte_errno = ENOMEM;
+ goto out_free_in;
+ }
+
+ devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, insz, out, sizeof(out));
+ if (!devx_obj->obj) {
+ DR_LOG(ERR, "Failed to create packet reformat");
+ rte_errno = errno;
+ goto out_free_devx;
+ }
+
+ devx_obj->id = MLX5_GET(alloc_packet_reformat_out, out, packet_reformat_id);
+
+ simple_free(in);
+
+ return devx_obj;
+
+out_free_devx:
+ simple_free(devx_obj);
+out_free_in:
+ simple_free(in);
+ return NULL;
+}
+
int mlx5dr_cmd_sq_modify_rdy(struct mlx5dr_devx_obj *devx_obj)
{
uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h
index 8a495db9b3..f45b6c6b07 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.h
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h
@@ -155,6 +155,13 @@ struct mlx5dr_cmd_allow_other_vhca_access_attr {
uint8_t access_key[ACCESS_KEY_LEN];
};
+struct mlx5dr_cmd_packet_reformat_create_attr {
+ uint8_t type;
+ size_t data_sz;
+ void *data;
+ uint8_t reformat_param_0;
+};
+
struct mlx5dr_cmd_query_ft_caps {
uint8_t max_level;
uint8_t reparse;
@@ -285,6 +292,10 @@ mlx5dr_cmd_forward_tbl_create(struct ibv_context *ctx,
void mlx5dr_cmd_forward_tbl_destroy(struct mlx5dr_cmd_forward_tbl *tbl);
+struct mlx5dr_devx_obj *
+mlx5dr_cmd_packet_reformat_create(struct ibv_context *ctx,
+ struct mlx5dr_cmd_packet_reformat_create_attr *attr);
+
struct mlx5dr_devx_obj *
mlx5dr_cmd_alias_obj_create(struct ibv_context *ctx,
struct mlx5dr_cmd_alias_obj_create_attr *alias_attr);
diff --git a/drivers/net/mlx5/hws/mlx5dr_internal.h b/drivers/net/mlx5/hws/mlx5dr_internal.h
index c3c077667d..3770d28e62 100644
--- a/drivers/net/mlx5/hws/mlx5dr_internal.h
+++ b/drivers/net/mlx5/hws/mlx5dr_internal.h
@@ -91,4 +91,9 @@ static inline uint64_t roundup_pow_of_two(uint64_t n)
return n == 1 ? 1 : 1ULL << log2above(n);
}
+static inline unsigned long align(unsigned long val, unsigned long align)
+{
+ return (val + align - 1) & ~(align - 1);
+}
+
#endif /* MLX5DR_INTERNAL_H_ */
diff --git a/drivers/net/mlx5/hws/mlx5dr_send.c b/drivers/net/mlx5/hws/mlx5dr_send.c
index e58fdeb117..622d574bfa 100644
--- a/drivers/net/mlx5/hws/mlx5dr_send.c
+++ b/drivers/net/mlx5/hws/mlx5dr_send.c
@@ -668,11 +668,6 @@ static int mlx5dr_send_ring_create_sq_obj(struct mlx5dr_context *ctx,
return err;
}
-static inline unsigned long align(unsigned long val, unsigned long align)
-{
- return (val + align - 1) & ~(align - 1);
-}
-
static int mlx5dr_send_ring_open_sq(struct mlx5dr_context *ctx,
struct mlx5dr_send_engine *queue,
struct mlx5dr_send_ring_sq *sq,
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v5 02/10] net/mlx5/hws: support creating of dynamic forward table and FTE
2023-10-25 10:27 ` [PATCH v5 00/10] net/mlx5: support indirect actions list Gregory Etelson
2023-10-25 10:27 ` [PATCH v5 01/10] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
@ 2023-10-25 10:27 ` Gregory Etelson
2023-10-25 10:27 ` [PATCH v5 03/10] net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action Gregory Etelson
` (7 subsequent siblings)
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-25 10:27 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Hamdan Igbaria, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Hamdan Igbaria <hamdani@nvidia.com>
Add the ability to create forward table and FTE.
Signed-off-by: Hamdan Igbaria <hamdani@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/common/mlx5/mlx5_prm.h | 4 ++++
drivers/net/mlx5/hws/mlx5dr_cmd.c | 13 +++++++++++++
drivers/net/mlx5/hws/mlx5dr_cmd.h | 19 +++++++++++++++++++
3 files changed, 36 insertions(+)
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index 4192fff55b..df621b19af 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -5048,7 +5048,11 @@ enum mlx5_flow_destination_type {
};
enum mlx5_flow_context_action {
+ MLX5_FLOW_CONTEXT_ACTION_DROP = 1 << 1,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 1 << 2,
+ MLX5_FLOW_CONTEXT_ACTION_REFORMAT = 1 << 4,
+ MLX5_FLOW_CONTEXT_ACTION_DECRYPT = 1 << 12,
+ MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 13,
};
enum mlx5_flow_context_flow_source {
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c
index 0ccbaee961..8f407f9bce 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.c
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c
@@ -42,6 +42,7 @@ mlx5dr_cmd_flow_table_create(struct ibv_context *ctx,
ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level);
MLX5_SET(flow_table_context, ft_ctx, rtc_valid, ft_attr->rtc_valid);
+ MLX5_SET(flow_table_context, ft_ctx, reformat_en, ft_attr->reformat_en);
devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
if (!devx_obj->obj) {
@@ -182,12 +183,24 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
action_flags = fte_attr->action_flags;
MLX5_SET(flow_context, in_flow_context, action, action_flags);
+ if (action_flags & MLX5_FLOW_CONTEXT_ACTION_REFORMAT)
+ MLX5_SET(flow_context, in_flow_context,
+ packet_reformat_id, fte_attr->packet_reformat_id);
+
+ if (action_flags & (MLX5_FLOW_CONTEXT_ACTION_DECRYPT | MLX5_FLOW_CONTEXT_ACTION_ENCRYPT)) {
+ MLX5_SET(flow_context, in_flow_context,
+ encrypt_decrypt_type, fte_attr->encrypt_decrypt_type);
+ MLX5_SET(flow_context, in_flow_context,
+ encrypt_decrypt_obj_id, fte_attr->encrypt_decrypt_obj_id);
+ }
+
if (action_flags & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
/* Only destination_list_size of size 1 is supported */
MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
MLX5_SET(dest_format, in_dests, destination_type, fte_attr->destination_type);
MLX5_SET(dest_format, in_dests, destination_id, fte_attr->destination_id);
+ MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
}
devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h
index f45b6c6b07..bf3a362300 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.h
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h
@@ -7,8 +7,12 @@
struct mlx5dr_cmd_set_fte_attr {
uint32_t action_flags;
+ uint8_t encrypt_decrypt_type;
+ uint32_t encrypt_decrypt_obj_id;
+ uint32_t packet_reformat_id;
uint8_t destination_type;
uint32_t destination_id;
+ uint8_t ignore_flow_level;
uint8_t flow_source;
};
@@ -16,6 +20,7 @@ struct mlx5dr_cmd_ft_create_attr {
uint8_t type;
uint8_t level;
bool rtc_valid;
+ uint8_t reformat_en;
};
#define ACCESS_KEY_LEN 32
@@ -296,6 +301,20 @@ struct mlx5dr_devx_obj *
mlx5dr_cmd_packet_reformat_create(struct ibv_context *ctx,
struct mlx5dr_cmd_packet_reformat_create_attr *attr);
+struct mlx5dr_devx_obj *
+mlx5dr_cmd_set_fte(struct ibv_context *ctx,
+ uint32_t table_type,
+ uint32_t table_id,
+ uint32_t group_id,
+ struct mlx5dr_cmd_set_fte_attr *fte_attr);
+
+struct mlx5dr_cmd_forward_tbl *
+mlx5dr_cmd_forward_tbl_create(struct ibv_context *ctx,
+ struct mlx5dr_cmd_ft_create_attr *ft_attr,
+ struct mlx5dr_cmd_set_fte_attr *fte_attr);
+
+void mlx5dr_cmd_forward_tbl_destroy(struct mlx5dr_cmd_forward_tbl *tbl);
+
struct mlx5dr_devx_obj *
mlx5dr_cmd_alias_obj_create(struct ibv_context *ctx,
struct mlx5dr_cmd_alias_obj_create_attr *alias_attr);
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v5 03/10] net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action
2023-10-25 10:27 ` [PATCH v5 00/10] net/mlx5: support indirect actions list Gregory Etelson
2023-10-25 10:27 ` [PATCH v5 01/10] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
2023-10-25 10:27 ` [PATCH v5 02/10] net/mlx5/hws: support creating of dynamic forward table and FTE Gregory Etelson
@ 2023-10-25 10:27 ` Gregory Etelson
2023-10-25 10:27 ` [PATCH v5 04/10] net/mlx5/hws: add support for mirroring Gregory Etelson
` (6 subsequent siblings)
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-25 10:27 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Hamdan Igbaria, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Hamdan Igbaria <hamdani@nvidia.com>
Add the mlx5dr_devx_obj struct to mlx5dr_action, so we can hold
the FT object in the dest table action.
Signed-off-by: Hamdan Igbaria <hamdani@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/hws/mlx5dr_action.c | 4 ++++
drivers/net/mlx5/hws/mlx5dr_action.h | 3 +++
drivers/net/mlx5/hws/mlx5dr_table.c | 1 -
3 files changed, 7 insertions(+), 1 deletion(-)
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c
index ea9fc23732..55ec4f71c9 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.c
+++ b/drivers/net/mlx5/hws/mlx5dr_action.c
@@ -787,6 +787,8 @@ mlx5dr_action_create_dest_table(struct mlx5dr_context *ctx,
ret = mlx5dr_action_create_stcs(action, tbl->ft);
if (ret)
goto free_action;
+
+ action->devx_dest.devx_obj = tbl->ft;
}
return action;
@@ -864,6 +866,8 @@ mlx5dr_action_create_dest_tir(struct mlx5dr_context *ctx,
ret = mlx5dr_action_create_stcs(action, cur_obj);
if (ret)
goto clean_obj;
+
+ action->devx_dest.devx_obj = cur_obj;
}
return action;
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.h b/drivers/net/mlx5/hws/mlx5dr_action.h
index 314e289780..104c6880c1 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.h
+++ b/drivers/net/mlx5/hws/mlx5dr_action.h
@@ -148,6 +148,9 @@ struct mlx5dr_action {
struct {
struct mlx5dv_steering_anchor *sa;
} root_tbl;
+ struct {
+ struct mlx5dr_devx_obj *devx_obj;
+ } devx_dest;
};
};
diff --git a/drivers/net/mlx5/hws/mlx5dr_table.c b/drivers/net/mlx5/hws/mlx5dr_table.c
index e1150cd75d..91eb92db78 100644
--- a/drivers/net/mlx5/hws/mlx5dr_table.c
+++ b/drivers/net/mlx5/hws/mlx5dr_table.c
@@ -68,7 +68,6 @@ static void mlx5dr_table_down_default_fdb_miss_tbl(struct mlx5dr_table *tbl)
return;
mlx5dr_cmd_forward_tbl_destroy(default_miss);
-
ctx->common_res[tbl_type].default_miss = NULL;
}
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v5 04/10] net/mlx5/hws: add support for mirroring
2023-10-25 10:27 ` [PATCH v5 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (2 preceding siblings ...)
2023-10-25 10:27 ` [PATCH v5 03/10] net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action Gregory Etelson
@ 2023-10-25 10:27 ` Gregory Etelson
2023-10-25 10:27 ` [PATCH v5 05/10] net/mlx5/hws: allow destination into default miss FT Gregory Etelson
` (5 subsequent siblings)
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-25 10:27 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Shun Hao, Alex Vesker, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Shun Hao <shunh@nvidia.com>
This patch supports mirroring by adding a dest_array action. The action
accepts a list containing multiple destination actions, and can duplicate
a packet and forward it to each destination in the list.
Signed-off-by: Shun Hao <shunh@nvidia.com>
Acked-by: Alex Vesker <valex@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/common/mlx5/mlx5_prm.h | 23 ++++-
drivers/net/mlx5/hws/mlx5dr.h | 34 +++++++
drivers/net/mlx5/hws/mlx5dr_action.c | 130 ++++++++++++++++++++++++++-
drivers/net/mlx5/hws/mlx5dr_action.h | 3 +
drivers/net/mlx5/hws/mlx5dr_cmd.c | 64 ++++++++++---
drivers/net/mlx5/hws/mlx5dr_cmd.h | 21 ++++-
drivers/net/mlx5/hws/mlx5dr_debug.c | 1 +
drivers/net/mlx5/hws/mlx5dr_table.c | 7 +-
8 files changed, 262 insertions(+), 21 deletions(-)
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index df621b19af..aa0b622ca2 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -2320,7 +2320,11 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
};
struct mlx5_ifc_esw_cap_bits {
- u8 reserved_at_0[0x60];
+ u8 reserved_at_0[0x1d];
+ u8 merged_eswitch[0x1];
+ u8 reserved_at_1e[0x2];
+
+ u8 reserved_at_20[0x40];
u8 esw_manager_vport_number_valid[0x1];
u8 reserved_at_61[0xf];
@@ -5045,6 +5049,7 @@ struct mlx5_ifc_query_flow_table_out_bits {
enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0,
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
+ MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
};
enum mlx5_flow_context_action {
@@ -5088,6 +5093,19 @@ union mlx5_ifc_dest_format_flow_counter_list_auto_bits {
u8 reserved_at_0[0x40];
};
+struct mlx5_ifc_extended_dest_format_bits {
+ struct mlx5_ifc_dest_format_bits destination_entry;
+
+ u8 packet_reformat_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+#define MLX5_IFC_MULTI_PATH_FT_MAX_LEVEL 64
+
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
struct mlx5_ifc_flow_context_bits {
u8 reserved_at_00[0x20];
u8 group_id[0x20];
@@ -5106,8 +5124,7 @@ struct mlx5_ifc_flow_context_bits {
u8 reserved_at_e0[0x40];
u8 encrypt_decrypt_obj_id[0x20];
u8 reserved_at_140[0x16c0];
- /* Currently only one destnation */
- union mlx5_ifc_dest_format_flow_counter_list_auto_bits destination[1];
+ union mlx5_ifc_dest_format_flow_counter_list_auto_bits destination[0];
};
struct mlx5_ifc_set_fte_in_bits {
diff --git a/drivers/net/mlx5/hws/mlx5dr.h b/drivers/net/mlx5/hws/mlx5dr.h
index ea8bf683f3..1995c55132 100644
--- a/drivers/net/mlx5/hws/mlx5dr.h
+++ b/drivers/net/mlx5/hws/mlx5dr.h
@@ -46,6 +46,7 @@ enum mlx5dr_action_type {
MLX5DR_ACTION_TYP_ASO_METER,
MLX5DR_ACTION_TYP_ASO_CT,
MLX5DR_ACTION_TYP_DEST_ROOT,
+ MLX5DR_ACTION_TYP_DEST_ARRAY,
MLX5DR_ACTION_TYP_MAX,
};
@@ -213,6 +214,20 @@ struct mlx5dr_rule_action {
};
};
+struct mlx5dr_action_dest_attr {
+ /* Required action combination */
+ enum mlx5dr_action_type *action_type;
+
+ /* Required destination action to forward the packet */
+ struct mlx5dr_action *dest;
+
+ /* Optional reformat data */
+ struct {
+ size_t reformat_data_sz;
+ void *reformat_data;
+ } reformat;
+};
+
/* Open a context used for direct rule insertion using hardware steering.
* Each context can contain multiple tables of different types.
*
@@ -616,6 +631,25 @@ mlx5dr_action_create_pop_vlan(struct mlx5dr_context *ctx, uint32_t flags);
struct mlx5dr_action *
mlx5dr_action_create_push_vlan(struct mlx5dr_context *ctx, uint32_t flags);
+/* Create a dest array action, this action can duplicate packets and forward to
+ * multiple destinations in the destination list.
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] num_dest
+ * The number of dests attributes.
+ * @param[in] dests
+ * The destination array. Each contains a destination action and can have
+ * additional actions.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5dr_action_flags)
+ * @return pointer to mlx5dr_action on success NULL otherwise.
+ */
+struct mlx5dr_action *
+mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
+ size_t num_dest,
+ struct mlx5dr_action_dest_attr *dests,
+ uint32_t flags);
+
/* Create dest root table, this action will jump to root table according
* the given priority.
* @param[in] ctx
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c
index 55ec4f71c9..f068bc7e9c 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.c
+++ b/drivers/net/mlx5/hws/mlx5dr_action.c
@@ -34,7 +34,8 @@ static const uint32_t action_order_arr[MLX5DR_TABLE_TYPE_MAX][MLX5DR_ACTION_TYP_
BIT(MLX5DR_ACTION_TYP_MISS) |
BIT(MLX5DR_ACTION_TYP_TIR) |
BIT(MLX5DR_ACTION_TYP_DROP) |
- BIT(MLX5DR_ACTION_TYP_DEST_ROOT),
+ BIT(MLX5DR_ACTION_TYP_DEST_ROOT) |
+ BIT(MLX5DR_ACTION_TYP_DEST_ARRAY),
BIT(MLX5DR_ACTION_TYP_LAST),
},
[MLX5DR_TABLE_TYPE_NIC_TX] = {
@@ -71,7 +72,8 @@ static const uint32_t action_order_arr[MLX5DR_TABLE_TYPE_MAX][MLX5DR_ACTION_TYP_
BIT(MLX5DR_ACTION_TYP_MISS) |
BIT(MLX5DR_ACTION_TYP_VPORT) |
BIT(MLX5DR_ACTION_TYP_DROP) |
- BIT(MLX5DR_ACTION_TYP_DEST_ROOT),
+ BIT(MLX5DR_ACTION_TYP_DEST_ROOT) |
+ BIT(MLX5DR_ACTION_TYP_DEST_ARRAY),
BIT(MLX5DR_ACTION_TYP_LAST),
},
};
@@ -535,6 +537,7 @@ static void mlx5dr_action_fill_stc_attr(struct mlx5dr_action *action,
}
break;
case MLX5DR_ACTION_TYP_TBL:
+ case MLX5DR_ACTION_TYP_DEST_ARRAY:
attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT;
attr->action_offset = MLX5DR_ACTION_OFFSET_HIT;
attr->dest_table_id = obj->id;
@@ -1700,6 +1703,124 @@ mlx5dr_action_create_modify_header(struct mlx5dr_context *ctx,
return NULL;
}
+struct mlx5dr_action *
+mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
+ size_t num_dest,
+ struct mlx5dr_action_dest_attr *dests,
+ uint32_t flags)
+{
+ struct mlx5dr_cmd_set_fte_dest *dest_list = NULL;
+ struct mlx5dr_cmd_ft_create_attr ft_attr = {0};
+ struct mlx5dr_cmd_set_fte_attr fte_attr = {0};
+ struct mlx5dr_cmd_forward_tbl *fw_island;
+ enum mlx5dr_table_type table_type;
+ struct mlx5dr_action *action;
+ uint32_t i;
+ int ret;
+
+ if (num_dest <= 1) {
+ rte_errno = EINVAL;
+ DR_LOG(ERR, "Action must have multiple dests");
+ return NULL;
+ }
+
+ if (flags == (MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_SHARED)) {
+ ft_attr.type = FS_FT_NIC_RX;
+ ft_attr.level = MLX5_IFC_MULTI_PATH_FT_MAX_LEVEL - 1;
+ table_type = MLX5DR_TABLE_TYPE_NIC_RX;
+ } else if (flags == (MLX5DR_ACTION_FLAG_HWS_FDB | MLX5DR_ACTION_FLAG_SHARED)) {
+ ft_attr.type = FS_FT_FDB;
+ ft_attr.level = ctx->caps->fdb_ft.max_level - 1;
+ table_type = MLX5DR_TABLE_TYPE_FDB;
+ } else {
+ DR_LOG(ERR, "Action flags not supported");
+ rte_errno = ENOTSUP;
+ return NULL;
+ }
+
+ if (mlx5dr_context_shared_gvmi_used(ctx)) {
+ DR_LOG(ERR, "Cannot use this action in shared GVMI context");
+ rte_errno = ENOTSUP;
+ return NULL;
+ }
+
+ dest_list = simple_calloc(num_dest, sizeof(*dest_list));
+ if (!dest_list) {
+ DR_LOG(ERR, "Failed to allocate memory for destinations");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ for (i = 0; i < num_dest; i++) {
+ enum mlx5dr_action_type *action_type = dests[i].action_type;
+
+ if (!mlx5dr_action_check_combo(dests[i].action_type, table_type)) {
+ DR_LOG(ERR, "Invalid combination of actions");
+ rte_errno = EINVAL;
+ goto free_dest_list;
+ }
+
+ for (; *action_type != MLX5DR_ACTION_TYP_LAST; action_type++) {
+ switch (*action_type) {
+ case MLX5DR_ACTION_TYP_TBL:
+ dest_list[i].destination_type =
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest_list[i].destination_id = dests[i].dest->devx_dest.devx_obj->id;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ fte_attr.ignore_flow_level = 1;
+ break;
+ case MLX5DR_ACTION_TYP_VPORT:
+ dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest_list[i].destination_id = dests[i].dest->vport.vport_num;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ if (ctx->caps->merged_eswitch) {
+ dest_list[i].ext_flags |=
+ MLX5DR_CMD_EXT_DEST_ESW_OWNER_VHCA_ID;
+ dest_list[i].esw_owner_vhca_id =
+ dests[i].dest->vport.esw_owner_vhca_id;
+ }
+ break;
+ case MLX5DR_ACTION_TYP_TIR:
+ dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest_list[i].destination_id = dests[i].dest->devx_dest.devx_obj->id;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ break;
+ default:
+ DR_LOG(ERR, "Unsupported action in dest_array");
+ rte_errno = ENOTSUP;
+ goto free_dest_list;
+ }
+ }
+ }
+ fte_attr.dests_num = num_dest;
+ fte_attr.dests = dest_list;
+
+ fw_island = mlx5dr_cmd_forward_tbl_create(ctx->ibv_ctx, &ft_attr, &fte_attr);
+ if (!fw_island)
+ goto free_dest_list;
+
+ action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_DEST_ARRAY);
+ if (!action)
+ goto destroy_fw_island;
+
+ ret = mlx5dr_action_create_stcs(action, fw_island->ft);
+ if (ret)
+ goto free_action;
+
+ action->dest_array.fw_island = fw_island;
+
+ simple_free(dest_list);
+ return action;
+
+free_action:
+ simple_free(action);
+destroy_fw_island:
+ mlx5dr_cmd_forward_tbl_destroy(fw_island);
+free_dest_list:
+ simple_free(dest_list);
+ return NULL;
+}
+
struct mlx5dr_action *
mlx5dr_action_create_dest_root(struct mlx5dr_context *ctx,
uint16_t priority,
@@ -1782,6 +1903,10 @@ static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action)
mlx5dr_action_destroy_stcs(action);
mlx5dr_action_put_shared_stc(action, MLX5DR_CONTEXT_SHARED_STC_POP);
break;
+ case MLX5DR_ACTION_TYP_DEST_ARRAY:
+ mlx5dr_action_destroy_stcs(action);
+ mlx5dr_cmd_forward_tbl_destroy(action->dest_array.fw_island);
+ break;
case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
case MLX5DR_ACTION_TYP_MODIFY_HDR:
for (i = 0; i < action->modify_header.num_of_patterns; i++) {
@@ -2291,6 +2416,7 @@ int mlx5dr_action_template_process(struct mlx5dr_action_template *at)
case MLX5DR_ACTION_TYP_TIR:
case MLX5DR_ACTION_TYP_TBL:
case MLX5DR_ACTION_TYP_DEST_ROOT:
+ case MLX5DR_ACTION_TYP_DEST_ARRAY:
case MLX5DR_ACTION_TYP_VPORT:
case MLX5DR_ACTION_TYP_MISS:
/* Hit action */
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.h b/drivers/net/mlx5/hws/mlx5dr_action.h
index 104c6880c1..efe07c9d47 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.h
+++ b/drivers/net/mlx5/hws/mlx5dr_action.h
@@ -151,6 +151,9 @@ struct mlx5dr_action {
struct {
struct mlx5dr_devx_obj *devx_obj;
} devx_dest;
+ struct {
+ struct mlx5dr_cmd_forward_tbl *fw_island;
+ } dest_array;
};
};
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c
index 8f407f9bce..22f7c6b019 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.c
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c
@@ -158,18 +158,31 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
uint32_t group_id,
struct mlx5dr_cmd_set_fte_attr *fte_attr)
{
- uint32_t in[MLX5_ST_SZ_DW(set_fte_in) + MLX5_ST_SZ_DW(dest_format)] = {0};
uint32_t out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
struct mlx5dr_devx_obj *devx_obj;
+ uint32_t dest_entry_sz;
+ uint32_t total_dest_sz;
void *in_flow_context;
uint32_t action_flags;
- void *in_dests;
+ uint8_t *in_dests;
+ uint32_t inlen;
+ uint32_t *in;
+ uint32_t i;
+
+ dest_entry_sz = MLX5_ST_SZ_BYTES(dest_format);
+ total_dest_sz = dest_entry_sz * fte_attr->dests_num;
+ inlen = align((MLX5_ST_SZ_BYTES(set_fte_in) + total_dest_sz), DW_SIZE);
+ in = simple_calloc(1, inlen);
+ if (!in) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
devx_obj = simple_malloc(sizeof(*devx_obj));
if (!devx_obj) {
DR_LOG(ERR, "Failed to allocate memory for fte object");
rte_errno = ENOMEM;
- return NULL;
+ goto free_in;
}
MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
@@ -179,6 +192,7 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_source, fte_attr->flow_source);
+ MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
action_flags = fte_attr->action_flags;
MLX5_SET(flow_context, in_flow_context, action, action_flags);
@@ -195,15 +209,39 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
}
if (action_flags & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- /* Only destination_list_size of size 1 is supported */
- MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
- in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
- MLX5_SET(dest_format, in_dests, destination_type, fte_attr->destination_type);
- MLX5_SET(dest_format, in_dests, destination_id, fte_attr->destination_id);
- MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
+ in_dests = (uint8_t *)MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+
+ for (i = 0; i < fte_attr->dests_num; i++) {
+ struct mlx5dr_cmd_set_fte_dest *dest = &fte_attr->dests[i];
+
+ switch (dest->destination_type) {
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+ if (dest->ext_flags & MLX5DR_CMD_EXT_DEST_ESW_OWNER_VHCA_ID) {
+ MLX5_SET(dest_format, in_dests,
+ destination_eswitch_owner_vhca_id_valid, 1);
+ MLX5_SET(dest_format, in_dests,
+ destination_eswitch_owner_vhca_id,
+ dest->esw_owner_vhca_id);
+ }
+ /* Fall through */
+ case MLX5_FLOW_DESTINATION_TYPE_TIR:
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ MLX5_SET(dest_format, in_dests, destination_type,
+ dest->destination_type);
+ MLX5_SET(dest_format, in_dests, destination_id,
+ dest->destination_id);
+ break;
+ default:
+ rte_errno = EOPNOTSUPP;
+ goto free_devx;
+ }
+
+ in_dests = in_dests + dest_entry_sz;
+ }
+ MLX5_SET(flow_context, in_flow_context, destination_list_size, fte_attr->dests_num);
}
- devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
+ devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, inlen, out, sizeof(out));
if (!devx_obj->obj) {
DR_LOG(ERR, "Failed to create FTE (syndrome: %#x)",
mlx5dr_cmd_get_syndrome(out));
@@ -211,10 +249,13 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
goto free_devx;
}
+ simple_free(in);
return devx_obj;
free_devx:
simple_free(devx_obj);
+free_in:
+ simple_free(in);
return NULL;
}
@@ -1244,6 +1285,9 @@ int mlx5dr_cmd_query_caps(struct ibv_context *ctx,
caps->eswitch_manager_vport_number =
MLX5_GET(query_hca_cap_out, out,
capability.esw_cap.esw_manager_vport_number);
+
+ caps->merged_eswitch = MLX5_GET(query_hca_cap_out, out,
+ capability.esw_cap.merged_eswitch);
}
ret = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h
index bf3a362300..6f4aa3e320 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.h
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h
@@ -5,15 +5,27 @@
#ifndef MLX5DR_CMD_H_
#define MLX5DR_CMD_H_
+enum mlx5dr_cmd_ext_dest_flags {
+ MLX5DR_CMD_EXT_DEST_REFORMAT = 1 << 0,
+ MLX5DR_CMD_EXT_DEST_ESW_OWNER_VHCA_ID = 1 << 1,
+};
+
+struct mlx5dr_cmd_set_fte_dest {
+ uint8_t destination_type;
+ uint32_t destination_id;
+ enum mlx5dr_cmd_ext_dest_flags ext_flags;
+ uint16_t esw_owner_vhca_id;
+};
+
struct mlx5dr_cmd_set_fte_attr {
uint32_t action_flags;
+ uint8_t ignore_flow_level;
+ uint8_t flow_source;
uint8_t encrypt_decrypt_type;
uint32_t encrypt_decrypt_obj_id;
uint32_t packet_reformat_id;
- uint8_t destination_type;
- uint32_t destination_id;
- uint8_t ignore_flow_level;
- uint8_t flow_source;
+ uint32_t dests_num;
+ struct mlx5dr_cmd_set_fte_dest *dests;
};
struct mlx5dr_cmd_ft_create_attr {
@@ -216,6 +228,7 @@ struct mlx5dr_cmd_query_caps {
struct mlx5dr_cmd_query_ft_caps nic_ft;
struct mlx5dr_cmd_query_ft_caps fdb_ft;
bool eswitch_manager;
+ uint8_t merged_eswitch;
uint32_t eswitch_manager_vport_number;
uint8_t log_header_modify_argument_granularity;
uint8_t log_header_modify_argument_max_alloc;
diff --git a/drivers/net/mlx5/hws/mlx5dr_debug.c b/drivers/net/mlx5/hws/mlx5dr_debug.c
index 89529944a3..e7b1f2cc32 100644
--- a/drivers/net/mlx5/hws/mlx5dr_debug.c
+++ b/drivers/net/mlx5/hws/mlx5dr_debug.c
@@ -23,6 +23,7 @@ const char *mlx5dr_debug_action_type_str[] = {
[MLX5DR_ACTION_TYP_ASO_METER] = "ASO_METER",
[MLX5DR_ACTION_TYP_ASO_CT] = "ASO_CT",
[MLX5DR_ACTION_TYP_DEST_ROOT] = "DEST_ROOT",
+ [MLX5DR_ACTION_TYP_DEST_ARRAY] = "DEST_ARRAY",
};
static_assert(ARRAY_SIZE(mlx5dr_debug_action_type_str) == MLX5DR_ACTION_TYP_MAX,
diff --git a/drivers/net/mlx5/hws/mlx5dr_table.c b/drivers/net/mlx5/hws/mlx5dr_table.c
index 91eb92db78..55b9b20150 100644
--- a/drivers/net/mlx5/hws/mlx5dr_table.c
+++ b/drivers/net/mlx5/hws/mlx5dr_table.c
@@ -22,6 +22,7 @@ mlx5dr_table_up_default_fdb_miss_tbl(struct mlx5dr_table *tbl)
struct mlx5dr_cmd_ft_create_attr ft_attr = {0};
struct mlx5dr_cmd_set_fte_attr fte_attr = {0};
struct mlx5dr_cmd_forward_tbl *default_miss;
+ struct mlx5dr_cmd_set_fte_dest dest = {0};
struct mlx5dr_context *ctx = tbl->ctx;
uint8_t tbl_type = tbl->type;
@@ -37,9 +38,11 @@ mlx5dr_table_up_default_fdb_miss_tbl(struct mlx5dr_table *tbl)
ft_attr.level = tbl->ctx->caps->fdb_ft.max_level; /* The last level */
ft_attr.rtc_valid = false;
+ dest.destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.destination_id = ctx->caps->eswitch_manager_vport_number;
fte_attr.action_flags = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- fte_attr.destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- fte_attr.destination_id = ctx->caps->eswitch_manager_vport_number;
+ fte_attr.dests_num = 1;
+ fte_attr.dests = &dest;
default_miss = mlx5dr_cmd_forward_tbl_create(mlx5dr_context_get_local_ibv(ctx),
&ft_attr, &fte_attr);
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v5 05/10] net/mlx5/hws: allow destination into default miss FT
2023-10-25 10:27 ` [PATCH v5 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (3 preceding siblings ...)
2023-10-25 10:27 ` [PATCH v5 04/10] net/mlx5/hws: add support for mirroring Gregory Etelson
@ 2023-10-25 10:27 ` Gregory Etelson
2023-10-25 10:27 ` [PATCH v5 06/10] net/mlx5/hws: support reformat for hws mirror Gregory Etelson
` (4 subsequent siblings)
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-25 10:27 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Erez Shitrit, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Erez Shitrit <erezsh@nvidia.com>
In FDB it will direct the packet into the hypervisor vport.
That allows the user to mirror packets into the default-miss vport.
Signed-off-by: Erez Shitrit <erezsh@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/hws/mlx5dr_action.c | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c
index f068bc7e9c..6b62111593 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.c
+++ b/drivers/net/mlx5/hws/mlx5dr_action.c
@@ -1769,6 +1769,17 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
fte_attr.ignore_flow_level = 1;
break;
+ case MLX5DR_ACTION_TYP_MISS:
+ if (table_type != MLX5DR_TABLE_TYPE_FDB) {
+ DR_LOG(ERR, "Miss action supported for FDB only");
+ rte_errno = ENOTSUP;
+ goto free_dest_list;
+ }
+ dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest_list[i].destination_id =
+ ctx->caps->eswitch_manager_vport_number;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ break;
case MLX5DR_ACTION_TYP_VPORT:
dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest_list[i].destination_id = dests[i].dest->vport.vport_num;
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v5 06/10] net/mlx5/hws: support reformat for hws mirror
2023-10-25 10:27 ` [PATCH v5 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (4 preceding siblings ...)
2023-10-25 10:27 ` [PATCH v5 05/10] net/mlx5/hws: allow destination into default miss FT Gregory Etelson
@ 2023-10-25 10:27 ` Gregory Etelson
2023-10-25 10:27 ` [PATCH v5 07/10] net/mlx5: reformat HWS code for HWS mirror action Gregory Etelson
` (3 subsequent siblings)
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-25 10:27 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Haifei Luo, Shun Hao, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Haifei Luo <haifeil@nvidia.com>
In dest_array action, an optional reformat action can be applied to each
destination. This patch supports this by using the extended destination
entry.
Signed-off-by: Haifei Luo <haifeil@nvidia.com>
Signed-off-by: Shun Hao <shunh@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/common/mlx5/mlx5_prm.h | 15 +++++++
drivers/net/mlx5/hws/mlx5dr_action.c | 67 +++++++++++++++++++++++++++-
drivers/net/mlx5/hws/mlx5dr_action.h | 2 +
drivers/net/mlx5/hws/mlx5dr_cmd.c | 10 ++++-
drivers/net/mlx5/hws/mlx5dr_cmd.h | 2 +
5 files changed, 94 insertions(+), 2 deletions(-)
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index aa0b622ca2..bced5a59dd 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -5352,6 +5352,21 @@ enum mlx5_parse_graph_arc_node_index {
MLX5_GRAPH_ARC_NODE_PROGRAMMABLE = 0x1f,
};
+enum mlx5_packet_reformat_context_reformat_type {
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4 = 0x5,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L3_ESP_TUNNEL = 0x6,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4 = 0x7,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_DEL_ESP_TRANSPORT = 0x8,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L3_ESP_TUNNEL_TO_L2 = 0x9,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP = 0xA,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6 = 0xB,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6 = 0xC,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_NISP_TNL = 0xD,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_REMOVE_NISP_TNL = 0xE,
+};
+
#define MLX5_PARSE_GRAPH_FLOW_SAMPLE_MAX 8
#define MLX5_PARSE_GRAPH_IN_ARC_MAX 8
#define MLX5_PARSE_GRAPH_OUT_ARC_MAX 8
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c
index 6b62111593..11a7c58925 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.c
+++ b/drivers/net/mlx5/hws/mlx5dr_action.c
@@ -1703,6 +1703,44 @@ mlx5dr_action_create_modify_header(struct mlx5dr_context *ctx,
return NULL;
}
+static struct mlx5dr_devx_obj *
+mlx5dr_action_dest_array_process_reformat(struct mlx5dr_context *ctx,
+ enum mlx5dr_action_type type,
+ void *reformat_data,
+ size_t reformat_data_sz)
+{
+ struct mlx5dr_cmd_packet_reformat_create_attr pr_attr = {0};
+ struct mlx5dr_devx_obj *reformat_devx_obj;
+
+ if (!reformat_data || !reformat_data_sz) {
+ DR_LOG(ERR, "Empty reformat action or data");
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ switch (type) {
+ case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ pr_attr.type = MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L2_TUNNEL;
+ break;
+ case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ pr_attr.type = MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
+ break;
+ default:
+ DR_LOG(ERR, "Invalid value for reformat type");
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ pr_attr.reformat_param_0 = 0;
+ pr_attr.data_sz = reformat_data_sz;
+ pr_attr.data = reformat_data;
+
+ reformat_devx_obj = mlx5dr_cmd_packet_reformat_create(ctx->ibv_ctx, &pr_attr);
+ if (!reformat_devx_obj)
+ return NULL;
+
+ return reformat_devx_obj;
+}
+
struct mlx5dr_action *
mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
size_t num_dest,
@@ -1710,6 +1748,7 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
uint32_t flags)
{
struct mlx5dr_cmd_set_fte_dest *dest_list = NULL;
+ struct mlx5dr_devx_obj *packet_reformat = NULL;
struct mlx5dr_cmd_ft_create_attr ft_attr = {0};
struct mlx5dr_cmd_set_fte_attr fte_attr = {0};
struct mlx5dr_cmd_forward_tbl *fw_island;
@@ -1796,6 +1835,21 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
dest_list[i].destination_id = dests[i].dest->devx_dest.devx_obj->id;
fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
break;
+ case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ packet_reformat = mlx5dr_action_dest_array_process_reformat
+ (ctx,
+ *action_type,
+ dests[i].reformat.reformat_data,
+ dests[i].reformat.reformat_data_sz);
+ if (!packet_reformat)
+ goto free_dest_list;
+
+ dest_list[i].ext_flags |= MLX5DR_CMD_EXT_DEST_REFORMAT;
+ dest_list[i].ext_reformat = packet_reformat;
+ ft_attr.reformat_en = true;
+ fte_attr.extended_dest = 1;
+ break;
default:
DR_LOG(ERR, "Unsupported action in dest_array");
rte_errno = ENOTSUP;
@@ -1819,8 +1873,9 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
goto free_action;
action->dest_array.fw_island = fw_island;
+ action->dest_array.num_dest = num_dest;
+ action->dest_array.dest_list = dest_list;
- simple_free(dest_list);
return action;
free_action:
@@ -1828,6 +1883,10 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
destroy_fw_island:
mlx5dr_cmd_forward_tbl_destroy(fw_island);
free_dest_list:
+ for (i = 0; i < num_dest; i++) {
+ if (dest_list[i].ext_reformat)
+ mlx5dr_cmd_destroy_obj(dest_list[i].ext_reformat);
+ }
simple_free(dest_list);
return NULL;
}
@@ -1917,6 +1976,12 @@ static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action)
case MLX5DR_ACTION_TYP_DEST_ARRAY:
mlx5dr_action_destroy_stcs(action);
mlx5dr_cmd_forward_tbl_destroy(action->dest_array.fw_island);
+ for (i = 0; i < action->dest_array.num_dest; i++) {
+ if (action->dest_array.dest_list[i].ext_reformat)
+ mlx5dr_cmd_destroy_obj
+ (action->dest_array.dest_list[i].ext_reformat);
+ }
+ simple_free(action->dest_array.dest_list);
break;
case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
case MLX5DR_ACTION_TYP_MODIFY_HDR:
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.h b/drivers/net/mlx5/hws/mlx5dr_action.h
index efe07c9d47..582a38bebc 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.h
+++ b/drivers/net/mlx5/hws/mlx5dr_action.h
@@ -153,6 +153,8 @@ struct mlx5dr_action {
} devx_dest;
struct {
struct mlx5dr_cmd_forward_tbl *fw_island;
+ size_t num_dest;
+ struct mlx5dr_cmd_set_fte_dest *dest_list;
} dest_array;
};
};
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c
index 22f7c6b019..781de40c02 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.c
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c
@@ -169,7 +169,9 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
uint32_t *in;
uint32_t i;
- dest_entry_sz = MLX5_ST_SZ_BYTES(dest_format);
+ dest_entry_sz = fte_attr->extended_dest ?
+ MLX5_ST_SZ_BYTES(extended_dest_format) :
+ MLX5_ST_SZ_BYTES(dest_format);
total_dest_sz = dest_entry_sz * fte_attr->dests_num;
inlen = align((MLX5_ST_SZ_BYTES(set_fte_in) + total_dest_sz), DW_SIZE);
in = simple_calloc(1, inlen);
@@ -192,6 +194,7 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_source, fte_attr->flow_source);
+ MLX5_SET(flow_context, in_flow_context, extended_destination, fte_attr->extended_dest);
MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
action_flags = fte_attr->action_flags;
@@ -230,6 +233,11 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
dest->destination_type);
MLX5_SET(dest_format, in_dests, destination_id,
dest->destination_id);
+ if (dest->ext_flags & MLX5DR_CMD_EXT_DEST_REFORMAT) {
+ MLX5_SET(dest_format, in_dests, packet_reformat, 1);
+ MLX5_SET(extended_dest_format, in_dests, packet_reformat_id,
+ dest->ext_reformat->id);
+ }
break;
default:
rte_errno = EOPNOTSUPP;
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h
index 6f4aa3e320..28e5ea4726 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.h
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h
@@ -14,6 +14,7 @@ struct mlx5dr_cmd_set_fte_dest {
uint8_t destination_type;
uint32_t destination_id;
enum mlx5dr_cmd_ext_dest_flags ext_flags;
+ struct mlx5dr_devx_obj *ext_reformat;
uint16_t esw_owner_vhca_id;
};
@@ -21,6 +22,7 @@ struct mlx5dr_cmd_set_fte_attr {
uint32_t action_flags;
uint8_t ignore_flow_level;
uint8_t flow_source;
+ uint8_t extended_dest;
uint8_t encrypt_decrypt_type;
uint32_t encrypt_decrypt_obj_id;
uint32_t packet_reformat_id;
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v5 07/10] net/mlx5: reformat HWS code for HWS mirror action
2023-10-25 10:27 ` [PATCH v5 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (5 preceding siblings ...)
2023-10-25 10:27 ` [PATCH v5 06/10] net/mlx5/hws: support reformat for hws mirror Gregory Etelson
@ 2023-10-25 10:27 ` Gregory Etelson
2023-10-25 10:27 ` [PATCH v5 08/10] net/mlx5: support " Gregory Etelson
` (2 subsequent siblings)
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-25 10:27 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Suanming Mou, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam
Reformat HWS code for HWS mirror action.
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/mlx5_flow_hw.c | 70 ++++++++++++++++++---------------
1 file changed, 39 insertions(+), 31 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 6fcf654e4a..b2215fb5cf 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -4548,6 +4548,17 @@ static enum mlx5dr_action_type mlx5_hw_dr_action_types[] = {
[RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL] = MLX5DR_ACTION_TYP_DEST_ROOT,
};
+static inline void
+action_template_set_type(struct rte_flow_actions_template *at,
+ enum mlx5dr_action_type *action_types,
+ unsigned int action_src, uint16_t *curr_off,
+ enum mlx5dr_action_type type)
+{
+ at->actions_off[action_src] = *curr_off;
+ action_types[*curr_off] = type;
+ *curr_off = *curr_off + 1;
+}
+
static int
flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
unsigned int action_src,
@@ -4565,9 +4576,8 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
type = mask->type;
switch (type) {
case RTE_FLOW_ACTION_TYPE_RSS:
- at->actions_off[action_src] = *curr_off;
- action_types[*curr_off] = MLX5DR_ACTION_TYP_TIR;
- *curr_off = *curr_off + 1;
+ action_template_set_type(at, action_types, action_src, curr_off,
+ MLX5DR_ACTION_TYP_TIR);
break;
case RTE_FLOW_ACTION_TYPE_AGE:
case RTE_FLOW_ACTION_TYPE_COUNT:
@@ -4575,23 +4585,20 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
* Both AGE and COUNT action need counter, the first one fills
* the action_types array, and the second only saves the offset.
*/
- if (*cnt_off == UINT16_MAX) {
- *cnt_off = *curr_off;
- action_types[*cnt_off] = MLX5DR_ACTION_TYP_CTR;
- *curr_off = *curr_off + 1;
- }
+ if (*cnt_off == UINT16_MAX)
+ action_template_set_type(at, action_types,
+ action_src, curr_off,
+ MLX5DR_ACTION_TYP_CTR);
at->actions_off[action_src] = *cnt_off;
break;
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
- at->actions_off[action_src] = *curr_off;
- action_types[*curr_off] = MLX5DR_ACTION_TYP_ASO_CT;
- *curr_off = *curr_off + 1;
+ action_template_set_type(at, action_types, action_src, curr_off,
+ MLX5DR_ACTION_TYP_ASO_CT);
break;
case RTE_FLOW_ACTION_TYPE_QUOTA:
case RTE_FLOW_ACTION_TYPE_METER_MARK:
- at->actions_off[action_src] = *curr_off;
- action_types[*curr_off] = MLX5DR_ACTION_TYP_ASO_METER;
- *curr_off = *curr_off + 1;
+ action_template_set_type(at, action_types, action_src, curr_off,
+ MLX5DR_ACTION_TYP_ASO_METER);
break;
default:
DRV_LOG(WARNING, "Unsupported shared action type: %d", type);
@@ -5101,31 +5108,32 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
at->reformat_off = UINT16_MAX;
at->mhdr_off = UINT16_MAX;
at->rx_cpy_pos = pos;
- /*
- * mlx5 PMD hacks indirect action index directly to the action conf.
- * The rte_flow_conv() function copies the content from conf pointer.
- * Need to restore the indirect action index from action conf here.
- */
for (i = 0; actions->type != RTE_FLOW_ACTION_TYPE_END;
actions++, masks++, i++) {
- if (actions->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
+ const struct rte_flow_action_modify_field *info;
+
+ switch (actions->type) {
+ /*
+ * mlx5 PMD hacks indirect action index directly to the action conf.
+ * The rte_flow_conv() function copies the content from conf pointer.
+ * Need to restore the indirect action index from action conf here.
+ */
+ case RTE_FLOW_ACTION_TYPE_INDIRECT:
at->actions[i].conf = actions->conf;
at->masks[i].conf = masks->conf;
- }
- if (actions->type == RTE_FLOW_ACTION_TYPE_MODIFY_FIELD) {
- const struct rte_flow_action_modify_field *info = actions->conf;
-
+ break;
+ case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
+ info = actions->conf;
if ((info->dst.field == RTE_FLOW_FIELD_FLEX_ITEM &&
flow_hw_flex_item_acquire(dev, info->dst.flex_handle,
&at->flex_item)) ||
- (info->src.field == RTE_FLOW_FIELD_FLEX_ITEM &&
- flow_hw_flex_item_acquire(dev, info->src.flex_handle,
- &at->flex_item))) {
- rte_flow_error_set(error, rte_errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "Failed to acquire flex item");
+ (info->src.field == RTE_FLOW_FIELD_FLEX_ITEM &&
+ flow_hw_flex_item_acquire(dev, info->src.flex_handle,
+ &at->flex_item)))
goto error;
- }
+ break;
+ default:
+ break;
}
}
at->tmpl = flow_hw_dr_actions_template_create(at);
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v5 08/10] net/mlx5: support HWS mirror action
2023-10-25 10:27 ` [PATCH v5 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (6 preceding siblings ...)
2023-10-25 10:27 ` [PATCH v5 07/10] net/mlx5: reformat HWS code for HWS mirror action Gregory Etelson
@ 2023-10-25 10:27 ` Gregory Etelson
2023-10-25 10:27 ` [PATCH v5 09/10] net/mlx5: reformat HWS code for indirect list actions Gregory Etelson
2023-10-25 10:27 ` [PATCH v5 10/10] net/mlx5: support indirect list METER_MARK action Gregory Etelson
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-25 10:27 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Suanming Mou, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam
HWS mirror clones original packet to one or two destinations and
proceeds with the original packet path.
The mirror has no dedicated RTE flow action type.
Mirror object is referenced by INDIRECT_LIST action.
INDIRECT_LIST for a mirror built from actions list:
SAMPLE [/ SAMPLE] / <Orig. packet destination> / END
Mirror SAMPLE action defines packet clone. It specifies the clone
destination and optional clone reformat action.
Destination action for both clone and original packet depends on HCA
domain:
- for NIC RX, destination is either RSS or QUEUE
- for FDB, destination is PORT
HWS mirror was implemented with the INDIRECT_LIST flow action.
MLX5 PMD defines a general `struct mlx5_indirect_list` type for all
INDIRECT_LIST handler objects:
struct mlx5_indirect_list {
enum mlx5_indirect_list_type type;
LIST_ENTRY(mlx5_indirect_list) chain;
char data[];
};
Specific INDIRECT_LIST type must overload `mlx5_indirect_list::data`
and provide unique `type` value.
PMD returns a pointer to `mlx5_indirect_list` object.
Existing non-masked actions template API cannot identify flow actions
in INDIRECT_LIST handler because INDIRECT_LIST handler can represent
several flow actions.
For example:
A: SAMPLE / JUMP
B: SAMPLE / SAMPLE / RSS
Actions template command
template indirect_list / end mask indirect_list 0 / end
does not provide any information to differentiate between flow
actions in A and B.
MLX5 PMD requires INDIRECT_LIST configuration parameter in the
template section:
Non-masked INDIRECT_LIST API:
=============================
template indirect_list X / end mask indirect_list 0 / end
PMD identifies type of X handler and will use the same type in
template creation. Actual parameters for actions in the list will
be extracted from the flow configuration.
Masked INDIRECT_LIST API:
=========================
template indirect_list X / end mask indirect_list -1UL / end
PMD creates action template from actions types and configurations
referenced by X.
INDIRECT_LIST action without configuration is invalid and will be
rejected by PMD.
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
doc/guides/nics/features/mlx5.ini | 1 +
doc/guides/rel_notes/release_23_11.rst | 1 +
drivers/net/mlx5/mlx5.c | 1 +
drivers/net/mlx5/mlx5.h | 2 +
drivers/net/mlx5/mlx5_flow.c | 134 ++++++
drivers/net/mlx5/mlx5_flow.h | 69 ++-
drivers/net/mlx5/mlx5_flow_hw.c | 615 ++++++++++++++++++++++++-
7 files changed, 818 insertions(+), 5 deletions(-)
diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini
index fc67415c6c..a85d755734 100644
--- a/doc/guides/nics/features/mlx5.ini
+++ b/doc/guides/nics/features/mlx5.ini
@@ -106,6 +106,7 @@ drop = Y
flag = Y
inc_tcp_ack = Y
inc_tcp_seq = Y
+indirect_list = Y
jump = Y
mark = Y
meter = Y
diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 0a6fc76a9d..81d606e773 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -143,6 +143,7 @@ New Features
* **Updated NVIDIA mlx5 net driver.**
* Added support for Network Service Header (NSH) flow matching.
+ * Added support for ``RTE_FLOW_ACTION_TYPE_INDIRECT_LIST`` flow action.
* **Updated Solarflare net driver.**
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 997df595d0..08b7b03365 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -2168,6 +2168,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
/* Free the eCPRI flex parser resource. */
mlx5_flex_parser_ecpri_release(dev);
mlx5_flex_item_port_cleanup(dev);
+ mlx5_indirect_list_handles_release(dev);
#ifdef HAVE_MLX5_HWS_SUPPORT
flow_hw_destroy_vport_action(dev);
flow_hw_resource_release(dev);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 0b709a1bda..f3b872f59c 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1791,6 +1791,8 @@ struct mlx5_priv {
LIST_HEAD(ind_tables, mlx5_ind_table_obj) ind_tbls;
/* Standalone indirect tables. */
LIST_HEAD(stdl_ind_tables, mlx5_ind_table_obj) standalone_ind_tbls;
+ /* Objects created with indirect list action */
+ LIST_HEAD(indirect_list, mlx5_indirect_list) indirect_list_head;
/* Pointer to next element. */
rte_rwlock_t ind_tbls_lock;
uint32_t refcnt; /**< Reference counter. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 8ad85e6027..99b814d815 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -62,6 +62,30 @@ struct tunnel_default_miss_ctx {
};
};
+void
+mlx5_indirect_list_handles_release(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ while (!LIST_EMPTY(&priv->indirect_list_head)) {
+ struct mlx5_indirect_list *e =
+ LIST_FIRST(&priv->indirect_list_head);
+
+ LIST_REMOVE(e, entry);
+ switch (e->type) {
+#ifdef HAVE_MLX5_HWS_SUPPORT
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
+ mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)e, true);
+ break;
+#endif
+ default:
+ DRV_LOG(ERR, "invalid indirect list type");
+ MLX5_ASSERT(false);
+ break;
+ }
+ }
+}
+
static int
flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
struct rte_flow *flow,
@@ -1120,6 +1144,32 @@ mlx5_flow_async_action_handle_query_update
enum rte_flow_query_update_mode qu_mode,
void *user_data, struct rte_flow_error *error);
+static struct rte_flow_action_list_handle *
+mlx5_action_list_handle_create(struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error);
+
+static int
+mlx5_action_list_handle_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_action_list_handle *handle,
+ struct rte_flow_error *error);
+
+static struct rte_flow_action_list_handle *
+mlx5_flow_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct
+ rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data,
+ struct rte_flow_error *error);
+static int
+mlx5_flow_async_action_list_handle_destroy
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow_action_list_handle *action_handle,
+ void *user_data, struct rte_flow_error *error);
+
static const struct rte_flow_ops mlx5_flow_ops = {
.validate = mlx5_flow_validate,
.create = mlx5_flow_create,
@@ -1135,6 +1185,8 @@ static const struct rte_flow_ops mlx5_flow_ops = {
.action_handle_update = mlx5_action_handle_update,
.action_handle_query = mlx5_action_handle_query,
.action_handle_query_update = mlx5_action_handle_query_update,
+ .action_list_handle_create = mlx5_action_list_handle_create,
+ .action_list_handle_destroy = mlx5_action_list_handle_destroy,
.tunnel_decap_set = mlx5_flow_tunnel_decap_set,
.tunnel_match = mlx5_flow_tunnel_match,
.tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
@@ -1163,6 +1215,10 @@ static const struct rte_flow_ops mlx5_flow_ops = {
.async_action_handle_query = mlx5_flow_async_action_handle_query,
.async_action_handle_destroy = mlx5_flow_async_action_handle_destroy,
.async_actions_update = mlx5_flow_async_flow_update,
+ .async_action_list_handle_create =
+ mlx5_flow_async_action_list_handle_create,
+ .async_action_list_handle_destroy =
+ mlx5_flow_async_action_list_handle_destroy,
};
/* Tunnel information. */
@@ -10869,6 +10925,84 @@ mlx5_action_handle_query_update(struct rte_eth_dev *dev,
query, qu_mode, error);
}
+
+#define MLX5_DRV_FOPS_OR_ERR(dev, fops, drv_cb, ret) \
+{ \
+ struct rte_flow_attr attr = { .transfer = 0 }; \
+ enum mlx5_flow_drv_type drv_type = flow_get_drv_type((dev), &attr); \
+ if (drv_type == MLX5_FLOW_TYPE_MIN || \
+ drv_type == MLX5_FLOW_TYPE_MAX) { \
+ rte_flow_error_set(error, ENOTSUP, \
+ RTE_FLOW_ERROR_TYPE_ACTION, \
+ NULL, "invalid driver type"); \
+ return ret; \
+ } \
+ (fops) = flow_get_drv_ops(drv_type); \
+ if (!(fops) || !(fops)->drv_cb) { \
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, \
+ NULL, "no action_list handler"); \
+ return ret; \
+ } \
+}
+
+static struct rte_flow_action_list_handle *
+mlx5_action_list_handle_create(struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops, action_list_handle_create, NULL);
+ return fops->action_list_handle_create(dev, conf, actions, error);
+}
+
+static int
+mlx5_action_list_handle_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_action_list_handle *handle,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops, action_list_handle_destroy, ENOTSUP);
+ return fops->action_list_handle_destroy(dev, handle, error);
+}
+
+static struct rte_flow_action_list_handle *
+mlx5_flow_async_action_list_handle_create(struct rte_eth_dev *dev,
+ uint32_t queue_id,
+ const struct
+ rte_flow_op_attr *op_attr,
+ const struct
+ rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops, async_action_list_handle_create, NULL);
+ return fops->async_action_list_handle_create(dev, queue_id, op_attr,
+ conf, actions, user_data,
+ error);
+}
+
+static int
+mlx5_flow_async_action_list_handle_destroy
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow_action_list_handle *action_handle,
+ void *user_data, struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops,
+ async_action_list_handle_destroy, ENOTSUP);
+ return fops->async_action_list_handle_destroy(dev, queue_id, op_attr,
+ action_handle, user_data,
+ error);
+}
+
/**
* Destroy all indirect actions (shared RSS).
*
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 903ff66d72..580db80fd4 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -65,7 +65,7 @@ enum mlx5_rte_flow_field_id {
(((uint32_t)(uintptr_t)(handle)) & \
((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1))
-enum {
+enum mlx5_indirect_type {
MLX5_INDIRECT_ACTION_TYPE_RSS,
MLX5_INDIRECT_ACTION_TYPE_AGE,
MLX5_INDIRECT_ACTION_TYPE_COUNT,
@@ -97,6 +97,28 @@ enum {
#define MLX5_ACTION_CTX_CT_GET_OWNER MLX5_INDIRECT_ACT_CT_GET_OWNER
#define MLX5_ACTION_CTX_CT_GEN_IDX MLX5_INDIRECT_ACT_CT_GEN_IDX
+enum mlx5_indirect_list_type {
+ MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 1,
+};
+
+/*
+ * Base type for indirect list type.
+ * Actual indirect list type MUST override that type and put type spec data
+ * after the `chain`.
+ */
+struct mlx5_indirect_list {
+ /* type field MUST be the first */
+ enum mlx5_indirect_list_type type;
+ LIST_ENTRY(mlx5_indirect_list) entry;
+ /* put type specific data after chain */
+};
+
+static __rte_always_inline enum mlx5_indirect_list_type
+mlx5_get_indirect_list_type(const struct mlx5_indirect_list *obj)
+{
+ return obj->type;
+}
+
/* Matches on selected register. */
struct mlx5_rte_flow_item_tag {
enum modify_reg id;
@@ -1218,6 +1240,10 @@ struct rte_flow_hw {
#pragma GCC diagnostic error "-Wpedantic"
#endif
+struct mlx5dr_action;
+typedef struct mlx5dr_action *
+(*indirect_list_callback_t)(const struct rte_flow_action *);
+
/* rte flow action translate to DR action struct. */
struct mlx5_action_construct_data {
LIST_ENTRY(mlx5_action_construct_data) next;
@@ -1266,6 +1292,9 @@ struct mlx5_action_construct_data {
struct {
uint32_t id;
} shared_meter;
+ struct {
+ indirect_list_callback_t cb;
+ } indirect_list;
};
};
@@ -1776,6 +1805,17 @@ typedef int (*mlx5_flow_action_query_update_t)
const void *update, void *data,
enum rte_flow_query_update_mode qu_mode,
struct rte_flow_error *error);
+typedef struct rte_flow_action_list_handle *
+(*mlx5_flow_action_list_handle_create_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error);
+typedef int
+(*mlx5_flow_action_list_handle_destroy_t)
+ (struct rte_eth_dev *dev,
+ struct rte_flow_action_list_handle *handle,
+ struct rte_flow_error *error);
typedef int (*mlx5_flow_sync_domain_t)
(struct rte_eth_dev *dev,
uint32_t domains,
@@ -1964,6 +2004,20 @@ typedef int (*mlx5_flow_async_action_handle_destroy_t)
struct rte_flow_action_handle *handle,
void *user_data,
struct rte_flow_error *error);
+typedef struct rte_flow_action_list_handle *
+(*mlx5_flow_async_action_list_handle_create_t)
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data, struct rte_flow_error *error);
+typedef int
+(*mlx5_flow_async_action_list_handle_destroy_t)
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow_action_list_handle *action_handle,
+ void *user_data, struct rte_flow_error *error);
+
struct mlx5_flow_driver_ops {
mlx5_flow_validate_t validate;
@@ -1999,6 +2053,8 @@ struct mlx5_flow_driver_ops {
mlx5_flow_action_update_t action_update;
mlx5_flow_action_query_t action_query;
mlx5_flow_action_query_update_t action_query_update;
+ mlx5_flow_action_list_handle_create_t action_list_handle_create;
+ mlx5_flow_action_list_handle_destroy_t action_list_handle_destroy;
mlx5_flow_sync_domain_t sync_domain;
mlx5_flow_discover_priorities_t discover_priorities;
mlx5_flow_item_create_t item_create;
@@ -2025,6 +2081,10 @@ struct mlx5_flow_driver_ops {
mlx5_flow_async_action_handle_query_update_t async_action_query_update;
mlx5_flow_async_action_handle_query_t async_action_query;
mlx5_flow_async_action_handle_destroy_t async_action_destroy;
+ mlx5_flow_async_action_list_handle_create_t
+ async_action_list_handle_create;
+ mlx5_flow_async_action_list_handle_destroy_t
+ async_action_list_handle_destroy;
};
/* mlx5_flow.c */
@@ -2755,4 +2815,11 @@ flow_hw_get_srh_flex_parser_byte_off_from_ctx(void *dr_ctx __rte_unused)
#endif
return UINT32_MAX;
}
+void
+mlx5_indirect_list_handles_release(struct rte_eth_dev *dev);
+#ifdef HAVE_MLX5_HWS_SUPPORT
+struct mlx5_mirror;
+void
+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release);
+#endif
#endif /* RTE_PMD_MLX5_FLOW_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index b2215fb5cf..1c3d915be1 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -58,6 +58,24 @@
#define MLX5_HW_VLAN_PUSH_VID_IDX 1
#define MLX5_HW_VLAN_PUSH_PCP_IDX 2
+#define MLX5_MIRROR_MAX_CLONES_NUM 3
+#define MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN 4
+
+struct mlx5_mirror_clone {
+ enum rte_flow_action_type type;
+ void *action_ctx;
+};
+
+struct mlx5_mirror {
+ /* type field MUST be the first */
+ enum mlx5_indirect_list_type type;
+ LIST_ENTRY(mlx5_indirect_list) entry;
+
+ uint32_t clones_num;
+ struct mlx5dr_action *mirror_action;
+ struct mlx5_mirror_clone clone[MLX5_MIRROR_MAX_CLONES_NUM];
+};
+
static int flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev);
static int flow_hw_translate_group(struct rte_eth_dev *dev,
const struct mlx5_flow_template_table_cfg *cfg,
@@ -568,6 +586,22 @@ __flow_hw_act_data_general_append(struct mlx5_priv *priv,
return 0;
}
+static __rte_always_inline int
+flow_hw_act_data_indirect_list_append(struct mlx5_priv *priv,
+ struct mlx5_hw_actions *acts,
+ enum rte_flow_action_type type,
+ uint16_t action_src, uint16_t action_dst,
+ indirect_list_callback_t cb)
+{
+ struct mlx5_action_construct_data *act_data;
+
+ act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
+ if (!act_data)
+ return -1;
+ act_data->indirect_list.cb = cb;
+ LIST_INSERT_HEAD(&acts->act_list, act_data, next);
+ return 0;
+}
/**
* Append dynamic encap action to the dynamic action list.
*
@@ -1383,6 +1417,48 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev,
return 0;
}
+static struct mlx5dr_action *
+flow_hw_mirror_action(const struct rte_flow_action *action)
+{
+ struct mlx5_mirror *mirror = (void *)(uintptr_t)action->conf;
+
+ return mirror->mirror_action;
+}
+
+static int
+table_template_translate_indirect_list(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *mask,
+ struct mlx5_hw_actions *acts,
+ uint16_t action_src,
+ uint16_t action_dst)
+{
+ int ret;
+ bool is_masked = action->conf && mask->conf;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ enum mlx5_indirect_list_type type;
+
+ if (!action->conf)
+ return -EINVAL;
+ type = mlx5_get_indirect_list_type(action->conf);
+ switch (type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
+ if (is_masked) {
+ acts->rule_acts[action_dst].action = flow_hw_mirror_action(action);
+ } else {
+ ret = flow_hw_act_data_indirect_list_append
+ (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
+ action_src, action_dst, flow_hw_mirror_action);
+ if (ret)
+ return ret;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
/**
* Translate rte_flow actions to DR action.
*
@@ -1419,7 +1495,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
struct rte_flow_action *actions = at->actions;
struct rte_flow_action *action_start = actions;
struct rte_flow_action *masks = at->masks;
- enum mlx5dr_action_type refmt_type = 0;
+ enum mlx5dr_action_type refmt_type = MLX5DR_ACTION_TYP_LAST;
const struct rte_flow_action_raw_encap *raw_encap_data;
const struct rte_flow_item *enc_item = NULL, *enc_item_m = NULL;
uint16_t reformat_src = 0;
@@ -1433,7 +1509,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
uint16_t action_pos;
uint16_t jump_pos;
uint32_t ct_idx;
- int err;
+ int ret, err;
uint32_t target_grp = 0;
int table_type;
@@ -1445,7 +1521,20 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
else
type = MLX5DR_TABLE_TYPE_NIC_RX;
for (; !actions_end; actions++, masks++) {
- switch (actions->type) {
+ switch ((int)actions->type) {
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
+ action_pos = at->actions_off[actions - at->actions];
+ if (!attr->group) {
+ DRV_LOG(ERR, "Indirect action is not supported in root table.");
+ goto err;
+ }
+ ret = table_template_translate_indirect_list
+ (dev, actions, masks, acts,
+ actions - action_start,
+ action_pos);
+ if (ret)
+ goto err;
+ break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
action_pos = at->actions_off[actions - at->actions];
if (!attr->group) {
@@ -2301,7 +2390,11 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
MLX5_ASSERT(action->type ==
RTE_FLOW_ACTION_TYPE_INDIRECT ||
(int)action->type == act_data->type);
- switch (act_data->type) {
+ switch ((int)act_data->type) {
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
+ rule_acts[act_data->action_dst].action =
+ act_data->indirect_list.cb(action);
+ break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
if (flow_hw_shared_action_construct
(dev, queue, action, table, it_idx,
@@ -4366,6 +4459,8 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev,
switch (action->type) {
case RTE_FLOW_ACTION_TYPE_VOID:
break;
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
+ break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
ret = flow_hw_validate_action_indirect(dev, action,
mask,
@@ -4607,6 +4702,28 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
return 0;
}
+
+static int
+flow_hw_template_actions_list(struct rte_flow_actions_template *at,
+ unsigned int action_src,
+ enum mlx5dr_action_type *action_types,
+ uint16_t *curr_off)
+{
+ enum mlx5_indirect_list_type list_type;
+
+ list_type = mlx5_get_indirect_list_type(at->actions[action_src].conf);
+ switch (list_type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
+ action_template_set_type(at, action_types, action_src, curr_off,
+ MLX5DR_ACTION_TYP_DEST_ARRAY);
+ break;
+ default:
+ DRV_LOG(ERR, "Unsupported indirect list type");
+ return -EINVAL;
+ }
+ return 0;
+}
+
/**
* Create DR action template based on a provided sequence of flow actions.
*
@@ -4639,6 +4756,12 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
switch (at->actions[i].type) {
case RTE_FLOW_ACTION_TYPE_VOID:
break;
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
+ ret = flow_hw_template_actions_list(at, i, action_types,
+ &curr_off);
+ if (ret)
+ return NULL;
+ break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
ret = flow_hw_dr_actions_template_handle_shared
(&at->masks[i],
@@ -5119,6 +5242,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
* Need to restore the indirect action index from action conf here.
*/
case RTE_FLOW_ACTION_TYPE_INDIRECT:
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
at->actions[i].conf = actions->conf;
at->masks[i].conf = masks->conf;
break;
@@ -9354,6 +9478,483 @@ flow_hw_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
return flow_hw_get_q_aged_flows(dev, 0, contexts, nb_contexts, error);
}
+static void
+mlx5_mirror_destroy_clone(struct rte_eth_dev *dev,
+ struct mlx5_mirror_clone *clone)
+{
+ switch (clone->type) {
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ mlx5_hrxq_release(dev,
+ ((struct mlx5_hrxq *)(clone->action_ctx))->idx);
+ break;
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ flow_hw_jump_release(dev, clone->action_ctx);
+ break;
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ default:
+ break;
+ }
+}
+
+void
+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release)
+{
+ uint32_t i;
+
+ if (mirror->entry.le_prev)
+ LIST_REMOVE(mirror, entry);
+ for (i = 0; i < mirror->clones_num; i++)
+ mlx5_mirror_destroy_clone(dev, &mirror->clone[i]);
+ if (mirror->mirror_action)
+ mlx5dr_action_destroy(mirror->mirror_action);
+ if (release)
+ mlx5_free(mirror);
+}
+
+static inline enum mlx5dr_table_type
+get_mlx5dr_table_type(const struct rte_flow_attr *attr)
+{
+ enum mlx5dr_table_type type;
+
+ if (attr->transfer)
+ type = MLX5DR_TABLE_TYPE_FDB;
+ else if (attr->egress)
+ type = MLX5DR_TABLE_TYPE_NIC_TX;
+ else
+ type = MLX5DR_TABLE_TYPE_NIC_RX;
+ return type;
+}
+
+static __rte_always_inline bool
+mlx5_mirror_terminal_action(const struct rte_flow_action *action)
+{
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+static bool
+mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ if (priv->sh->esw_mode)
+ return false;
+ break;
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ if (!priv->sh->esw_mode)
+ return false;
+ if (action[0].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP &&
+ action[1].type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
+ return false;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+/**
+ * Valid mirror actions list includes one or two SAMPLE actions
+ * followed by JUMP.
+ *
+ * @return
+ * Number of mirrors if the *actions* list was valid.
+ * -EINVAL otherwise.
+ */
+static int
+mlx5_hw_mirror_actions_list_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions)
+{
+ if (actions[0].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
+ int i = 1;
+ bool valid;
+ const struct rte_flow_action_sample *sample = actions[0].conf;
+ valid = mlx5_mirror_validate_sample_action(dev, sample->actions);
+ if (!valid)
+ return -EINVAL;
+ if (actions[1].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
+ i = 2;
+ sample = actions[1].conf;
+ valid = mlx5_mirror_validate_sample_action(dev, sample->actions);
+ if (!valid)
+ return -EINVAL;
+ }
+ return mlx5_mirror_terminal_action(actions + i) ? i + 1 : -EINVAL;
+ }
+ return -EINVAL;
+}
+
+static int
+mirror_format_tir(struct rte_eth_dev *dev,
+ struct mlx5_mirror_clone *clone,
+ const struct mlx5_flow_template_table_cfg *table_cfg,
+ const struct rte_flow_action *action,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ struct rte_flow_error *error)
+{
+ uint32_t hws_flags;
+ enum mlx5dr_table_type table_type;
+ struct mlx5_hrxq *tir_ctx;
+
+ table_type = get_mlx5dr_table_type(&table_cfg->attr.flow_attr);
+ hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
+ tir_ctx = flow_hw_tir_action_register(dev, hws_flags, action);
+ if (!tir_ctx)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action, "failed to create QUEUE action for mirror clone");
+ dest_attr->dest = tir_ctx->action;
+ clone->action_ctx = tir_ctx;
+ return 0;
+}
+
+static int
+mirror_format_jump(struct rte_eth_dev *dev,
+ struct mlx5_mirror_clone *clone,
+ const struct mlx5_flow_template_table_cfg *table_cfg,
+ const struct rte_flow_action *action,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_jump *jump_conf = action->conf;
+ struct mlx5_hw_jump_action *jump = flow_hw_jump_action_register
+ (dev, table_cfg,
+ jump_conf->group, error);
+
+ if (!jump)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action, "failed to create JUMP action for mirror clone");
+ dest_attr->dest = jump->hws_action;
+ clone->action_ctx = jump;
+ return 0;
+}
+
+static int
+mirror_format_port(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ struct rte_flow_error __rte_unused *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_ethdev *port_action = action->conf;
+
+ dest_attr->dest = priv->hw_vport[port_action->port_id];
+ return 0;
+}
+
+#define MLX5_CONST_ENCAP_ITEM(encap_type, ptr) \
+(((const struct encap_type *)(ptr))->definition)
+
+static int
+hw_mirror_clone_reformat(const struct rte_flow_action *actions,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ enum mlx5dr_action_type *action_type, bool decap)
+{
+ int ret;
+ uint8_t encap_buf[MLX5_ENCAP_MAX_LEN];
+ const struct rte_flow_item *encap_item = NULL;
+ const struct rte_flow_action_raw_encap *encap_conf = NULL;
+ typeof(dest_attr->reformat) *reformat = &dest_attr->reformat;
+
+ switch (actions[0].type) {
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ encap_conf = actions[0].conf;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ encap_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_vxlan_encap,
+ actions);
+ break;
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ encap_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_nvgre_encap,
+ actions);
+ break;
+ default:
+ return -EINVAL;
+ }
+ *action_type = decap ?
+ MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3 :
+ MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
+ if (encap_item) {
+ ret = flow_dv_convert_encap_data(encap_item, encap_buf,
+ &reformat->reformat_data_sz, NULL);
+ if (ret)
+ return -EINVAL;
+ reformat->reformat_data = (void *)(uintptr_t)encap_buf;
+ } else {
+ reformat->reformat_data = (void *)(uintptr_t)encap_conf->data;
+ reformat->reformat_data_sz = encap_conf->size;
+ }
+ return 0;
+}
+
+static int
+hw_mirror_format_clone(struct rte_eth_dev *dev,
+ struct mlx5_mirror_clone *clone,
+ const struct mlx5_flow_template_table_cfg *table_cfg,
+ const struct rte_flow_action *actions,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ struct rte_flow_error *error)
+{
+ int ret;
+ uint32_t i;
+ bool decap_seen = false;
+
+ for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
+ dest_attr->action_type[i] = mlx5_hw_dr_action_types[actions[i].type];
+ switch (actions[i].type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ ret = mirror_format_tir(dev, clone, table_cfg,
+ &actions[i], dest_attr, error);
+ if (ret)
+ return ret;
+ break;
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ ret = mirror_format_port(dev, &actions[i],
+ dest_attr, error);
+ if (ret)
+ return ret;
+ break;
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ ret = mirror_format_jump(dev, clone, table_cfg,
+ &actions[i], dest_attr, error);
+ if (ret)
+ return ret;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ decap_seen = true;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ ret = hw_mirror_clone_reformat(&actions[i], dest_attr,
+ &dest_attr->action_type[i],
+ decap_seen);
+ if (ret < 0)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ &actions[i],
+ "failed to create reformat action");
+ break;
+ default:
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ &actions[i], "unsupported sample action");
+ }
+ clone->type = actions->type;
+ }
+ dest_attr->action_type[i] = MLX5DR_ACTION_TYP_LAST;
+ return 0;
+}
+
+static struct rte_flow_action_list_handle *
+mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
+ const struct mlx5_flow_template_table_cfg *table_cfg,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ uint32_t hws_flags;
+ int ret = 0, i, clones_num;
+ struct mlx5_mirror *mirror;
+ enum mlx5dr_table_type table_type;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5dr_action_dest_attr mirror_attr[MLX5_MIRROR_MAX_CLONES_NUM + 1];
+ enum mlx5dr_action_type array_action_types[MLX5_MIRROR_MAX_CLONES_NUM + 1]
+ [MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN + 1];
+
+ memset(mirror_attr, 0, sizeof(mirror_attr));
+ memset(array_action_types, 0, sizeof(array_action_types));
+ table_type = get_mlx5dr_table_type(&table_cfg->attr.flow_attr);
+ hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
+ clones_num = mlx5_hw_mirror_actions_list_validate(dev, actions);
+ if (clones_num < 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Invalid mirror list format");
+ return NULL;
+ }
+ mirror = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mirror),
+ 0, SOCKET_ID_ANY);
+ if (!mirror) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Failed to allocate mirror context");
+ return NULL;
+ }
+ mirror->type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
+ mirror->clones_num = clones_num;
+ for (i = 0; i < clones_num; i++) {
+ const struct rte_flow_action *clone_actions;
+
+ mirror_attr[i].action_type = array_action_types[i];
+ if (actions[i].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
+ const struct rte_flow_action_sample *sample = actions[i].conf;
+
+ clone_actions = sample->actions;
+ } else {
+ clone_actions = &actions[i];
+ }
+ ret = hw_mirror_format_clone(dev, &mirror->clone[i], table_cfg,
+ clone_actions, &mirror_attr[i],
+ error);
+
+ if (ret)
+ goto error;
+ }
+ hws_flags |= MLX5DR_ACTION_FLAG_SHARED;
+ mirror->mirror_action = mlx5dr_action_create_dest_array(priv->dr_ctx,
+ clones_num,
+ mirror_attr,
+ hws_flags);
+ if (!mirror->mirror_action) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Failed to create HWS mirror action");
+ goto error;
+ }
+
+ LIST_INSERT_HEAD(&priv->indirect_list_head,
+ (struct mlx5_indirect_list *)mirror, entry);
+ return (struct rte_flow_action_list_handle *)mirror;
+
+error:
+ mlx5_hw_mirror_destroy(dev, mirror, true);
+ return NULL;
+}
+
+static struct rte_flow_action_list_handle *
+flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data,
+ struct rte_flow_error *error)
+{
+ struct mlx5_hw_q_job *job = NULL;
+ bool push = flow_hw_action_push(attr);
+ struct rte_flow_action_list_handle *handle;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct mlx5_flow_template_table_cfg table_cfg = {
+ .external = true,
+ .attr = {
+ .flow_attr = {
+ .ingress = conf->ingress,
+ .egress = conf->egress,
+ .transfer = conf->transfer
+ }
+ }
+ };
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "No action list");
+ return NULL;
+ }
+ if (attr) {
+ job = flow_hw_action_job_init(priv, queue, NULL, user_data,
+ NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
+ error);
+ if (!job)
+ return NULL;
+ }
+ switch (actions[0].type) {
+ case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ handle = mlx5_hw_mirror_handle_create(dev, &table_cfg,
+ actions, error);
+ break;
+ default:
+ handle = NULL;
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Invalid list");
+ }
+ if (job) {
+ job->action = handle;
+ flow_hw_action_finalize(dev, queue, job, push, false,
+ handle != NULL);
+ }
+ return handle;
+}
+
+static struct rte_flow_action_list_handle *
+flow_hw_action_list_handle_create(struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ return flow_hw_async_action_list_handle_create(dev, MLX5_HW_INV_QUEUE,
+ NULL, conf, actions,
+ NULL, error);
+}
+
+static int
+flow_hw_async_action_list_handle_destroy
+ (struct rte_eth_dev *dev, uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ struct rte_flow_action_list_handle *handle,
+ void *user_data, struct rte_flow_error *error)
+{
+ int ret = 0;
+ struct mlx5_hw_q_job *job = NULL;
+ bool push = flow_hw_action_push(attr);
+ struct mlx5_priv *priv = dev->data->dev_private;
+ enum mlx5_indirect_list_type type =
+ mlx5_get_indirect_list_type((void *)handle);
+
+ if (attr) {
+ job = flow_hw_action_job_init(priv, queue, NULL, user_data,
+ NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
+ error);
+ if (!job)
+ return rte_errno;
+ }
+ switch (type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
+ mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle, false);
+ break;
+ default:
+ handle = NULL;
+ ret = rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "Invalid indirect list handle");
+ }
+ if (job) {
+ job->action = handle;
+ flow_hw_action_finalize(dev, queue, job, push, false,
+ handle != NULL);
+ }
+ mlx5_free(handle);
+ return ret;
+}
+
+static int
+flow_hw_action_list_handle_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_action_list_handle *handle,
+ struct rte_flow_error *error)
+{
+ return flow_hw_async_action_list_handle_destroy(dev, MLX5_HW_INV_QUEUE,
+ NULL, handle, NULL,
+ error);
+}
+
const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.info_get = flow_hw_info_get,
.configure = flow_hw_configure,
@@ -9382,6 +9983,12 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.action_update = flow_hw_action_update,
.action_query = flow_hw_action_query,
.action_query_update = flow_hw_action_query_update,
+ .action_list_handle_create = flow_hw_action_list_handle_create,
+ .action_list_handle_destroy = flow_hw_action_list_handle_destroy,
+ .async_action_list_handle_create =
+ flow_hw_async_action_list_handle_create,
+ .async_action_list_handle_destroy =
+ flow_hw_async_action_list_handle_destroy,
.query = flow_hw_query,
.get_aged_flows = flow_hw_get_aged_flows,
.get_q_aged_flows = flow_hw_get_q_aged_flows,
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v5 09/10] net/mlx5: reformat HWS code for indirect list actions
2023-10-25 10:27 ` [PATCH v5 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (7 preceding siblings ...)
2023-10-25 10:27 ` [PATCH v5 08/10] net/mlx5: support " Gregory Etelson
@ 2023-10-25 10:27 ` Gregory Etelson
2023-10-25 10:27 ` [PATCH v5 10/10] net/mlx5: support indirect list METER_MARK action Gregory Etelson
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-25 10:27 UTC (permalink / raw)
To: dev
Cc: getelson,
rasland, Suanming Mou, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam
Reformat HWS code for indirect list actions.
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/mlx5_flow.h | 4 +-
drivers/net/mlx5/mlx5_flow_hw.c | 250 +++++++++++++++++---------------
2 files changed, 139 insertions(+), 115 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 580db80fd4..653f83cf55 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1331,11 +1331,11 @@ struct rte_flow_actions_template {
uint64_t action_flags; /* Bit-map of all valid action in template. */
uint16_t dr_actions_num; /* Amount of DR rules actions. */
uint16_t actions_num; /* Amount of flow actions */
- uint16_t *actions_off; /* DR action offset for given rte action offset. */
+ uint16_t *dr_off; /* DR action offset for given rte action offset. */
+ uint16_t *src_off; /* RTE action displacement from app. template */
uint16_t reformat_off; /* Offset of DR reformat action. */
uint16_t mhdr_off; /* Offset of DR modify header action. */
uint32_t refcnt; /* Reference counter. */
- uint16_t rx_cpy_pos; /* Action position of Rx metadata to be copied. */
uint8_t flex_item; /* flex item index. */
};
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 1c3d915be1..f9f735ba75 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -1015,11 +1015,11 @@ flow_hw_modify_field_init(struct mlx5_hw_modify_header_action *mhdr,
static __rte_always_inline int
flow_hw_modify_field_compile(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
- const struct rte_flow_action *action_start, /* Start of AT actions. */
const struct rte_flow_action *action, /* Current action from AT. */
const struct rte_flow_action *action_mask, /* Current mask from AT. */
struct mlx5_hw_actions *acts,
struct mlx5_hw_modify_header_action *mhdr,
+ uint16_t src_pos,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
@@ -1122,7 +1122,7 @@ flow_hw_modify_field_compile(struct rte_eth_dev *dev,
if (shared)
return 0;
ret = __flow_hw_act_data_hdr_modify_append(priv, acts, RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
- action - action_start, mhdr->pos,
+ src_pos, mhdr->pos,
cmds_start, cmds_end, shared,
field, dcopy, mask);
if (ret)
@@ -1181,11 +1181,10 @@ flow_hw_validate_compiled_modify_field(struct rte_eth_dev *dev,
static int
flow_hw_represented_port_compile(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
- const struct rte_flow_action *action_start,
const struct rte_flow_action *action,
const struct rte_flow_action *action_mask,
struct mlx5_hw_actions *acts,
- uint16_t action_dst,
+ uint16_t action_src, uint16_t action_dst,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
@@ -1241,7 +1240,7 @@ flow_hw_represented_port_compile(struct rte_eth_dev *dev,
} else {
ret = __flow_hw_act_data_general_append
(priv, acts, action->type,
- action - action_start, action_dst);
+ action_src, action_dst);
if (ret)
return rte_flow_error_set
(error, ENOMEM,
@@ -1493,7 +1492,6 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
const struct rte_flow_attr *attr = &table_attr->flow_attr;
struct rte_flow_action *actions = at->actions;
- struct rte_flow_action *action_start = actions;
struct rte_flow_action *masks = at->masks;
enum mlx5dr_action_type refmt_type = MLX5DR_ACTION_TYP_LAST;
const struct rte_flow_action_raw_encap *raw_encap_data;
@@ -1506,7 +1504,6 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
uint32_t type;
bool reformat_used = false;
unsigned int of_vlan_offset;
- uint16_t action_pos;
uint16_t jump_pos;
uint32_t ct_idx;
int ret, err;
@@ -1521,71 +1518,69 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
else
type = MLX5DR_TABLE_TYPE_NIC_RX;
for (; !actions_end; actions++, masks++) {
+ uint64_t pos = actions - at->actions;
+ uint16_t src_pos = pos - at->src_off[pos];
+ uint16_t dr_pos = at->dr_off[pos];
+
switch ((int)actions->type) {
case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
- action_pos = at->actions_off[actions - at->actions];
if (!attr->group) {
DRV_LOG(ERR, "Indirect action is not supported in root table.");
goto err;
}
ret = table_template_translate_indirect_list
- (dev, actions, masks, acts,
- actions - action_start,
- action_pos);
+ (dev, actions, masks, acts, src_pos, dr_pos);
if (ret)
goto err;
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
- action_pos = at->actions_off[actions - at->actions];
if (!attr->group) {
DRV_LOG(ERR, "Indirect action is not supported in root table.");
goto err;
}
if (actions->conf && masks->conf) {
if (flow_hw_shared_action_translate
- (dev, actions, acts, actions - action_start, action_pos))
+ (dev, actions, acts, src_pos, dr_pos))
goto err;
} else if (__flow_hw_act_data_general_append
- (priv, acts, actions->type,
- actions - action_start, action_pos)){
+ (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT,
+ src_pos, dr_pos)){
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_VOID:
break;
case RTE_FLOW_ACTION_TYPE_DROP:
- action_pos = at->actions_off[actions - at->actions];
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
priv->hw_drop[!!attr->group];
break;
case RTE_FLOW_ACTION_TYPE_MARK:
- action_pos = at->actions_off[actions - at->actions];
acts->mark = true;
if (masks->conf &&
((const struct rte_flow_action_mark *)
masks->conf)->id)
- acts->rule_acts[action_pos].tag.value =
+ acts->rule_acts[dr_pos].tag.value =
mlx5_flow_mark_set
(((const struct rte_flow_action_mark *)
(actions->conf))->id);
else if (__flow_hw_act_data_general_append(priv, acts,
- actions->type, actions - action_start, action_pos))
+ actions->type,
+ src_pos, dr_pos))
goto err;
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
priv->hw_tag[!!attr->group];
__atomic_fetch_add(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED);
flow_hw_rxq_flag_set(dev, true);
break;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
- action_pos = at->actions_off[actions - at->actions];
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
priv->hw_push_vlan[type];
if (is_template_masked_push_vlan(masks->conf))
- acts->rule_acts[action_pos].push_vlan.vlan_hdr =
+ acts->rule_acts[dr_pos].push_vlan.vlan_hdr =
vlan_hdr_to_be32(actions);
else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos))
+ src_pos, dr_pos))
goto err;
of_vlan_offset = is_of_vlan_pcp_present(actions) ?
MLX5_HW_VLAN_PUSH_PCP_IDX :
@@ -1594,12 +1589,10 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
masks += of_vlan_offset;
break;
case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
- action_pos = at->actions_off[actions - at->actions];
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
priv->hw_pop_vlan[type];
break;
case RTE_FLOW_ACTION_TYPE_JUMP:
- action_pos = at->actions_off[actions - at->actions];
if (masks->conf &&
((const struct rte_flow_action_jump *)
masks->conf)->group) {
@@ -1610,17 +1603,16 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
(dev, cfg, jump_group, error);
if (!acts->jump)
goto err;
- acts->rule_acts[action_pos].action = (!!attr->group) ?
- acts->jump->hws_action :
- acts->jump->root_action;
+ acts->rule_acts[dr_pos].action = (!!attr->group) ?
+ acts->jump->hws_action :
+ acts->jump->root_action;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)){
+ src_pos, dr_pos)){
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_QUEUE:
- action_pos = at->actions_off[actions - at->actions];
if (masks->conf &&
((const struct rte_flow_action_queue *)
masks->conf)->index) {
@@ -1630,16 +1622,15 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
actions);
if (!acts->tir)
goto err;
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
acts->tir->action;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)) {
+ src_pos, dr_pos)) {
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_RSS:
- action_pos = at->actions_off[actions - at->actions];
if (actions->conf && masks->conf) {
acts->tir = flow_hw_tir_action_register
(dev,
@@ -1647,11 +1638,11 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
actions);
if (!acts->tir)
goto err;
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
acts->tir->action;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)) {
+ src_pos, dr_pos)) {
goto err;
}
break;
@@ -1663,7 +1654,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
enc_item_m = ((const struct rte_flow_action_vxlan_encap *)
masks->conf)->definition;
reformat_used = true;
- reformat_src = actions - action_start;
+ reformat_src = src_pos;
refmt_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
break;
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
@@ -1674,7 +1665,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
enc_item_m = ((const struct rte_flow_action_nvgre_encap *)
masks->conf)->definition;
reformat_used = true;
- reformat_src = actions - action_start;
+ reformat_src = src_pos;
refmt_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
@@ -1704,7 +1695,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
refmt_type =
MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
}
- reformat_src = actions - action_start;
+ reformat_src = src_pos;
break;
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
reformat_used = true;
@@ -1720,34 +1711,22 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
NULL,
"Send to kernel action on root table is not supported in HW steering mode");
}
- action_pos = at->actions_off[actions - at->actions];
table_type = attr->ingress ? MLX5DR_TABLE_TYPE_NIC_RX :
((attr->egress) ? MLX5DR_TABLE_TYPE_NIC_TX :
- MLX5DR_TABLE_TYPE_FDB);
- acts->rule_acts[action_pos].action = priv->hw_send_to_kernel[table_type];
+ MLX5DR_TABLE_TYPE_FDB);
+ acts->rule_acts[dr_pos].action = priv->hw_send_to_kernel[table_type];
break;
case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
- err = flow_hw_modify_field_compile(dev, attr, action_start,
- actions, masks, acts, &mhdr,
- error);
+ err = flow_hw_modify_field_compile(dev, attr, actions,
+ masks, acts, &mhdr,
+ src_pos, error);
if (err)
goto err;
- /*
- * Adjust the action source position for the following.
- * ... / MODIFY_FIELD: rx_cpy_pos / (QUEUE|RSS) / ...
- * The next action will be Q/RSS, there will not be
- * another adjustment and the real source position of
- * the following actions will be decreased by 1.
- * No change of the total actions in the new template.
- */
- if ((actions - action_start) == at->rx_cpy_pos)
- action_start += 1;
break;
case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
- action_pos = at->actions_off[actions - at->actions];
if (flow_hw_represented_port_compile
- (dev, attr, action_start, actions,
- masks, acts, action_pos, error))
+ (dev, attr, actions,
+ masks, acts, src_pos, dr_pos, error))
goto err;
break;
case RTE_FLOW_ACTION_TYPE_METER:
@@ -1756,19 +1735,18 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
* Calculated DR offset is stored only for ASO_METER and FT
* is assumed to be the next action.
*/
- action_pos = at->actions_off[actions - at->actions];
- jump_pos = action_pos + 1;
+ jump_pos = dr_pos + 1;
if (actions->conf && masks->conf &&
((const struct rte_flow_action_meter *)
masks->conf)->mtr_id) {
err = flow_hw_meter_compile(dev, cfg,
- action_pos, jump_pos, actions, acts, error);
+ dr_pos, jump_pos, actions, acts, error);
if (err)
goto err;
} else if (__flow_hw_act_data_general_append(priv, acts,
- actions->type,
- actions - action_start,
- action_pos))
+ actions->type,
+ src_pos,
+ dr_pos))
goto err;
break;
case RTE_FLOW_ACTION_TYPE_AGE:
@@ -1781,11 +1759,10 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
NULL,
"Age action on root table is not supported in HW steering mode");
}
- action_pos = at->actions_off[actions - at->actions];
if (__flow_hw_act_data_general_append(priv, acts,
- actions->type,
- actions - action_start,
- action_pos))
+ actions->type,
+ src_pos,
+ dr_pos))
goto err;
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
@@ -1806,49 +1783,46 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
* counter.
*/
break;
- action_pos = at->actions_off[actions - at->actions];
if (masks->conf &&
((const struct rte_flow_action_count *)
masks->conf)->id) {
- err = flow_hw_cnt_compile(dev, action_pos, acts);
+ err = flow_hw_cnt_compile(dev, dr_pos, acts);
if (err)
goto err;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)) {
+ src_pos, dr_pos)) {
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
- action_pos = at->actions_off[actions - at->actions];
if (masks->conf) {
ct_idx = MLX5_ACTION_CTX_CT_GET_IDX
((uint32_t)(uintptr_t)actions->conf);
if (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE, ct_idx,
- &acts->rule_acts[action_pos]))
+ &acts->rule_acts[dr_pos]))
goto err;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)) {
+ src_pos, dr_pos)) {
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_METER_MARK:
- action_pos = at->actions_off[actions - at->actions];
if (actions->conf && masks->conf &&
((const struct rte_flow_action_meter_mark *)
masks->conf)->profile) {
err = flow_hw_meter_mark_compile(dev,
- action_pos, actions,
- acts->rule_acts,
- &acts->mtr_id,
- MLX5_HW_INV_QUEUE);
+ dr_pos, actions,
+ acts->rule_acts,
+ &acts->mtr_id,
+ MLX5_HW_INV_QUEUE);
if (err)
goto err;
} else if (__flow_hw_act_data_general_append(priv, acts,
- actions->type,
- actions - action_start,
- action_pos))
+ actions->type,
+ src_pos,
+ dr_pos))
goto err;
break;
case RTE_FLOW_ACTION_TYPE_END:
@@ -1931,7 +1905,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
if (shared_rfmt)
acts->rule_acts[at->reformat_off].reformat.offset = 0;
else if (__flow_hw_act_data_encap_append(priv, acts,
- (action_start + reformat_src)->type,
+ (at->actions + reformat_src)->type,
reformat_src, at->reformat_off, data_size))
goto err;
acts->encap_decap->shared = shared_rfmt;
@@ -4283,6 +4257,31 @@ flow_hw_validate_action_raw_encap(struct rte_eth_dev *dev __rte_unused,
return 0;
}
+/**
+ * Process `... / raw_decap / raw_encap / ...` actions sequence.
+ * The PMD handles the sequence as a single encap or decap reformat action,
+ * depending on the raw_encap configuration.
+ *
+ * The function assumes that the raw_decap / raw_encap location
+ * in actions template list complies with relative HWS actions order:
+ * for the required reformat configuration:
+ * ENCAP configuration must appear before [JUMP|DROP|PORT]
+ * DECAP configuration must appear at the template head.
+ */
+static uint64_t
+mlx5_decap_encap_reformat_type(const struct rte_flow_action *actions,
+ uint32_t encap_ind, uint64_t flags)
+{
+ const struct rte_flow_action_raw_encap *encap = actions[encap_ind].conf;
+
+ if ((flags & MLX5_FLOW_ACTION_DECAP) == 0)
+ return MLX5_FLOW_ACTION_ENCAP;
+ if (actions[encap_ind - 1].type != RTE_FLOW_ACTION_TYPE_RAW_DECAP)
+ return MLX5_FLOW_ACTION_ENCAP;
+ return encap->size >= MLX5_ENCAPSULATION_DECISION_SIZE ?
+ MLX5_FLOW_ACTION_ENCAP : MLX5_FLOW_ACTION_DECAP;
+}
+
static inline uint16_t
flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
struct rte_flow_action masks[],
@@ -4320,13 +4319,13 @@ flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
*/
for (i = act_num - 2; (int)i >= 0; i--) {
enum rte_flow_action_type type = actions[i].type;
+ uint64_t reformat_type;
if (type == RTE_FLOW_ACTION_TYPE_INDIRECT)
type = masks[i].type;
switch (type) {
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
- case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
case RTE_FLOW_ACTION_TYPE_DROP:
case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
case RTE_FLOW_ACTION_TYPE_JUMP:
@@ -4337,10 +4336,20 @@ flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
case RTE_FLOW_ACTION_TYPE_VOID:
case RTE_FLOW_ACTION_TYPE_END:
break;
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ reformat_type =
+ mlx5_decap_encap_reformat_type(actions, i,
+ flags);
+ if (reformat_type == MLX5_FLOW_ACTION_DECAP) {
+ i++;
+ goto insert;
+ }
+ if (actions[i - 1].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP)
+ i--;
+ break;
default:
i++; /* new MF inserted AFTER actions[i] */
goto insert;
- break;
}
}
i = 0;
@@ -4649,7 +4658,7 @@ action_template_set_type(struct rte_flow_actions_template *at,
unsigned int action_src, uint16_t *curr_off,
enum mlx5dr_action_type type)
{
- at->actions_off[action_src] = *curr_off;
+ at->dr_off[action_src] = *curr_off;
action_types[*curr_off] = type;
*curr_off = *curr_off + 1;
}
@@ -4680,11 +4689,13 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
* Both AGE and COUNT action need counter, the first one fills
* the action_types array, and the second only saves the offset.
*/
- if (*cnt_off == UINT16_MAX)
+ if (*cnt_off == UINT16_MAX) {
+ *cnt_off = *curr_off;
action_template_set_type(at, action_types,
action_src, curr_off,
MLX5DR_ACTION_TYP_CTR);
- at->actions_off[action_src] = *cnt_off;
+ }
+ at->dr_off[action_src] = *cnt_off;
break;
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
action_template_set_type(at, action_types, action_src, curr_off,
@@ -4804,7 +4815,7 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
}
break;
case RTE_FLOW_ACTION_TYPE_METER:
- at->actions_off[i] = curr_off;
+ at->dr_off[i] = curr_off;
action_types[curr_off++] = MLX5DR_ACTION_TYP_ASO_METER;
if (curr_off >= MLX5_HW_MAX_ACTS)
goto err_actions_num;
@@ -4812,14 +4823,14 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
break;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
type = mlx5_hw_dr_action_types[at->actions[i].type];
- at->actions_off[i] = curr_off;
+ at->dr_off[i] = curr_off;
action_types[curr_off++] = type;
i += is_of_vlan_pcp_present(at->actions + i) ?
MLX5_HW_VLAN_PUSH_PCP_IDX :
MLX5_HW_VLAN_PUSH_VID_IDX;
break;
case RTE_FLOW_ACTION_TYPE_METER_MARK:
- at->actions_off[i] = curr_off;
+ at->dr_off[i] = curr_off;
action_types[curr_off++] = MLX5DR_ACTION_TYP_ASO_METER;
if (curr_off >= MLX5_HW_MAX_ACTS)
goto err_actions_num;
@@ -4835,11 +4846,11 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
cnt_off = curr_off++;
action_types[cnt_off] = MLX5DR_ACTION_TYP_CTR;
}
- at->actions_off[i] = cnt_off;
+ at->dr_off[i] = cnt_off;
break;
default:
type = mlx5_hw_dr_action_types[at->actions[i].type];
- at->actions_off[i] = curr_off;
+ at->dr_off[i] = curr_off;
action_types[curr_off++] = type;
break;
}
@@ -5112,6 +5123,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
struct rte_flow_action mf_actions[MLX5_HW_MAX_ACTS];
struct rte_flow_action mf_masks[MLX5_HW_MAX_ACTS];
uint32_t expand_mf_num = 0;
+ uint16_t src_off[MLX5_HW_MAX_ACTS] = {0, };
if (mlx5_flow_hw_actions_validate(dev, attr, actions, masks,
&action_flags, error))
@@ -5190,6 +5202,8 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
act_num,
expand_mf_num);
act_num += expand_mf_num;
+ for (i = pos + expand_mf_num; i < act_num; i++)
+ src_off[i] += expand_mf_num;
action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
}
act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, ra, error);
@@ -5200,7 +5214,8 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
if (mask_len <= 0)
return NULL;
len += RTE_ALIGN(mask_len, 16);
- len += RTE_ALIGN(act_num * sizeof(*at->actions_off), 16);
+ len += RTE_ALIGN(act_num * sizeof(*at->dr_off), 16);
+ len += RTE_ALIGN(act_num * sizeof(*at->src_off), 16);
at = mlx5_malloc(MLX5_MEM_ZERO, len + sizeof(*at),
RTE_CACHE_LINE_SIZE, rte_socket_id());
if (!at) {
@@ -5224,13 +5239,15 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
if (mask_len <= 0)
goto error;
/* DR actions offsets in the third part. */
- at->actions_off = (uint16_t *)((uint8_t *)at->masks + mask_len);
+ at->dr_off = (uint16_t *)((uint8_t *)at->masks + mask_len);
+ at->src_off = RTE_PTR_ADD(at->dr_off,
+ RTE_ALIGN(act_num * sizeof(*at->dr_off), 16));
+ memcpy(at->src_off, src_off, act_num * sizeof(at->src_off[0]));
at->actions_num = act_num;
for (i = 0; i < at->actions_num; ++i)
- at->actions_off[i] = UINT16_MAX;
+ at->dr_off[i] = UINT16_MAX;
at->reformat_off = UINT16_MAX;
at->mhdr_off = UINT16_MAX;
- at->rx_cpy_pos = pos;
for (i = 0; actions->type != RTE_FLOW_ACTION_TYPE_END;
actions++, masks++, i++) {
const struct rte_flow_action_modify_field *info;
@@ -9547,14 +9564,15 @@ mlx5_mirror_terminal_action(const struct rte_flow_action *action)
static bool
mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
- const struct rte_flow_action *action)
+ const struct rte_flow_attr *flow_attr,
+ const struct rte_flow_action *action)
{
struct mlx5_priv *priv = dev->data->dev_private;
switch (action->type) {
case RTE_FLOW_ACTION_TYPE_QUEUE:
case RTE_FLOW_ACTION_TYPE_RSS:
- if (priv->sh->esw_mode)
+ if (flow_attr->transfer)
return false;
break;
case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
@@ -9562,7 +9580,7 @@ mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
- if (!priv->sh->esw_mode)
+ if (!priv->sh->esw_mode && !flow_attr->transfer)
return false;
if (action[0].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP &&
action[1].type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
@@ -9584,19 +9602,22 @@ mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
*/
static int
mlx5_hw_mirror_actions_list_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *flow_attr,
const struct rte_flow_action *actions)
{
if (actions[0].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
int i = 1;
bool valid;
const struct rte_flow_action_sample *sample = actions[0].conf;
- valid = mlx5_mirror_validate_sample_action(dev, sample->actions);
+ valid = mlx5_mirror_validate_sample_action(dev, flow_attr,
+ sample->actions);
if (!valid)
return -EINVAL;
if (actions[1].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
i = 2;
sample = actions[1].conf;
- valid = mlx5_mirror_validate_sample_action(dev, sample->actions);
+ valid = mlx5_mirror_validate_sample_action(dev, flow_attr,
+ sample->actions);
if (!valid)
return -EINVAL;
}
@@ -9670,10 +9691,10 @@ mirror_format_port(struct rte_eth_dev *dev,
static int
hw_mirror_clone_reformat(const struct rte_flow_action *actions,
struct mlx5dr_action_dest_attr *dest_attr,
- enum mlx5dr_action_type *action_type, bool decap)
+ enum mlx5dr_action_type *action_type,
+ uint8_t *reformat_buf, bool decap)
{
int ret;
- uint8_t encap_buf[MLX5_ENCAP_MAX_LEN];
const struct rte_flow_item *encap_item = NULL;
const struct rte_flow_action_raw_encap *encap_conf = NULL;
typeof(dest_attr->reformat) *reformat = &dest_attr->reformat;
@@ -9697,11 +9718,11 @@ hw_mirror_clone_reformat(const struct rte_flow_action *actions,
MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3 :
MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
if (encap_item) {
- ret = flow_dv_convert_encap_data(encap_item, encap_buf,
+ ret = flow_dv_convert_encap_data(encap_item, reformat_buf,
&reformat->reformat_data_sz, NULL);
if (ret)
return -EINVAL;
- reformat->reformat_data = (void *)(uintptr_t)encap_buf;
+ reformat->reformat_data = reformat_buf;
} else {
reformat->reformat_data = (void *)(uintptr_t)encap_conf->data;
reformat->reformat_data_sz = encap_conf->size;
@@ -9715,7 +9736,7 @@ hw_mirror_format_clone(struct rte_eth_dev *dev,
const struct mlx5_flow_template_table_cfg *table_cfg,
const struct rte_flow_action *actions,
struct mlx5dr_action_dest_attr *dest_attr,
- struct rte_flow_error *error)
+ uint8_t *reformat_buf, struct rte_flow_error *error)
{
int ret;
uint32_t i;
@@ -9751,7 +9772,7 @@ hw_mirror_format_clone(struct rte_eth_dev *dev,
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
ret = hw_mirror_clone_reformat(&actions[i], dest_attr,
&dest_attr->action_type[i],
- decap_seen);
+ reformat_buf, decap_seen);
if (ret < 0)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -9780,15 +9801,18 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
struct mlx5_mirror *mirror;
enum mlx5dr_table_type table_type;
struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_attr *flow_attr = &table_cfg->attr.flow_attr;
+ uint8_t reformat_buf[MLX5_MIRROR_MAX_CLONES_NUM][MLX5_ENCAP_MAX_LEN];
struct mlx5dr_action_dest_attr mirror_attr[MLX5_MIRROR_MAX_CLONES_NUM + 1];
enum mlx5dr_action_type array_action_types[MLX5_MIRROR_MAX_CLONES_NUM + 1]
[MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN + 1];
memset(mirror_attr, 0, sizeof(mirror_attr));
memset(array_action_types, 0, sizeof(array_action_types));
- table_type = get_mlx5dr_table_type(&table_cfg->attr.flow_attr);
+ table_type = get_mlx5dr_table_type(flow_attr);
hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
- clones_num = mlx5_hw_mirror_actions_list_validate(dev, actions);
+ clones_num = mlx5_hw_mirror_actions_list_validate(dev, flow_attr,
+ actions);
if (clones_num < 0) {
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
actions, "Invalid mirror list format");
@@ -9816,7 +9840,7 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
}
ret = hw_mirror_format_clone(dev, &mirror->clone[i], table_cfg,
clone_actions, &mirror_attr[i],
- error);
+ reformat_buf[i], error);
if (ret)
goto error;
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v5 10/10] net/mlx5: support indirect list METER_MARK action
2023-10-25 10:27 ` [PATCH v5 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (8 preceding siblings ...)
2023-10-25 10:27 ` [PATCH v5 09/10] net/mlx5: reformat HWS code for indirect list actions Gregory Etelson
@ 2023-10-25 10:27 ` Gregory Etelson
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-25 10:27 UTC (permalink / raw)
To: dev
Cc: getelson,
rasland, Suanming Mou, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/mlx5_flow.c | 69 ++++-
drivers/net/mlx5/mlx5_flow.h | 70 ++++--
drivers/net/mlx5/mlx5_flow_hw.c | 432 +++++++++++++++++++++++++++-----
3 files changed, 485 insertions(+), 86 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 99b814d815..34252d66c0 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -75,8 +75,11 @@ mlx5_indirect_list_handles_release(struct rte_eth_dev *dev)
switch (e->type) {
#ifdef HAVE_MLX5_HWS_SUPPORT
case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
- mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)e, true);
+ mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)e);
break;
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
+ mlx5_destroy_legacy_indirect(dev, e);
+ break;
#endif
default:
DRV_LOG(ERR, "invalid indirect list type");
@@ -1169,7 +1172,24 @@ mlx5_flow_async_action_list_handle_destroy
const struct rte_flow_op_attr *op_attr,
struct rte_flow_action_list_handle *action_handle,
void *user_data, struct rte_flow_error *error);
-
+static int
+mlx5_flow_action_list_handle_query_update(struct rte_eth_dev *dev,
+ const
+ struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ struct rte_flow_error *error);
+static int
+mlx5_flow_async_action_list_handle_query_update(struct rte_eth_dev *dev,
+ uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct
+ rte_flow_action_list_handle *handle,
+ const void **update,
+ void **query,
+ enum rte_flow_query_update_mode mode,
+ void *user_data,
+ struct rte_flow_error *error);
static const struct rte_flow_ops mlx5_flow_ops = {
.validate = mlx5_flow_validate,
.create = mlx5_flow_create,
@@ -1219,6 +1239,10 @@ static const struct rte_flow_ops mlx5_flow_ops = {
mlx5_flow_async_action_list_handle_create,
.async_action_list_handle_destroy =
mlx5_flow_async_action_list_handle_destroy,
+ .action_list_handle_query_update =
+ mlx5_flow_action_list_handle_query_update,
+ .async_action_list_handle_query_update =
+ mlx5_flow_async_action_list_handle_query_update,
};
/* Tunnel information. */
@@ -11003,6 +11027,47 @@ mlx5_flow_async_action_list_handle_destroy
error);
}
+static int
+mlx5_flow_action_list_handle_query_update(struct rte_eth_dev *dev,
+ const
+ struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops,
+ action_list_handle_query_update, ENOTSUP);
+ return fops->action_list_handle_query_update(dev, handle, update, query,
+ mode, error);
+}
+
+static int
+mlx5_flow_async_action_list_handle_query_update(struct rte_eth_dev *dev,
+ uint32_t queue_id,
+ const
+ struct rte_flow_op_attr *op_attr,
+ const struct
+ rte_flow_action_list_handle *handle,
+ const void **update,
+ void **query,
+ enum
+ rte_flow_query_update_mode mode,
+ void *user_data,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops,
+ async_action_list_handle_query_update, ENOTSUP);
+ return fops->async_action_list_handle_query_update(dev, queue_id, op_attr,
+ handle, update,
+ query, mode,
+ user_data, error);
+}
+
+
/**
* Destroy all indirect actions (shared RSS).
*
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 653f83cf55..3ea2548d2b 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -98,25 +98,40 @@ enum mlx5_indirect_type {
#define MLX5_ACTION_CTX_CT_GEN_IDX MLX5_INDIRECT_ACT_CT_GEN_IDX
enum mlx5_indirect_list_type {
- MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 1,
+ MLX5_INDIRECT_ACTION_LIST_TYPE_ERR = 0,
+ MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY = 1,
+ MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 2,
};
-/*
+/**
* Base type for indirect list type.
- * Actual indirect list type MUST override that type and put type spec data
- * after the `chain`.
*/
struct mlx5_indirect_list {
- /* type field MUST be the first */
+ /* Indirect list type. */
enum mlx5_indirect_list_type type;
+ /* Optional storage list entry */
LIST_ENTRY(mlx5_indirect_list) entry;
- /* put type specific data after chain */
};
+static __rte_always_inline void
+mlx5_indirect_list_add_entry(void *head, struct mlx5_indirect_list *elem)
+{
+ LIST_HEAD(, mlx5_indirect_list) *h = head;
+
+ LIST_INSERT_HEAD(h, elem, entry);
+}
+
+static __rte_always_inline void
+mlx5_indirect_list_remove_entry(struct mlx5_indirect_list *elem)
+{
+ if (elem->entry.le_prev)
+ LIST_REMOVE(elem, entry);
+}
+
static __rte_always_inline enum mlx5_indirect_list_type
-mlx5_get_indirect_list_type(const struct mlx5_indirect_list *obj)
+mlx5_get_indirect_list_type(const struct rte_flow_action_list_handle *obj)
{
- return obj->type;
+ return ((const struct mlx5_indirect_list *)obj)->type;
}
/* Matches on selected register. */
@@ -1240,9 +1255,12 @@ struct rte_flow_hw {
#pragma GCC diagnostic error "-Wpedantic"
#endif
-struct mlx5dr_action;
-typedef struct mlx5dr_action *
-(*indirect_list_callback_t)(const struct rte_flow_action *);
+struct mlx5_action_construct_data;
+typedef int
+(*indirect_list_callback_t)(struct rte_eth_dev *,
+ const struct mlx5_action_construct_data *,
+ const struct rte_flow_action *,
+ struct mlx5dr_rule_action *);
/* rte flow action translate to DR action struct. */
struct mlx5_action_construct_data {
@@ -1252,6 +1270,7 @@ struct mlx5_action_construct_data {
uint32_t idx; /* Data index. */
uint16_t action_src; /* rte_flow_action src offset. */
uint16_t action_dst; /* mlx5dr_rule_action dst offset. */
+ indirect_list_callback_t indirect_list_cb;
union {
struct {
/* encap data len. */
@@ -1291,10 +1310,8 @@ struct mlx5_action_construct_data {
} shared_counter;
struct {
uint32_t id;
+ uint32_t conf_masked:1;
} shared_meter;
- struct {
- indirect_list_callback_t cb;
- } indirect_list;
};
};
@@ -2017,7 +2034,21 @@ typedef int
const struct rte_flow_op_attr *op_attr,
struct rte_flow_action_list_handle *action_handle,
void *user_data, struct rte_flow_error *error);
-
+typedef int
+(*mlx5_flow_action_list_handle_query_update_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ struct rte_flow_error *error);
+typedef int
+(*mlx5_flow_async_action_list_handle_query_update_t)
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ void *user_data, struct rte_flow_error *error);
struct mlx5_flow_driver_ops {
mlx5_flow_validate_t validate;
@@ -2085,6 +2116,10 @@ struct mlx5_flow_driver_ops {
async_action_list_handle_create;
mlx5_flow_async_action_list_handle_destroy_t
async_action_list_handle_destroy;
+ mlx5_flow_action_list_handle_query_update_t
+ action_list_handle_query_update;
+ mlx5_flow_async_action_list_handle_query_update_t
+ async_action_list_handle_query_update;
};
/* mlx5_flow.c */
@@ -2820,6 +2855,9 @@ mlx5_indirect_list_handles_release(struct rte_eth_dev *dev);
#ifdef HAVE_MLX5_HWS_SUPPORT
struct mlx5_mirror;
void
-mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release);
+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror);
+void
+mlx5_destroy_legacy_indirect(struct rte_eth_dev *dev,
+ struct mlx5_indirect_list *ptr);
#endif
#endif /* RTE_PMD_MLX5_FLOW_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index f9f735ba75..52f9f4a47e 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -61,16 +61,23 @@
#define MLX5_MIRROR_MAX_CLONES_NUM 3
#define MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN 4
+#define MLX5_HW_PORT_IS_PROXY(priv) \
+ (!!((priv)->sh->esw_mode && (priv)->master))
+
+
+struct mlx5_indlst_legacy {
+ struct mlx5_indirect_list indirect;
+ struct rte_flow_action_handle *handle;
+ enum rte_flow_action_type legacy_type;
+};
+
struct mlx5_mirror_clone {
enum rte_flow_action_type type;
void *action_ctx;
};
struct mlx5_mirror {
- /* type field MUST be the first */
- enum mlx5_indirect_list_type type;
- LIST_ENTRY(mlx5_indirect_list) entry;
-
+ struct mlx5_indirect_list indirect;
uint32_t clones_num;
struct mlx5dr_action *mirror_action;
struct mlx5_mirror_clone clone[MLX5_MIRROR_MAX_CLONES_NUM];
@@ -598,7 +605,7 @@ flow_hw_act_data_indirect_list_append(struct mlx5_priv *priv,
act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
if (!act_data)
return -1;
- act_data->indirect_list.cb = cb;
+ act_data->indirect_list_cb = cb;
LIST_INSERT_HEAD(&acts->act_list, act_data, next);
return 0;
}
@@ -1416,46 +1423,211 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev,
return 0;
}
-static struct mlx5dr_action *
-flow_hw_mirror_action(const struct rte_flow_action *action)
+static int
+flow_hw_translate_indirect_mirror(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused const struct mlx5_action_construct_data *act_data,
+ const struct rte_flow_action *action,
+ struct mlx5dr_rule_action *dr_rule)
+{
+ const struct rte_flow_action_indirect_list *list_conf = action->conf;
+ const struct mlx5_mirror *mirror = (typeof(mirror))list_conf->handle;
+
+ dr_rule->action = mirror->mirror_action;
+ return 0;
+}
+
+/**
+ * HWS mirror implemented as FW island.
+ * The action does not support indirect list flow configuration.
+ * If template handle was masked, use handle mirror action in flow rules.
+ * Otherwise let flow rule specify mirror handle.
+ */
+static int
+hws_table_tmpl_translate_indirect_mirror(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *mask,
+ struct mlx5_hw_actions *acts,
+ uint16_t action_src, uint16_t action_dst)
+{
+ int ret = 0;
+ const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
+
+ if (mask_conf && mask_conf->handle) {
+ /**
+ * If mirror handle was masked, assign fixed DR5 mirror action.
+ */
+ flow_hw_translate_indirect_mirror(dev, NULL, action,
+ &acts->rule_acts[action_dst]);
+ } else {
+ struct mlx5_priv *priv = dev->data->dev_private;
+ ret = flow_hw_act_data_indirect_list_append
+ (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
+ action_src, action_dst,
+ flow_hw_translate_indirect_mirror);
+ }
+ return ret;
+}
+
+static int
+flow_dr_set_meter(struct mlx5_priv *priv,
+ struct mlx5dr_rule_action *dr_rule,
+ const struct rte_flow_action_indirect_list *action_conf)
+{
+ const struct mlx5_indlst_legacy *legacy_obj =
+ (typeof(legacy_obj))action_conf->handle;
+ struct mlx5_aso_mtr_pool *mtr_pool = priv->hws_mpool;
+ uint32_t act_idx = (uint32_t)(uintptr_t)legacy_obj->handle;
+ uint32_t mtr_id = act_idx & (RTE_BIT32(MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
+ struct mlx5_aso_mtr *aso_mtr = mlx5_ipool_get(mtr_pool->idx_pool, mtr_id);
+
+ if (!aso_mtr)
+ return -EINVAL;
+ dr_rule->action = mtr_pool->action;
+ dr_rule->aso_meter.offset = aso_mtr->offset;
+ return 0;
+}
+
+__rte_always_inline static void
+flow_dr_mtr_flow_color(struct mlx5dr_rule_action *dr_rule, enum rte_color init_color)
+{
+ dr_rule->aso_meter.init_color =
+ (enum mlx5dr_action_aso_meter_color)rte_col_2_mlx5_col(init_color);
+}
+
+static int
+flow_hw_translate_indirect_meter(struct rte_eth_dev *dev,
+ const struct mlx5_action_construct_data *act_data,
+ const struct rte_flow_action *action,
+ struct mlx5dr_rule_action *dr_rule)
+{
+ int ret;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_indirect_list *action_conf = action->conf;
+ const struct rte_flow_indirect_update_flow_meter_mark **flow_conf =
+ (typeof(flow_conf))action_conf->conf;
+
+ /*
+ * Masked indirect handle set dr5 action during template table
+ * translation.
+ */
+ if (!dr_rule->action) {
+ ret = flow_dr_set_meter(priv, dr_rule, action_conf);
+ if (ret)
+ return ret;
+ }
+ if (!act_data->shared_meter.conf_masked) {
+ if (flow_conf && flow_conf[0] && flow_conf[0]->init_color < RTE_COLORS)
+ flow_dr_mtr_flow_color(dr_rule, flow_conf[0]->init_color);
+ }
+ return 0;
+}
+
+static int
+hws_table_tmpl_translate_indirect_meter(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *mask,
+ struct mlx5_hw_actions *acts,
+ uint16_t action_src, uint16_t action_dst)
+{
+ int ret;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_indirect_list *action_conf = action->conf;
+ const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
+ bool is_handle_masked = mask_conf && mask_conf->handle;
+ bool is_conf_masked = mask_conf && mask_conf->conf && mask_conf->conf[0];
+ struct mlx5dr_rule_action *dr_rule = &acts->rule_acts[action_dst];
+
+ if (is_handle_masked) {
+ ret = flow_dr_set_meter(priv, dr_rule, action->conf);
+ if (ret)
+ return ret;
+ }
+ if (is_conf_masked) {
+ const struct
+ rte_flow_indirect_update_flow_meter_mark **flow_conf =
+ (typeof(flow_conf))action_conf->conf;
+ flow_dr_mtr_flow_color(dr_rule,
+ flow_conf[0]->init_color);
+ }
+ if (!is_handle_masked || !is_conf_masked) {
+ struct mlx5_action_construct_data *act_data;
+
+ ret = flow_hw_act_data_indirect_list_append
+ (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
+ action_src, action_dst, flow_hw_translate_indirect_meter);
+ if (ret)
+ return ret;
+ act_data = LIST_FIRST(&acts->act_list);
+ act_data->shared_meter.conf_masked = is_conf_masked;
+ }
+ return 0;
+}
+
+static int
+hws_table_tmpl_translate_indirect_legacy(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *mask,
+ struct mlx5_hw_actions *acts,
+ uint16_t action_src, uint16_t action_dst)
{
- struct mlx5_mirror *mirror = (void *)(uintptr_t)action->conf;
+ int ret;
+ const struct rte_flow_action_indirect_list *indlst_conf = action->conf;
+ struct mlx5_indlst_legacy *indlst_obj = (typeof(indlst_obj))indlst_conf->handle;
+ uint32_t act_idx = (uint32_t)(uintptr_t)indlst_obj->handle;
+ uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
- return mirror->mirror_action;
+ switch (type) {
+ case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
+ ret = hws_table_tmpl_translate_indirect_meter(dev, action, mask,
+ acts, action_src,
+ action_dst);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
}
+/*
+ * template .. indirect_list handle Ht conf Ct ..
+ * mask .. indirect_list handle Hm conf Cm ..
+ *
+ * PMD requires Ht != 0 to resolve handle type.
+ * If Ht was masked (Hm != 0) DR5 action will be set according to Ht and will
+ * not change. Otherwise, DR5 action will be resolved during flow rule build.
+ * If Ct was masked (Cm != 0), table template processing updates base
+ * indirect action configuration with Ct parameters.
+ */
static int
table_template_translate_indirect_list(struct rte_eth_dev *dev,
const struct rte_flow_action *action,
const struct rte_flow_action *mask,
struct mlx5_hw_actions *acts,
- uint16_t action_src,
- uint16_t action_dst)
+ uint16_t action_src, uint16_t action_dst)
{
- int ret;
- bool is_masked = action->conf && mask->conf;
- struct mlx5_priv *priv = dev->data->dev_private;
+ int ret = 0;
enum mlx5_indirect_list_type type;
+ const struct rte_flow_action_indirect_list *list_conf = action->conf;
- if (!action->conf)
+ if (!list_conf || !list_conf->handle)
return -EINVAL;
- type = mlx5_get_indirect_list_type(action->conf);
+ type = mlx5_get_indirect_list_type(list_conf->handle);
switch (type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
+ ret = hws_table_tmpl_translate_indirect_legacy(dev, action, mask,
+ acts, action_src,
+ action_dst);
+ break;
case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
- if (is_masked) {
- acts->rule_acts[action_dst].action = flow_hw_mirror_action(action);
- } else {
- ret = flow_hw_act_data_indirect_list_append
- (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
- action_src, action_dst, flow_hw_mirror_action);
- if (ret)
- return ret;
- }
+ ret = hws_table_tmpl_translate_indirect_mirror(dev, action, mask,
+ acts, action_src,
+ action_dst);
break;
default:
return -EINVAL;
}
- return 0;
+ return ret;
}
/**
@@ -2366,8 +2538,8 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
(int)action->type == act_data->type);
switch ((int)act_data->type) {
case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
- rule_acts[act_data->action_dst].action =
- act_data->indirect_list.cb(action);
+ act_data->indirect_list_cb(dev, act_data, actions,
+ &rule_acts[act_data->action_dst]);
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
if (flow_hw_shared_action_construct
@@ -4664,20 +4836,11 @@ action_template_set_type(struct rte_flow_actions_template *at,
}
static int
-flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
- unsigned int action_src,
+flow_hw_dr_actions_template_handle_shared(int type, uint32_t action_src,
enum mlx5dr_action_type *action_types,
uint16_t *curr_off, uint16_t *cnt_off,
struct rte_flow_actions_template *at)
{
- uint32_t type;
-
- if (!mask) {
- DRV_LOG(WARNING, "Unable to determine indirect action type "
- "without a mask specified");
- return -EINVAL;
- }
- type = mask->type;
switch (type) {
case RTE_FLOW_ACTION_TYPE_RSS:
action_template_set_type(at, action_types, action_src, curr_off,
@@ -4718,12 +4881,24 @@ static int
flow_hw_template_actions_list(struct rte_flow_actions_template *at,
unsigned int action_src,
enum mlx5dr_action_type *action_types,
- uint16_t *curr_off)
+ uint16_t *curr_off, uint16_t *cnt_off)
{
- enum mlx5_indirect_list_type list_type;
+ int ret;
+ const struct rte_flow_action_indirect_list *indlst_conf = at->actions[action_src].conf;
+ enum mlx5_indirect_list_type list_type = mlx5_get_indirect_list_type(indlst_conf->handle);
+ const union {
+ struct mlx5_indlst_legacy *legacy;
+ struct rte_flow_action_list_handle *handle;
+ } indlst_obj = { .handle = indlst_conf->handle };
- list_type = mlx5_get_indirect_list_type(at->actions[action_src].conf);
switch (list_type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
+ ret = flow_hw_dr_actions_template_handle_shared
+ (indlst_obj.legacy->legacy_type, action_src,
+ action_types, curr_off, cnt_off, at);
+ if (ret)
+ return ret;
+ break;
case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
action_template_set_type(at, action_types, action_src, curr_off,
MLX5DR_ACTION_TYP_DEST_ARRAY);
@@ -4769,17 +4944,14 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
ret = flow_hw_template_actions_list(at, i, action_types,
- &curr_off);
+ &curr_off, &cnt_off);
if (ret)
return NULL;
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
ret = flow_hw_dr_actions_template_handle_shared
- (&at->masks[i],
- i,
- action_types,
- &curr_off,
- &cnt_off, at);
+ (at->masks[i].type, i, action_types,
+ &curr_off, &cnt_off, at);
if (ret)
return NULL;
break;
@@ -5259,9 +5431,8 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
* Need to restore the indirect action index from action conf here.
*/
case RTE_FLOW_ACTION_TYPE_INDIRECT:
- case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
- at->actions[i].conf = actions->conf;
- at->masks[i].conf = masks->conf;
+ at->actions[i].conf = ra[i].conf;
+ at->masks[i].conf = rm[i].conf;
break;
case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
info = actions->conf;
@@ -9519,18 +9690,16 @@ mlx5_mirror_destroy_clone(struct rte_eth_dev *dev,
}
void
-mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release)
+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror)
{
uint32_t i;
- if (mirror->entry.le_prev)
- LIST_REMOVE(mirror, entry);
- for (i = 0; i < mirror->clones_num; i++)
+ mlx5_indirect_list_remove_entry(&mirror->indirect);
+ for(i = 0; i < mirror->clones_num; i++)
mlx5_mirror_destroy_clone(dev, &mirror->clone[i]);
if (mirror->mirror_action)
mlx5dr_action_destroy(mirror->mirror_action);
- if (release)
- mlx5_free(mirror);
+ mlx5_free(mirror);
}
static inline enum mlx5dr_table_type
@@ -9825,7 +9994,8 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
actions, "Failed to allocate mirror context");
return NULL;
}
- mirror->type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
+
+ mirror->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
mirror->clones_num = clones_num;
for (i = 0; i < clones_num; i++) {
const struct rte_flow_action *clone_actions;
@@ -9856,15 +10026,72 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
goto error;
}
- LIST_INSERT_HEAD(&priv->indirect_list_head,
- (struct mlx5_indirect_list *)mirror, entry);
+ mlx5_indirect_list_add_entry(&priv->indirect_list_head, &mirror->indirect);
return (struct rte_flow_action_list_handle *)mirror;
error:
- mlx5_hw_mirror_destroy(dev, mirror, true);
+ mlx5_hw_mirror_destroy(dev, mirror);
return NULL;
}
+void
+mlx5_destroy_legacy_indirect(__rte_unused struct rte_eth_dev *dev,
+ struct mlx5_indirect_list *ptr)
+{
+ struct mlx5_indlst_legacy *obj = (typeof(obj))ptr;
+
+ switch (obj->legacy_type) {
+ case RTE_FLOW_ACTION_TYPE_METER_MARK:
+ break; /* ASO meters were released in mlx5_flow_meter_flush() */
+ default:
+ break;
+ }
+ mlx5_free(obj);
+}
+
+static struct rte_flow_action_list_handle *
+mlx5_create_legacy_indlst(struct rte_eth_dev *dev, uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data, struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_indlst_legacy *indlst_obj = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(*indlst_obj),
+ 0, SOCKET_ID_ANY);
+
+ if (!indlst_obj)
+ return NULL;
+ indlst_obj->handle = flow_hw_action_handle_create(dev, queue, attr, conf,
+ actions, user_data,
+ error);
+ if (!indlst_obj->handle) {
+ mlx5_free(indlst_obj);
+ return NULL;
+ }
+ indlst_obj->legacy_type = actions[0].type;
+ indlst_obj->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY;
+ mlx5_indirect_list_add_entry(&priv->indirect_list_head, &indlst_obj->indirect);
+ return (struct rte_flow_action_list_handle *)indlst_obj;
+}
+
+static __rte_always_inline enum mlx5_indirect_list_type
+flow_hw_inlist_type_get(const struct rte_flow_action *actions)
+{
+ switch (actions[0].type) {
+ case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ return MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
+ case RTE_FLOW_ACTION_TYPE_METER_MARK:
+ return actions[1].type == RTE_FLOW_ACTION_TYPE_END ?
+ MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY :
+ MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;
+ default:
+ break;
+ }
+ return MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;
+}
+
static struct rte_flow_action_list_handle *
flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
const struct rte_flow_op_attr *attr,
@@ -9875,6 +10102,7 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
{
struct mlx5_hw_q_job *job = NULL;
bool push = flow_hw_action_push(attr);
+ enum mlx5_indirect_list_type list_type;
struct rte_flow_action_list_handle *handle;
struct mlx5_priv *priv = dev->data->dev_private;
const struct mlx5_flow_template_table_cfg table_cfg = {
@@ -9893,6 +10121,16 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
NULL, "No action list");
return NULL;
}
+ list_type = flow_hw_inlist_type_get(actions);
+ if (list_type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
+ /*
+ * Legacy indirect actions already have
+ * async resources management. No need to do it twice.
+ */
+ handle = mlx5_create_legacy_indlst(dev, queue, attr, conf,
+ actions, user_data, error);
+ goto end;
+ }
if (attr) {
job = flow_hw_action_job_init(priv, queue, NULL, user_data,
NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
@@ -9900,8 +10138,8 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
if (!job)
return NULL;
}
- switch (actions[0].type) {
- case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ switch (list_type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
handle = mlx5_hw_mirror_handle_create(dev, &table_cfg,
actions, error);
break;
@@ -9915,6 +10153,7 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
flow_hw_action_finalize(dev, queue, job, push, false,
handle != NULL);
}
+end:
return handle;
}
@@ -9943,6 +10182,15 @@ flow_hw_async_action_list_handle_destroy
enum mlx5_indirect_list_type type =
mlx5_get_indirect_list_type((void *)handle);
+ if (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
+ struct mlx5_indlst_legacy *legacy = (typeof(legacy))handle;
+
+ ret = flow_hw_action_handle_destroy(dev, queue, attr,
+ legacy->handle,
+ user_data, error);
+ mlx5_indirect_list_remove_entry(&legacy->indirect);
+ goto end;
+ }
if (attr) {
job = flow_hw_action_job_init(priv, queue, NULL, user_data,
NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
@@ -9952,20 +10200,17 @@ flow_hw_async_action_list_handle_destroy
}
switch (type) {
case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
- mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle, false);
+ mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle);
break;
default:
- handle = NULL;
ret = rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "Invalid indirect list handle");
}
if (job) {
- job->action = handle;
- flow_hw_action_finalize(dev, queue, job, push, false,
- handle != NULL);
+ flow_hw_action_finalize(dev, queue, job, push, false, true);
}
- mlx5_free(handle);
+end:
return ret;
}
@@ -9979,6 +10224,53 @@ flow_hw_action_list_handle_destroy(struct rte_eth_dev *dev,
error);
}
+static int
+flow_hw_async_action_list_handle_query_update
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ void *user_data, struct rte_flow_error *error)
+{
+ enum mlx5_indirect_list_type type =
+ mlx5_get_indirect_list_type((const void *)handle);
+
+ if (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
+ struct mlx5_indlst_legacy *legacy = (void *)(uintptr_t)handle;
+
+ if (update && query)
+ return flow_hw_async_action_handle_query_update
+ (dev, queue_id, attr, legacy->handle,
+ update, query, mode, user_data, error);
+ else if (update && update[0])
+ return flow_hw_action_handle_update(dev, queue_id, attr,
+ legacy->handle, update[0],
+ user_data, error);
+ else if (query && query[0])
+ return flow_hw_action_handle_query(dev, queue_id, attr,
+ legacy->handle, query[0],
+ user_data, error);
+ else
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "invalid legacy handle query_update parameters");
+ }
+ return -ENOTSUP;
+}
+
+static int
+flow_hw_action_list_handle_query_update(struct rte_eth_dev *dev,
+ const struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ struct rte_flow_error *error)
+{
+ return flow_hw_async_action_list_handle_query_update
+ (dev, MLX5_HW_INV_QUEUE, NULL, handle,
+ update, query, mode, NULL, error);
+}
+
const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.info_get = flow_hw_info_get,
.configure = flow_hw_configure,
@@ -10009,10 +10301,14 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.action_query_update = flow_hw_action_query_update,
.action_list_handle_create = flow_hw_action_list_handle_create,
.action_list_handle_destroy = flow_hw_action_list_handle_destroy,
+ .action_list_handle_query_update =
+ flow_hw_action_list_handle_query_update,
.async_action_list_handle_create =
flow_hw_async_action_list_handle_create,
.async_action_list_handle_destroy =
flow_hw_async_action_list_handle_destroy,
+ .async_action_list_handle_query_update =
+ flow_hw_async_action_list_handle_query_update,
.query = flow_hw_query,
.get_aged_flows = flow_hw_get_aged_flows,
.get_q_aged_flows = flow_hw_get_q_aged_flows,
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v6 00/10] net/mlx5: support indirect actions list
2023-10-17 8:09 ` [PATCH v3 " Gregory Etelson
` (17 preceding siblings ...)
2023-10-25 10:27 ` [PATCH v5 00/10] net/mlx5: support indirect actions list Gregory Etelson
@ 2023-10-25 11:22 ` Gregory Etelson
2023-10-25 11:22 ` [PATCH v6 01/10] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
` (9 more replies)
2023-10-26 7:12 ` [PATCH v7 00/10] net/mlx5: support indirect actions list Gregory Etelson
19 siblings, 10 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-25 11:22 UTC (permalink / raw)
To: dev; +Cc: getelson, rasland
Add MLX5 PMD support for indirect actions list.
Erez Shitrit (1):
net/mlx5/hws: allow destination into default miss FT
Gregory Etelson (4):
net/mlx5: reformat HWS code for HWS mirror action
net/mlx5: support HWS mirror action
net/mlx5: reformat HWS code for indirect list actions
net/mlx5: support indirect list METER_MARK action
Haifei Luo (1):
net/mlx5/hws: support reformat for hws mirror
Hamdan Igbaria (3):
net/mlx5/hws: add support for reformat DevX object
net/mlx5/hws: support creating of dynamic forward table and FTE
net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action
Shun Hao (1):
net/mlx5/hws: add support for mirroring
doc/guides/nics/features/mlx5.ini | 1 +
doc/guides/rel_notes/release_23_11.rst | 1 +
drivers/common/mlx5/mlx5_prm.h | 81 +-
drivers/net/mlx5/hws/mlx5dr.h | 34 +
drivers/net/mlx5/hws/mlx5dr_action.c | 210 +++-
drivers/net/mlx5/hws/mlx5dr_action.h | 8 +
drivers/net/mlx5/hws/mlx5dr_cmd.c | 143 ++-
drivers/net/mlx5/hws/mlx5dr_cmd.h | 49 +-
drivers/net/mlx5/hws/mlx5dr_debug.c | 1 +
drivers/net/mlx5/hws/mlx5dr_internal.h | 5 +
drivers/net/mlx5/hws/mlx5dr_send.c | 5 -
drivers/net/mlx5/hws/mlx5dr_table.c | 8 +-
drivers/net/mlx5/mlx5.c | 1 +
drivers/net/mlx5/mlx5.h | 2 +
drivers/net/mlx5/mlx5_flow.c | 199 ++++
drivers/net/mlx5/mlx5_flow.h | 111 ++-
drivers/net/mlx5/mlx5_flow_hw.c | 1217 +++++++++++++++++++++---
17 files changed, 1908 insertions(+), 168 deletions(-)
--
v3: Add ACK to patches in the series.
v4: Squash reformat patches.
v5: Update release notes.
Fix code style.
v6: Fix code style.
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v6 01/10] net/mlx5/hws: add support for reformat DevX object
2023-10-25 11:22 ` [PATCH v6 00/10] net/mlx5: support indirect actions list Gregory Etelson
@ 2023-10-25 11:22 ` Gregory Etelson
2023-10-25 11:22 ` [PATCH v6 02/10] net/mlx5/hws: support creating of dynamic forward table and FTE Gregory Etelson
` (8 subsequent siblings)
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-25 11:22 UTC (permalink / raw)
To: dev
Cc: getelson,
rasland, Hamdan Igbaria, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Hamdan Igbaria <hamdani@nvidia.com>
Add support for creation of packet reformat object,
via the ALLOC_PACKET_REFORMAT_CONTEXT command.
Signed-off-by: Hamdan Igbaria <hamdani@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/common/mlx5/mlx5_prm.h | 39 +++++++++++++++++
drivers/net/mlx5/hws/mlx5dr_cmd.c | 60 ++++++++++++++++++++++++++
drivers/net/mlx5/hws/mlx5dr_cmd.h | 11 +++++
drivers/net/mlx5/hws/mlx5dr_internal.h | 5 +++
drivers/net/mlx5/hws/mlx5dr_send.c | 5 ---
5 files changed, 115 insertions(+), 5 deletions(-)
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index 6e181a0eca..4192fff55b 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -1218,6 +1218,8 @@ enum {
MLX5_CMD_OP_CREATE_FLOW_GROUP = 0x933,
MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936,
MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
+ MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT = 0x93d,
+ MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT = 0x93e,
MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939,
MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
MLX5_CMD_OP_CREATE_GENERAL_OBJECT = 0xa00,
@@ -5191,6 +5193,43 @@ struct mlx5_ifc_modify_flow_table_out_bits {
u8 reserved_at_40[0x60];
};
+struct mlx5_ifc_packet_reformat_context_in_bits {
+ u8 reformat_type[0x8];
+ u8 reserved_at_8[0x4];
+ u8 reformat_param_0[0x4];
+ u8 reserved_at_16[0x6];
+ u8 reformat_data_size[0xa];
+
+ u8 reformat_param_1[0x8];
+ u8 reserved_at_40[0x8];
+ u8 reformat_data[6][0x8];
+
+ u8 more_reformat_data[][0x8];
+};
+
+struct mlx5_ifc_alloc_packet_reformat_context_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0xa0];
+
+ u8 packet_reformat_context[];
+};
+
+struct mlx5_ifc_alloc_packet_reformat_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 packet_reformat_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
/* CQE format mask. */
#define MLX5E_CQE_FORMAT_MASK 0xc
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c
index 594c59aee3..0ccbaee961 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.c
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c
@@ -780,6 +780,66 @@ mlx5dr_cmd_sq_create(struct ibv_context *ctx,
return devx_obj;
}
+struct mlx5dr_devx_obj *
+mlx5dr_cmd_packet_reformat_create(struct ibv_context *ctx,
+ struct mlx5dr_cmd_packet_reformat_create_attr *attr)
+{
+ uint32_t out[MLX5_ST_SZ_DW(alloc_packet_reformat_out)] = {0};
+ size_t insz, cmd_data_sz, cmd_total_sz;
+ struct mlx5dr_devx_obj *devx_obj;
+ void *prctx;
+ void *pdata;
+ void *in;
+
+ cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
+ cmd_total_sz += MLX5_ST_SZ_BYTES(packet_reformat_context_in);
+ cmd_data_sz = MLX5_FLD_SZ_BYTES(packet_reformat_context_in, reformat_data);
+ insz = align(cmd_total_sz + attr->data_sz - cmd_data_sz, DW_SIZE);
+ in = simple_calloc(1, insz);
+ if (!in) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
+
+ prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in,
+ packet_reformat_context);
+ pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);
+
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_type, attr->type);
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, attr->reformat_param_0);
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, attr->data_sz);
+ memcpy(pdata, attr->data, attr->data_sz);
+
+ devx_obj = simple_malloc(sizeof(*devx_obj));
+ if (!devx_obj) {
+ DR_LOG(ERR, "Failed to allocate memory for packet reformat object");
+ rte_errno = ENOMEM;
+ goto out_free_in;
+ }
+
+ devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, insz, out, sizeof(out));
+ if (!devx_obj->obj) {
+ DR_LOG(ERR, "Failed to create packet reformat");
+ rte_errno = errno;
+ goto out_free_devx;
+ }
+
+ devx_obj->id = MLX5_GET(alloc_packet_reformat_out, out, packet_reformat_id);
+
+ simple_free(in);
+
+ return devx_obj;
+
+out_free_devx:
+ simple_free(devx_obj);
+out_free_in:
+ simple_free(in);
+ return NULL;
+}
+
int mlx5dr_cmd_sq_modify_rdy(struct mlx5dr_devx_obj *devx_obj)
{
uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h
index 8a495db9b3..f45b6c6b07 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.h
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h
@@ -155,6 +155,13 @@ struct mlx5dr_cmd_allow_other_vhca_access_attr {
uint8_t access_key[ACCESS_KEY_LEN];
};
+struct mlx5dr_cmd_packet_reformat_create_attr {
+ uint8_t type;
+ size_t data_sz;
+ void *data;
+ uint8_t reformat_param_0;
+};
+
struct mlx5dr_cmd_query_ft_caps {
uint8_t max_level;
uint8_t reparse;
@@ -285,6 +292,10 @@ mlx5dr_cmd_forward_tbl_create(struct ibv_context *ctx,
void mlx5dr_cmd_forward_tbl_destroy(struct mlx5dr_cmd_forward_tbl *tbl);
+struct mlx5dr_devx_obj *
+mlx5dr_cmd_packet_reformat_create(struct ibv_context *ctx,
+ struct mlx5dr_cmd_packet_reformat_create_attr *attr);
+
struct mlx5dr_devx_obj *
mlx5dr_cmd_alias_obj_create(struct ibv_context *ctx,
struct mlx5dr_cmd_alias_obj_create_attr *alias_attr);
diff --git a/drivers/net/mlx5/hws/mlx5dr_internal.h b/drivers/net/mlx5/hws/mlx5dr_internal.h
index c3c077667d..3770d28e62 100644
--- a/drivers/net/mlx5/hws/mlx5dr_internal.h
+++ b/drivers/net/mlx5/hws/mlx5dr_internal.h
@@ -91,4 +91,9 @@ static inline uint64_t roundup_pow_of_two(uint64_t n)
return n == 1 ? 1 : 1ULL << log2above(n);
}
+static inline unsigned long align(unsigned long val, unsigned long align)
+{
+ return (val + align - 1) & ~(align - 1);
+}
+
#endif /* MLX5DR_INTERNAL_H_ */
diff --git a/drivers/net/mlx5/hws/mlx5dr_send.c b/drivers/net/mlx5/hws/mlx5dr_send.c
index e58fdeb117..622d574bfa 100644
--- a/drivers/net/mlx5/hws/mlx5dr_send.c
+++ b/drivers/net/mlx5/hws/mlx5dr_send.c
@@ -668,11 +668,6 @@ static int mlx5dr_send_ring_create_sq_obj(struct mlx5dr_context *ctx,
return err;
}
-static inline unsigned long align(unsigned long val, unsigned long align)
-{
- return (val + align - 1) & ~(align - 1);
-}
-
static int mlx5dr_send_ring_open_sq(struct mlx5dr_context *ctx,
struct mlx5dr_send_engine *queue,
struct mlx5dr_send_ring_sq *sq,
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v6 02/10] net/mlx5/hws: support creating of dynamic forward table and FTE
2023-10-25 11:22 ` [PATCH v6 00/10] net/mlx5: support indirect actions list Gregory Etelson
2023-10-25 11:22 ` [PATCH v6 01/10] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
@ 2023-10-25 11:22 ` Gregory Etelson
2023-10-25 11:22 ` [PATCH v6 03/10] net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action Gregory Etelson
` (7 subsequent siblings)
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-25 11:22 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Hamdan Igbaria, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Hamdan Igbaria <hamdani@nvidia.com>
Add the ability to create forward table and FTE.
Signed-off-by: Hamdan Igbaria <hamdani@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/common/mlx5/mlx5_prm.h | 4 ++++
drivers/net/mlx5/hws/mlx5dr_cmd.c | 13 +++++++++++++
drivers/net/mlx5/hws/mlx5dr_cmd.h | 19 +++++++++++++++++++
3 files changed, 36 insertions(+)
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index 4192fff55b..df621b19af 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -5048,7 +5048,11 @@ enum mlx5_flow_destination_type {
};
enum mlx5_flow_context_action {
+ MLX5_FLOW_CONTEXT_ACTION_DROP = 1 << 1,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 1 << 2,
+ MLX5_FLOW_CONTEXT_ACTION_REFORMAT = 1 << 4,
+ MLX5_FLOW_CONTEXT_ACTION_DECRYPT = 1 << 12,
+ MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 13,
};
enum mlx5_flow_context_flow_source {
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c
index 0ccbaee961..8f407f9bce 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.c
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c
@@ -42,6 +42,7 @@ mlx5dr_cmd_flow_table_create(struct ibv_context *ctx,
ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level);
MLX5_SET(flow_table_context, ft_ctx, rtc_valid, ft_attr->rtc_valid);
+ MLX5_SET(flow_table_context, ft_ctx, reformat_en, ft_attr->reformat_en);
devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
if (!devx_obj->obj) {
@@ -182,12 +183,24 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
action_flags = fte_attr->action_flags;
MLX5_SET(flow_context, in_flow_context, action, action_flags);
+ if (action_flags & MLX5_FLOW_CONTEXT_ACTION_REFORMAT)
+ MLX5_SET(flow_context, in_flow_context,
+ packet_reformat_id, fte_attr->packet_reformat_id);
+
+ if (action_flags & (MLX5_FLOW_CONTEXT_ACTION_DECRYPT | MLX5_FLOW_CONTEXT_ACTION_ENCRYPT)) {
+ MLX5_SET(flow_context, in_flow_context,
+ encrypt_decrypt_type, fte_attr->encrypt_decrypt_type);
+ MLX5_SET(flow_context, in_flow_context,
+ encrypt_decrypt_obj_id, fte_attr->encrypt_decrypt_obj_id);
+ }
+
if (action_flags & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
/* Only destination_list_size of size 1 is supported */
MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
MLX5_SET(dest_format, in_dests, destination_type, fte_attr->destination_type);
MLX5_SET(dest_format, in_dests, destination_id, fte_attr->destination_id);
+ MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
}
devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h
index f45b6c6b07..bf3a362300 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.h
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h
@@ -7,8 +7,12 @@
struct mlx5dr_cmd_set_fte_attr {
uint32_t action_flags;
+ uint8_t encrypt_decrypt_type;
+ uint32_t encrypt_decrypt_obj_id;
+ uint32_t packet_reformat_id;
uint8_t destination_type;
uint32_t destination_id;
+ uint8_t ignore_flow_level;
uint8_t flow_source;
};
@@ -16,6 +20,7 @@ struct mlx5dr_cmd_ft_create_attr {
uint8_t type;
uint8_t level;
bool rtc_valid;
+ uint8_t reformat_en;
};
#define ACCESS_KEY_LEN 32
@@ -296,6 +301,20 @@ struct mlx5dr_devx_obj *
mlx5dr_cmd_packet_reformat_create(struct ibv_context *ctx,
struct mlx5dr_cmd_packet_reformat_create_attr *attr);
+struct mlx5dr_devx_obj *
+mlx5dr_cmd_set_fte(struct ibv_context *ctx,
+ uint32_t table_type,
+ uint32_t table_id,
+ uint32_t group_id,
+ struct mlx5dr_cmd_set_fte_attr *fte_attr);
+
+struct mlx5dr_cmd_forward_tbl *
+mlx5dr_cmd_forward_tbl_create(struct ibv_context *ctx,
+ struct mlx5dr_cmd_ft_create_attr *ft_attr,
+ struct mlx5dr_cmd_set_fte_attr *fte_attr);
+
+void mlx5dr_cmd_forward_tbl_destroy(struct mlx5dr_cmd_forward_tbl *tbl);
+
struct mlx5dr_devx_obj *
mlx5dr_cmd_alias_obj_create(struct ibv_context *ctx,
struct mlx5dr_cmd_alias_obj_create_attr *alias_attr);
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v6 03/10] net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action
2023-10-25 11:22 ` [PATCH v6 00/10] net/mlx5: support indirect actions list Gregory Etelson
2023-10-25 11:22 ` [PATCH v6 01/10] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
2023-10-25 11:22 ` [PATCH v6 02/10] net/mlx5/hws: support creating of dynamic forward table and FTE Gregory Etelson
@ 2023-10-25 11:22 ` Gregory Etelson
2023-10-25 11:22 ` [PATCH v6 04/10] net/mlx5/hws: add support for mirroring Gregory Etelson
` (6 subsequent siblings)
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-25 11:22 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Hamdan Igbaria, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Hamdan Igbaria <hamdani@nvidia.com>
Add the mlx5dr_devx_obj struct to mlx5dr_action, so we can hold
the FT object in the dest table action.
Signed-off-by: Hamdan Igbaria <hamdani@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/hws/mlx5dr_action.c | 4 ++++
drivers/net/mlx5/hws/mlx5dr_action.h | 3 +++
drivers/net/mlx5/hws/mlx5dr_table.c | 1 -
3 files changed, 7 insertions(+), 1 deletion(-)
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c
index ea9fc23732..55ec4f71c9 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.c
+++ b/drivers/net/mlx5/hws/mlx5dr_action.c
@@ -787,6 +787,8 @@ mlx5dr_action_create_dest_table(struct mlx5dr_context *ctx,
ret = mlx5dr_action_create_stcs(action, tbl->ft);
if (ret)
goto free_action;
+
+ action->devx_dest.devx_obj = tbl->ft;
}
return action;
@@ -864,6 +866,8 @@ mlx5dr_action_create_dest_tir(struct mlx5dr_context *ctx,
ret = mlx5dr_action_create_stcs(action, cur_obj);
if (ret)
goto clean_obj;
+
+ action->devx_dest.devx_obj = cur_obj;
}
return action;
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.h b/drivers/net/mlx5/hws/mlx5dr_action.h
index 314e289780..104c6880c1 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.h
+++ b/drivers/net/mlx5/hws/mlx5dr_action.h
@@ -148,6 +148,9 @@ struct mlx5dr_action {
struct {
struct mlx5dv_steering_anchor *sa;
} root_tbl;
+ struct {
+ struct mlx5dr_devx_obj *devx_obj;
+ } devx_dest;
};
};
diff --git a/drivers/net/mlx5/hws/mlx5dr_table.c b/drivers/net/mlx5/hws/mlx5dr_table.c
index e1150cd75d..91eb92db78 100644
--- a/drivers/net/mlx5/hws/mlx5dr_table.c
+++ b/drivers/net/mlx5/hws/mlx5dr_table.c
@@ -68,7 +68,6 @@ static void mlx5dr_table_down_default_fdb_miss_tbl(struct mlx5dr_table *tbl)
return;
mlx5dr_cmd_forward_tbl_destroy(default_miss);
-
ctx->common_res[tbl_type].default_miss = NULL;
}
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v6 04/10] net/mlx5/hws: add support for mirroring
2023-10-25 11:22 ` [PATCH v6 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (2 preceding siblings ...)
2023-10-25 11:22 ` [PATCH v6 03/10] net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action Gregory Etelson
@ 2023-10-25 11:22 ` Gregory Etelson
2023-10-25 11:22 ` [PATCH v6 05/10] net/mlx5/hws: allow destination into default miss FT Gregory Etelson
` (5 subsequent siblings)
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-25 11:22 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Shun Hao, Alex Vesker, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Shun Hao <shunh@nvidia.com>
This patch supports mirroring by adding a dest_array action. The action
accepts a list containing multiple destination actions, and can duplicate
a packet and forward it to each destination in the list.
Signed-off-by: Shun Hao <shunh@nvidia.com>
Acked-by: Alex Vesker <valex@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/common/mlx5/mlx5_prm.h | 23 ++++-
drivers/net/mlx5/hws/mlx5dr.h | 34 +++++++
drivers/net/mlx5/hws/mlx5dr_action.c | 130 ++++++++++++++++++++++++++-
drivers/net/mlx5/hws/mlx5dr_action.h | 3 +
drivers/net/mlx5/hws/mlx5dr_cmd.c | 64 ++++++++++---
drivers/net/mlx5/hws/mlx5dr_cmd.h | 21 ++++-
drivers/net/mlx5/hws/mlx5dr_debug.c | 1 +
drivers/net/mlx5/hws/mlx5dr_table.c | 7 +-
8 files changed, 262 insertions(+), 21 deletions(-)
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index df621b19af..aa0b622ca2 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -2320,7 +2320,11 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
};
struct mlx5_ifc_esw_cap_bits {
- u8 reserved_at_0[0x60];
+ u8 reserved_at_0[0x1d];
+ u8 merged_eswitch[0x1];
+ u8 reserved_at_1e[0x2];
+
+ u8 reserved_at_20[0x40];
u8 esw_manager_vport_number_valid[0x1];
u8 reserved_at_61[0xf];
@@ -5045,6 +5049,7 @@ struct mlx5_ifc_query_flow_table_out_bits {
enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0,
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
+ MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
};
enum mlx5_flow_context_action {
@@ -5088,6 +5093,19 @@ union mlx5_ifc_dest_format_flow_counter_list_auto_bits {
u8 reserved_at_0[0x40];
};
+struct mlx5_ifc_extended_dest_format_bits {
+ struct mlx5_ifc_dest_format_bits destination_entry;
+
+ u8 packet_reformat_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+#define MLX5_IFC_MULTI_PATH_FT_MAX_LEVEL 64
+
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
struct mlx5_ifc_flow_context_bits {
u8 reserved_at_00[0x20];
u8 group_id[0x20];
@@ -5106,8 +5124,7 @@ struct mlx5_ifc_flow_context_bits {
u8 reserved_at_e0[0x40];
u8 encrypt_decrypt_obj_id[0x20];
u8 reserved_at_140[0x16c0];
- /* Currently only one destnation */
- union mlx5_ifc_dest_format_flow_counter_list_auto_bits destination[1];
+ union mlx5_ifc_dest_format_flow_counter_list_auto_bits destination[0];
};
struct mlx5_ifc_set_fte_in_bits {
diff --git a/drivers/net/mlx5/hws/mlx5dr.h b/drivers/net/mlx5/hws/mlx5dr.h
index ea8bf683f3..1995c55132 100644
--- a/drivers/net/mlx5/hws/mlx5dr.h
+++ b/drivers/net/mlx5/hws/mlx5dr.h
@@ -46,6 +46,7 @@ enum mlx5dr_action_type {
MLX5DR_ACTION_TYP_ASO_METER,
MLX5DR_ACTION_TYP_ASO_CT,
MLX5DR_ACTION_TYP_DEST_ROOT,
+ MLX5DR_ACTION_TYP_DEST_ARRAY,
MLX5DR_ACTION_TYP_MAX,
};
@@ -213,6 +214,20 @@ struct mlx5dr_rule_action {
};
};
+struct mlx5dr_action_dest_attr {
+ /* Required action combination */
+ enum mlx5dr_action_type *action_type;
+
+ /* Required destination action to forward the packet */
+ struct mlx5dr_action *dest;
+
+ /* Optional reformat data */
+ struct {
+ size_t reformat_data_sz;
+ void *reformat_data;
+ } reformat;
+};
+
/* Open a context used for direct rule insertion using hardware steering.
* Each context can contain multiple tables of different types.
*
@@ -616,6 +631,25 @@ mlx5dr_action_create_pop_vlan(struct mlx5dr_context *ctx, uint32_t flags);
struct mlx5dr_action *
mlx5dr_action_create_push_vlan(struct mlx5dr_context *ctx, uint32_t flags);
+/* Create a dest array action, this action can duplicate packets and forward to
+ * multiple destinations in the destination list.
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] num_dest
+ * The number of dests attributes.
+ * @param[in] dests
+ * The destination array. Each contains a destination action and can have
+ * additional actions.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5dr_action_flags)
+ * @return pointer to mlx5dr_action on success NULL otherwise.
+ */
+struct mlx5dr_action *
+mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
+ size_t num_dest,
+ struct mlx5dr_action_dest_attr *dests,
+ uint32_t flags);
+
/* Create dest root table, this action will jump to root table according
* the given priority.
* @param[in] ctx
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c
index 55ec4f71c9..f068bc7e9c 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.c
+++ b/drivers/net/mlx5/hws/mlx5dr_action.c
@@ -34,7 +34,8 @@ static const uint32_t action_order_arr[MLX5DR_TABLE_TYPE_MAX][MLX5DR_ACTION_TYP_
BIT(MLX5DR_ACTION_TYP_MISS) |
BIT(MLX5DR_ACTION_TYP_TIR) |
BIT(MLX5DR_ACTION_TYP_DROP) |
- BIT(MLX5DR_ACTION_TYP_DEST_ROOT),
+ BIT(MLX5DR_ACTION_TYP_DEST_ROOT) |
+ BIT(MLX5DR_ACTION_TYP_DEST_ARRAY),
BIT(MLX5DR_ACTION_TYP_LAST),
},
[MLX5DR_TABLE_TYPE_NIC_TX] = {
@@ -71,7 +72,8 @@ static const uint32_t action_order_arr[MLX5DR_TABLE_TYPE_MAX][MLX5DR_ACTION_TYP_
BIT(MLX5DR_ACTION_TYP_MISS) |
BIT(MLX5DR_ACTION_TYP_VPORT) |
BIT(MLX5DR_ACTION_TYP_DROP) |
- BIT(MLX5DR_ACTION_TYP_DEST_ROOT),
+ BIT(MLX5DR_ACTION_TYP_DEST_ROOT) |
+ BIT(MLX5DR_ACTION_TYP_DEST_ARRAY),
BIT(MLX5DR_ACTION_TYP_LAST),
},
};
@@ -535,6 +537,7 @@ static void mlx5dr_action_fill_stc_attr(struct mlx5dr_action *action,
}
break;
case MLX5DR_ACTION_TYP_TBL:
+ case MLX5DR_ACTION_TYP_DEST_ARRAY:
attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT;
attr->action_offset = MLX5DR_ACTION_OFFSET_HIT;
attr->dest_table_id = obj->id;
@@ -1700,6 +1703,124 @@ mlx5dr_action_create_modify_header(struct mlx5dr_context *ctx,
return NULL;
}
+struct mlx5dr_action *
+mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
+ size_t num_dest,
+ struct mlx5dr_action_dest_attr *dests,
+ uint32_t flags)
+{
+ struct mlx5dr_cmd_set_fte_dest *dest_list = NULL;
+ struct mlx5dr_cmd_ft_create_attr ft_attr = {0};
+ struct mlx5dr_cmd_set_fte_attr fte_attr = {0};
+ struct mlx5dr_cmd_forward_tbl *fw_island;
+ enum mlx5dr_table_type table_type;
+ struct mlx5dr_action *action;
+ uint32_t i;
+ int ret;
+
+ if (num_dest <= 1) {
+ rte_errno = EINVAL;
+ DR_LOG(ERR, "Action must have multiple dests");
+ return NULL;
+ }
+
+ if (flags == (MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_SHARED)) {
+ ft_attr.type = FS_FT_NIC_RX;
+ ft_attr.level = MLX5_IFC_MULTI_PATH_FT_MAX_LEVEL - 1;
+ table_type = MLX5DR_TABLE_TYPE_NIC_RX;
+ } else if (flags == (MLX5DR_ACTION_FLAG_HWS_FDB | MLX5DR_ACTION_FLAG_SHARED)) {
+ ft_attr.type = FS_FT_FDB;
+ ft_attr.level = ctx->caps->fdb_ft.max_level - 1;
+ table_type = MLX5DR_TABLE_TYPE_FDB;
+ } else {
+ DR_LOG(ERR, "Action flags not supported");
+ rte_errno = ENOTSUP;
+ return NULL;
+ }
+
+ if (mlx5dr_context_shared_gvmi_used(ctx)) {
+ DR_LOG(ERR, "Cannot use this action in shared GVMI context");
+ rte_errno = ENOTSUP;
+ return NULL;
+ }
+
+ dest_list = simple_calloc(num_dest, sizeof(*dest_list));
+ if (!dest_list) {
+ DR_LOG(ERR, "Failed to allocate memory for destinations");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ for (i = 0; i < num_dest; i++) {
+ enum mlx5dr_action_type *action_type = dests[i].action_type;
+
+ if (!mlx5dr_action_check_combo(dests[i].action_type, table_type)) {
+ DR_LOG(ERR, "Invalid combination of actions");
+ rte_errno = EINVAL;
+ goto free_dest_list;
+ }
+
+ for (; *action_type != MLX5DR_ACTION_TYP_LAST; action_type++) {
+ switch (*action_type) {
+ case MLX5DR_ACTION_TYP_TBL:
+ dest_list[i].destination_type =
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest_list[i].destination_id = dests[i].dest->devx_dest.devx_obj->id;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ fte_attr.ignore_flow_level = 1;
+ break;
+ case MLX5DR_ACTION_TYP_VPORT:
+ dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest_list[i].destination_id = dests[i].dest->vport.vport_num;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ if (ctx->caps->merged_eswitch) {
+ dest_list[i].ext_flags |=
+ MLX5DR_CMD_EXT_DEST_ESW_OWNER_VHCA_ID;
+ dest_list[i].esw_owner_vhca_id =
+ dests[i].dest->vport.esw_owner_vhca_id;
+ }
+ break;
+ case MLX5DR_ACTION_TYP_TIR:
+ dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest_list[i].destination_id = dests[i].dest->devx_dest.devx_obj->id;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ break;
+ default:
+ DR_LOG(ERR, "Unsupported action in dest_array");
+ rte_errno = ENOTSUP;
+ goto free_dest_list;
+ }
+ }
+ }
+ fte_attr.dests_num = num_dest;
+ fte_attr.dests = dest_list;
+
+ fw_island = mlx5dr_cmd_forward_tbl_create(ctx->ibv_ctx, &ft_attr, &fte_attr);
+ if (!fw_island)
+ goto free_dest_list;
+
+ action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_DEST_ARRAY);
+ if (!action)
+ goto destroy_fw_island;
+
+ ret = mlx5dr_action_create_stcs(action, fw_island->ft);
+ if (ret)
+ goto free_action;
+
+ action->dest_array.fw_island = fw_island;
+
+ simple_free(dest_list);
+ return action;
+
+free_action:
+ simple_free(action);
+destroy_fw_island:
+ mlx5dr_cmd_forward_tbl_destroy(fw_island);
+free_dest_list:
+ simple_free(dest_list);
+ return NULL;
+}
+
struct mlx5dr_action *
mlx5dr_action_create_dest_root(struct mlx5dr_context *ctx,
uint16_t priority,
@@ -1782,6 +1903,10 @@ static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action)
mlx5dr_action_destroy_stcs(action);
mlx5dr_action_put_shared_stc(action, MLX5DR_CONTEXT_SHARED_STC_POP);
break;
+ case MLX5DR_ACTION_TYP_DEST_ARRAY:
+ mlx5dr_action_destroy_stcs(action);
+ mlx5dr_cmd_forward_tbl_destroy(action->dest_array.fw_island);
+ break;
case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
case MLX5DR_ACTION_TYP_MODIFY_HDR:
for (i = 0; i < action->modify_header.num_of_patterns; i++) {
@@ -2291,6 +2416,7 @@ int mlx5dr_action_template_process(struct mlx5dr_action_template *at)
case MLX5DR_ACTION_TYP_TIR:
case MLX5DR_ACTION_TYP_TBL:
case MLX5DR_ACTION_TYP_DEST_ROOT:
+ case MLX5DR_ACTION_TYP_DEST_ARRAY:
case MLX5DR_ACTION_TYP_VPORT:
case MLX5DR_ACTION_TYP_MISS:
/* Hit action */
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.h b/drivers/net/mlx5/hws/mlx5dr_action.h
index 104c6880c1..efe07c9d47 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.h
+++ b/drivers/net/mlx5/hws/mlx5dr_action.h
@@ -151,6 +151,9 @@ struct mlx5dr_action {
struct {
struct mlx5dr_devx_obj *devx_obj;
} devx_dest;
+ struct {
+ struct mlx5dr_cmd_forward_tbl *fw_island;
+ } dest_array;
};
};
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c
index 8f407f9bce..22f7c6b019 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.c
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c
@@ -158,18 +158,31 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
uint32_t group_id,
struct mlx5dr_cmd_set_fte_attr *fte_attr)
{
- uint32_t in[MLX5_ST_SZ_DW(set_fte_in) + MLX5_ST_SZ_DW(dest_format)] = {0};
uint32_t out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
struct mlx5dr_devx_obj *devx_obj;
+ uint32_t dest_entry_sz;
+ uint32_t total_dest_sz;
void *in_flow_context;
uint32_t action_flags;
- void *in_dests;
+ uint8_t *in_dests;
+ uint32_t inlen;
+ uint32_t *in;
+ uint32_t i;
+
+ dest_entry_sz = MLX5_ST_SZ_BYTES(dest_format);
+ total_dest_sz = dest_entry_sz * fte_attr->dests_num;
+ inlen = align((MLX5_ST_SZ_BYTES(set_fte_in) + total_dest_sz), DW_SIZE);
+ in = simple_calloc(1, inlen);
+ if (!in) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
devx_obj = simple_malloc(sizeof(*devx_obj));
if (!devx_obj) {
DR_LOG(ERR, "Failed to allocate memory for fte object");
rte_errno = ENOMEM;
- return NULL;
+ goto free_in;
}
MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
@@ -179,6 +192,7 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_source, fte_attr->flow_source);
+ MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
action_flags = fte_attr->action_flags;
MLX5_SET(flow_context, in_flow_context, action, action_flags);
@@ -195,15 +209,39 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
}
if (action_flags & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- /* Only destination_list_size of size 1 is supported */
- MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
- in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
- MLX5_SET(dest_format, in_dests, destination_type, fte_attr->destination_type);
- MLX5_SET(dest_format, in_dests, destination_id, fte_attr->destination_id);
- MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
+ in_dests = (uint8_t *)MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+
+ for (i = 0; i < fte_attr->dests_num; i++) {
+ struct mlx5dr_cmd_set_fte_dest *dest = &fte_attr->dests[i];
+
+ switch (dest->destination_type) {
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+ if (dest->ext_flags & MLX5DR_CMD_EXT_DEST_ESW_OWNER_VHCA_ID) {
+ MLX5_SET(dest_format, in_dests,
+ destination_eswitch_owner_vhca_id_valid, 1);
+ MLX5_SET(dest_format, in_dests,
+ destination_eswitch_owner_vhca_id,
+ dest->esw_owner_vhca_id);
+ }
+ /* Fall through */
+ case MLX5_FLOW_DESTINATION_TYPE_TIR:
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ MLX5_SET(dest_format, in_dests, destination_type,
+ dest->destination_type);
+ MLX5_SET(dest_format, in_dests, destination_id,
+ dest->destination_id);
+ break;
+ default:
+ rte_errno = EOPNOTSUPP;
+ goto free_devx;
+ }
+
+ in_dests = in_dests + dest_entry_sz;
+ }
+ MLX5_SET(flow_context, in_flow_context, destination_list_size, fte_attr->dests_num);
}
- devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
+ devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, inlen, out, sizeof(out));
if (!devx_obj->obj) {
DR_LOG(ERR, "Failed to create FTE (syndrome: %#x)",
mlx5dr_cmd_get_syndrome(out));
@@ -211,10 +249,13 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
goto free_devx;
}
+ simple_free(in);
return devx_obj;
free_devx:
simple_free(devx_obj);
+free_in:
+ simple_free(in);
return NULL;
}
@@ -1244,6 +1285,9 @@ int mlx5dr_cmd_query_caps(struct ibv_context *ctx,
caps->eswitch_manager_vport_number =
MLX5_GET(query_hca_cap_out, out,
capability.esw_cap.esw_manager_vport_number);
+
+ caps->merged_eswitch = MLX5_GET(query_hca_cap_out, out,
+ capability.esw_cap.merged_eswitch);
}
ret = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h
index bf3a362300..6f4aa3e320 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.h
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h
@@ -5,15 +5,27 @@
#ifndef MLX5DR_CMD_H_
#define MLX5DR_CMD_H_
+enum mlx5dr_cmd_ext_dest_flags {
+ MLX5DR_CMD_EXT_DEST_REFORMAT = 1 << 0,
+ MLX5DR_CMD_EXT_DEST_ESW_OWNER_VHCA_ID = 1 << 1,
+};
+
+struct mlx5dr_cmd_set_fte_dest {
+ uint8_t destination_type;
+ uint32_t destination_id;
+ enum mlx5dr_cmd_ext_dest_flags ext_flags;
+ uint16_t esw_owner_vhca_id;
+};
+
struct mlx5dr_cmd_set_fte_attr {
uint32_t action_flags;
+ uint8_t ignore_flow_level;
+ uint8_t flow_source;
uint8_t encrypt_decrypt_type;
uint32_t encrypt_decrypt_obj_id;
uint32_t packet_reformat_id;
- uint8_t destination_type;
- uint32_t destination_id;
- uint8_t ignore_flow_level;
- uint8_t flow_source;
+ uint32_t dests_num;
+ struct mlx5dr_cmd_set_fte_dest *dests;
};
struct mlx5dr_cmd_ft_create_attr {
@@ -216,6 +228,7 @@ struct mlx5dr_cmd_query_caps {
struct mlx5dr_cmd_query_ft_caps nic_ft;
struct mlx5dr_cmd_query_ft_caps fdb_ft;
bool eswitch_manager;
+ uint8_t merged_eswitch;
uint32_t eswitch_manager_vport_number;
uint8_t log_header_modify_argument_granularity;
uint8_t log_header_modify_argument_max_alloc;
diff --git a/drivers/net/mlx5/hws/mlx5dr_debug.c b/drivers/net/mlx5/hws/mlx5dr_debug.c
index 89529944a3..e7b1f2cc32 100644
--- a/drivers/net/mlx5/hws/mlx5dr_debug.c
+++ b/drivers/net/mlx5/hws/mlx5dr_debug.c
@@ -23,6 +23,7 @@ const char *mlx5dr_debug_action_type_str[] = {
[MLX5DR_ACTION_TYP_ASO_METER] = "ASO_METER",
[MLX5DR_ACTION_TYP_ASO_CT] = "ASO_CT",
[MLX5DR_ACTION_TYP_DEST_ROOT] = "DEST_ROOT",
+ [MLX5DR_ACTION_TYP_DEST_ARRAY] = "DEST_ARRAY",
};
static_assert(ARRAY_SIZE(mlx5dr_debug_action_type_str) == MLX5DR_ACTION_TYP_MAX,
diff --git a/drivers/net/mlx5/hws/mlx5dr_table.c b/drivers/net/mlx5/hws/mlx5dr_table.c
index 91eb92db78..55b9b20150 100644
--- a/drivers/net/mlx5/hws/mlx5dr_table.c
+++ b/drivers/net/mlx5/hws/mlx5dr_table.c
@@ -22,6 +22,7 @@ mlx5dr_table_up_default_fdb_miss_tbl(struct mlx5dr_table *tbl)
struct mlx5dr_cmd_ft_create_attr ft_attr = {0};
struct mlx5dr_cmd_set_fte_attr fte_attr = {0};
struct mlx5dr_cmd_forward_tbl *default_miss;
+ struct mlx5dr_cmd_set_fte_dest dest = {0};
struct mlx5dr_context *ctx = tbl->ctx;
uint8_t tbl_type = tbl->type;
@@ -37,9 +38,11 @@ mlx5dr_table_up_default_fdb_miss_tbl(struct mlx5dr_table *tbl)
ft_attr.level = tbl->ctx->caps->fdb_ft.max_level; /* The last level */
ft_attr.rtc_valid = false;
+ dest.destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.destination_id = ctx->caps->eswitch_manager_vport_number;
fte_attr.action_flags = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- fte_attr.destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- fte_attr.destination_id = ctx->caps->eswitch_manager_vport_number;
+ fte_attr.dests_num = 1;
+ fte_attr.dests = &dest;
default_miss = mlx5dr_cmd_forward_tbl_create(mlx5dr_context_get_local_ibv(ctx),
&ft_attr, &fte_attr);
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v6 05/10] net/mlx5/hws: allow destination into default miss FT
2023-10-25 11:22 ` [PATCH v6 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (3 preceding siblings ...)
2023-10-25 11:22 ` [PATCH v6 04/10] net/mlx5/hws: add support for mirroring Gregory Etelson
@ 2023-10-25 11:22 ` Gregory Etelson
2023-10-25 11:22 ` [PATCH v6 06/10] net/mlx5/hws: support reformat for hws mirror Gregory Etelson
` (4 subsequent siblings)
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-25 11:22 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Erez Shitrit, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Erez Shitrit <erezsh@nvidia.com>
In FDB it will direct the packet into the hypervisor vport.
That allows the user to mirror packets into the default-miss vport.
Signed-off-by: Erez Shitrit <erezsh@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/hws/mlx5dr_action.c | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c
index f068bc7e9c..6b62111593 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.c
+++ b/drivers/net/mlx5/hws/mlx5dr_action.c
@@ -1769,6 +1769,17 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
fte_attr.ignore_flow_level = 1;
break;
+ case MLX5DR_ACTION_TYP_MISS:
+ if (table_type != MLX5DR_TABLE_TYPE_FDB) {
+ DR_LOG(ERR, "Miss action supported for FDB only");
+ rte_errno = ENOTSUP;
+ goto free_dest_list;
+ }
+ dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest_list[i].destination_id =
+ ctx->caps->eswitch_manager_vport_number;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ break;
case MLX5DR_ACTION_TYP_VPORT:
dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest_list[i].destination_id = dests[i].dest->vport.vport_num;
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v6 06/10] net/mlx5/hws: support reformat for hws mirror
2023-10-25 11:22 ` [PATCH v6 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (4 preceding siblings ...)
2023-10-25 11:22 ` [PATCH v6 05/10] net/mlx5/hws: allow destination into default miss FT Gregory Etelson
@ 2023-10-25 11:22 ` Gregory Etelson
2023-10-25 11:22 ` [PATCH v6 07/10] net/mlx5: reformat HWS code for HWS mirror action Gregory Etelson
` (3 subsequent siblings)
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-25 11:22 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Haifei Luo, Shun Hao, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Haifei Luo <haifeil@nvidia.com>
In dest_array action, an optional reformat action can be applied to each
destination. This patch supports this by using the extended destination
entry.
Signed-off-by: Haifei Luo <haifeil@nvidia.com>
Signed-off-by: Shun Hao <shunh@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/common/mlx5/mlx5_prm.h | 15 +++++++
drivers/net/mlx5/hws/mlx5dr_action.c | 67 +++++++++++++++++++++++++++-
drivers/net/mlx5/hws/mlx5dr_action.h | 2 +
drivers/net/mlx5/hws/mlx5dr_cmd.c | 10 ++++-
drivers/net/mlx5/hws/mlx5dr_cmd.h | 2 +
5 files changed, 94 insertions(+), 2 deletions(-)
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index aa0b622ca2..bced5a59dd 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -5352,6 +5352,21 @@ enum mlx5_parse_graph_arc_node_index {
MLX5_GRAPH_ARC_NODE_PROGRAMMABLE = 0x1f,
};
+enum mlx5_packet_reformat_context_reformat_type {
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4 = 0x5,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L3_ESP_TUNNEL = 0x6,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4 = 0x7,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_DEL_ESP_TRANSPORT = 0x8,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L3_ESP_TUNNEL_TO_L2 = 0x9,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP = 0xA,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6 = 0xB,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6 = 0xC,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_NISP_TNL = 0xD,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_REMOVE_NISP_TNL = 0xE,
+};
+
#define MLX5_PARSE_GRAPH_FLOW_SAMPLE_MAX 8
#define MLX5_PARSE_GRAPH_IN_ARC_MAX 8
#define MLX5_PARSE_GRAPH_OUT_ARC_MAX 8
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c
index 6b62111593..11a7c58925 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.c
+++ b/drivers/net/mlx5/hws/mlx5dr_action.c
@@ -1703,6 +1703,44 @@ mlx5dr_action_create_modify_header(struct mlx5dr_context *ctx,
return NULL;
}
+static struct mlx5dr_devx_obj *
+mlx5dr_action_dest_array_process_reformat(struct mlx5dr_context *ctx,
+ enum mlx5dr_action_type type,
+ void *reformat_data,
+ size_t reformat_data_sz)
+{
+ struct mlx5dr_cmd_packet_reformat_create_attr pr_attr = {0};
+ struct mlx5dr_devx_obj *reformat_devx_obj;
+
+ if (!reformat_data || !reformat_data_sz) {
+ DR_LOG(ERR, "Empty reformat action or data");
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ switch (type) {
+ case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ pr_attr.type = MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L2_TUNNEL;
+ break;
+ case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ pr_attr.type = MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
+ break;
+ default:
+ DR_LOG(ERR, "Invalid value for reformat type");
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ pr_attr.reformat_param_0 = 0;
+ pr_attr.data_sz = reformat_data_sz;
+ pr_attr.data = reformat_data;
+
+ reformat_devx_obj = mlx5dr_cmd_packet_reformat_create(ctx->ibv_ctx, &pr_attr);
+ if (!reformat_devx_obj)
+ return NULL;
+
+ return reformat_devx_obj;
+}
+
struct mlx5dr_action *
mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
size_t num_dest,
@@ -1710,6 +1748,7 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
uint32_t flags)
{
struct mlx5dr_cmd_set_fte_dest *dest_list = NULL;
+ struct mlx5dr_devx_obj *packet_reformat = NULL;
struct mlx5dr_cmd_ft_create_attr ft_attr = {0};
struct mlx5dr_cmd_set_fte_attr fte_attr = {0};
struct mlx5dr_cmd_forward_tbl *fw_island;
@@ -1796,6 +1835,21 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
dest_list[i].destination_id = dests[i].dest->devx_dest.devx_obj->id;
fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
break;
+ case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ packet_reformat = mlx5dr_action_dest_array_process_reformat
+ (ctx,
+ *action_type,
+ dests[i].reformat.reformat_data,
+ dests[i].reformat.reformat_data_sz);
+ if (!packet_reformat)
+ goto free_dest_list;
+
+ dest_list[i].ext_flags |= MLX5DR_CMD_EXT_DEST_REFORMAT;
+ dest_list[i].ext_reformat = packet_reformat;
+ ft_attr.reformat_en = true;
+ fte_attr.extended_dest = 1;
+ break;
default:
DR_LOG(ERR, "Unsupported action in dest_array");
rte_errno = ENOTSUP;
@@ -1819,8 +1873,9 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
goto free_action;
action->dest_array.fw_island = fw_island;
+ action->dest_array.num_dest = num_dest;
+ action->dest_array.dest_list = dest_list;
- simple_free(dest_list);
return action;
free_action:
@@ -1828,6 +1883,10 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
destroy_fw_island:
mlx5dr_cmd_forward_tbl_destroy(fw_island);
free_dest_list:
+ for (i = 0; i < num_dest; i++) {
+ if (dest_list[i].ext_reformat)
+ mlx5dr_cmd_destroy_obj(dest_list[i].ext_reformat);
+ }
simple_free(dest_list);
return NULL;
}
@@ -1917,6 +1976,12 @@ static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action)
case MLX5DR_ACTION_TYP_DEST_ARRAY:
mlx5dr_action_destroy_stcs(action);
mlx5dr_cmd_forward_tbl_destroy(action->dest_array.fw_island);
+ for (i = 0; i < action->dest_array.num_dest; i++) {
+ if (action->dest_array.dest_list[i].ext_reformat)
+ mlx5dr_cmd_destroy_obj
+ (action->dest_array.dest_list[i].ext_reformat);
+ }
+ simple_free(action->dest_array.dest_list);
break;
case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
case MLX5DR_ACTION_TYP_MODIFY_HDR:
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.h b/drivers/net/mlx5/hws/mlx5dr_action.h
index efe07c9d47..582a38bebc 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.h
+++ b/drivers/net/mlx5/hws/mlx5dr_action.h
@@ -153,6 +153,8 @@ struct mlx5dr_action {
} devx_dest;
struct {
struct mlx5dr_cmd_forward_tbl *fw_island;
+ size_t num_dest;
+ struct mlx5dr_cmd_set_fte_dest *dest_list;
} dest_array;
};
};
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c
index 22f7c6b019..781de40c02 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.c
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c
@@ -169,7 +169,9 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
uint32_t *in;
uint32_t i;
- dest_entry_sz = MLX5_ST_SZ_BYTES(dest_format);
+ dest_entry_sz = fte_attr->extended_dest ?
+ MLX5_ST_SZ_BYTES(extended_dest_format) :
+ MLX5_ST_SZ_BYTES(dest_format);
total_dest_sz = dest_entry_sz * fte_attr->dests_num;
inlen = align((MLX5_ST_SZ_BYTES(set_fte_in) + total_dest_sz), DW_SIZE);
in = simple_calloc(1, inlen);
@@ -192,6 +194,7 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_source, fte_attr->flow_source);
+ MLX5_SET(flow_context, in_flow_context, extended_destination, fte_attr->extended_dest);
MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
action_flags = fte_attr->action_flags;
@@ -230,6 +233,11 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
dest->destination_type);
MLX5_SET(dest_format, in_dests, destination_id,
dest->destination_id);
+ if (dest->ext_flags & MLX5DR_CMD_EXT_DEST_REFORMAT) {
+ MLX5_SET(dest_format, in_dests, packet_reformat, 1);
+ MLX5_SET(extended_dest_format, in_dests, packet_reformat_id,
+ dest->ext_reformat->id);
+ }
break;
default:
rte_errno = EOPNOTSUPP;
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h
index 6f4aa3e320..28e5ea4726 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.h
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h
@@ -14,6 +14,7 @@ struct mlx5dr_cmd_set_fte_dest {
uint8_t destination_type;
uint32_t destination_id;
enum mlx5dr_cmd_ext_dest_flags ext_flags;
+ struct mlx5dr_devx_obj *ext_reformat;
uint16_t esw_owner_vhca_id;
};
@@ -21,6 +22,7 @@ struct mlx5dr_cmd_set_fte_attr {
uint32_t action_flags;
uint8_t ignore_flow_level;
uint8_t flow_source;
+ uint8_t extended_dest;
uint8_t encrypt_decrypt_type;
uint32_t encrypt_decrypt_obj_id;
uint32_t packet_reformat_id;
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v6 07/10] net/mlx5: reformat HWS code for HWS mirror action
2023-10-25 11:22 ` [PATCH v6 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (5 preceding siblings ...)
2023-10-25 11:22 ` [PATCH v6 06/10] net/mlx5/hws: support reformat for hws mirror Gregory Etelson
@ 2023-10-25 11:22 ` Gregory Etelson
2023-10-25 11:22 ` [PATCH v6 08/10] net/mlx5: support " Gregory Etelson
` (2 subsequent siblings)
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-25 11:22 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Suanming Mou, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam
Reformat HWS code for HWS mirror action.
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/mlx5_flow_hw.c | 70 ++++++++++++++++++---------------
1 file changed, 39 insertions(+), 31 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 6fcf654e4a..b2215fb5cf 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -4548,6 +4548,17 @@ static enum mlx5dr_action_type mlx5_hw_dr_action_types[] = {
[RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL] = MLX5DR_ACTION_TYP_DEST_ROOT,
};
+static inline void
+action_template_set_type(struct rte_flow_actions_template *at,
+ enum mlx5dr_action_type *action_types,
+ unsigned int action_src, uint16_t *curr_off,
+ enum mlx5dr_action_type type)
+{
+ at->actions_off[action_src] = *curr_off;
+ action_types[*curr_off] = type;
+ *curr_off = *curr_off + 1;
+}
+
static int
flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
unsigned int action_src,
@@ -4565,9 +4576,8 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
type = mask->type;
switch (type) {
case RTE_FLOW_ACTION_TYPE_RSS:
- at->actions_off[action_src] = *curr_off;
- action_types[*curr_off] = MLX5DR_ACTION_TYP_TIR;
- *curr_off = *curr_off + 1;
+ action_template_set_type(at, action_types, action_src, curr_off,
+ MLX5DR_ACTION_TYP_TIR);
break;
case RTE_FLOW_ACTION_TYPE_AGE:
case RTE_FLOW_ACTION_TYPE_COUNT:
@@ -4575,23 +4585,20 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
* Both AGE and COUNT action need counter, the first one fills
* the action_types array, and the second only saves the offset.
*/
- if (*cnt_off == UINT16_MAX) {
- *cnt_off = *curr_off;
- action_types[*cnt_off] = MLX5DR_ACTION_TYP_CTR;
- *curr_off = *curr_off + 1;
- }
+ if (*cnt_off == UINT16_MAX)
+ action_template_set_type(at, action_types,
+ action_src, curr_off,
+ MLX5DR_ACTION_TYP_CTR);
at->actions_off[action_src] = *cnt_off;
break;
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
- at->actions_off[action_src] = *curr_off;
- action_types[*curr_off] = MLX5DR_ACTION_TYP_ASO_CT;
- *curr_off = *curr_off + 1;
+ action_template_set_type(at, action_types, action_src, curr_off,
+ MLX5DR_ACTION_TYP_ASO_CT);
break;
case RTE_FLOW_ACTION_TYPE_QUOTA:
case RTE_FLOW_ACTION_TYPE_METER_MARK:
- at->actions_off[action_src] = *curr_off;
- action_types[*curr_off] = MLX5DR_ACTION_TYP_ASO_METER;
- *curr_off = *curr_off + 1;
+ action_template_set_type(at, action_types, action_src, curr_off,
+ MLX5DR_ACTION_TYP_ASO_METER);
break;
default:
DRV_LOG(WARNING, "Unsupported shared action type: %d", type);
@@ -5101,31 +5108,32 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
at->reformat_off = UINT16_MAX;
at->mhdr_off = UINT16_MAX;
at->rx_cpy_pos = pos;
- /*
- * mlx5 PMD hacks indirect action index directly to the action conf.
- * The rte_flow_conv() function copies the content from conf pointer.
- * Need to restore the indirect action index from action conf here.
- */
for (i = 0; actions->type != RTE_FLOW_ACTION_TYPE_END;
actions++, masks++, i++) {
- if (actions->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
+ const struct rte_flow_action_modify_field *info;
+
+ switch (actions->type) {
+ /*
+ * mlx5 PMD hacks indirect action index directly to the action conf.
+ * The rte_flow_conv() function copies the content from conf pointer.
+ * Need to restore the indirect action index from action conf here.
+ */
+ case RTE_FLOW_ACTION_TYPE_INDIRECT:
at->actions[i].conf = actions->conf;
at->masks[i].conf = masks->conf;
- }
- if (actions->type == RTE_FLOW_ACTION_TYPE_MODIFY_FIELD) {
- const struct rte_flow_action_modify_field *info = actions->conf;
-
+ break;
+ case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
+ info = actions->conf;
if ((info->dst.field == RTE_FLOW_FIELD_FLEX_ITEM &&
flow_hw_flex_item_acquire(dev, info->dst.flex_handle,
&at->flex_item)) ||
- (info->src.field == RTE_FLOW_FIELD_FLEX_ITEM &&
- flow_hw_flex_item_acquire(dev, info->src.flex_handle,
- &at->flex_item))) {
- rte_flow_error_set(error, rte_errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "Failed to acquire flex item");
+ (info->src.field == RTE_FLOW_FIELD_FLEX_ITEM &&
+ flow_hw_flex_item_acquire(dev, info->src.flex_handle,
+ &at->flex_item)))
goto error;
- }
+ break;
+ default:
+ break;
}
}
at->tmpl = flow_hw_dr_actions_template_create(at);
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v6 08/10] net/mlx5: support HWS mirror action
2023-10-25 11:22 ` [PATCH v6 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (6 preceding siblings ...)
2023-10-25 11:22 ` [PATCH v6 07/10] net/mlx5: reformat HWS code for HWS mirror action Gregory Etelson
@ 2023-10-25 11:22 ` Gregory Etelson
2023-10-25 11:22 ` [PATCH v6 09/10] net/mlx5: reformat HWS code for indirect list actions Gregory Etelson
2023-10-25 11:22 ` [PATCH v6 10/10] net/mlx5: support indirect list METER_MARK action Gregory Etelson
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-25 11:22 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Suanming Mou, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam
HWS mirror clones original packet to one or two destinations and
proceeds with the original packet path.
The mirror has no dedicated RTE flow action type.
Mirror object is referenced by INDIRECT_LIST action.
INDIRECT_LIST for a mirror built from actions list:
SAMPLE [/ SAMPLE] / <Orig. packet destination> / END
Mirror SAMPLE action defines packet clone. It specifies the clone
destination and optional clone reformat action.
Destination action for both clone and original packet depends on HCA
domain:
- for NIC RX, destination is either RSS or QUEUE
- for FDB, destination is PORT
HWS mirror was implemented with the INDIRECT_LIST flow action.
MLX5 PMD defines general `struct mlx5_indirect_list` type for all
INDIRECT_LIST handler objects:
struct mlx5_indirect_list {
enum mlx5_indirect_list_type type;
LIST_ENTRY(mlx5_indirect_list) chain;
char data[];
};
Specific INDIRECT_LIST type must overload `mlx5_indirect_list::data`
and provide unique `type` value.
PMD returns a pointer to `mlx5_indirect_list` object.
Existing non-masked actions template API cannot identify flow actions
in INDIRECT_LIST handler because INDIRECT_LIST handler can represent
several flow actions.
For example:
A: SAMPLE / JUMP
B: SAMPLE / SAMPLE / RSS
Actions template command
template indirect_list / end mask indirect_list 0 / end
does not provide any information to differentiate between flow
actions in A and B.
MLX5 PMD requires INDIRECT_LIST configuration parameter in the
template section:
Non-masked INDIRECT_LIST API:
=============================
template indirect_list X / end mask indirect_list 0 / end
PMD identifies type of X handler and will use the same type in
template creation. Actual parameters for actions in the list will
be extracted from flow configuration
Masked INDIRECT_LIST API:
=========================
template indirect_list X / end mask indirect_list -1UL / end
PMD creates action template from actions types and configurations
referenced by X.
INDIRECT_LIST action without configuration is invalid and will be
rejected by PMD.
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
doc/guides/nics/features/mlx5.ini | 1 +
doc/guides/rel_notes/release_23_11.rst | 1 +
drivers/net/mlx5/mlx5.c | 1 +
drivers/net/mlx5/mlx5.h | 2 +
drivers/net/mlx5/mlx5_flow.c | 134 ++++++
drivers/net/mlx5/mlx5_flow.h | 69 ++-
drivers/net/mlx5/mlx5_flow_hw.c | 615 ++++++++++++++++++++++++-
7 files changed, 818 insertions(+), 5 deletions(-)
diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini
index fc67415c6c..a85d755734 100644
--- a/doc/guides/nics/features/mlx5.ini
+++ b/doc/guides/nics/features/mlx5.ini
@@ -106,6 +106,7 @@ drop = Y
flag = Y
inc_tcp_ack = Y
inc_tcp_seq = Y
+indirect_list = Y
jump = Y
mark = Y
meter = Y
diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 0a6fc76a9d..81d606e773 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -143,6 +143,7 @@ New Features
* **Updated NVIDIA mlx5 net driver.**
* Added support for Network Service Header (NSH) flow matching.
+ * Added support for ``RTE_FLOW_ACTION_TYPE_INDIRECT_LIST`` flow action.
* **Updated Solarflare net driver.**
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 997df595d0..08b7b03365 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -2168,6 +2168,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
/* Free the eCPRI flex parser resource. */
mlx5_flex_parser_ecpri_release(dev);
mlx5_flex_item_port_cleanup(dev);
+ mlx5_indirect_list_handles_release(dev);
#ifdef HAVE_MLX5_HWS_SUPPORT
flow_hw_destroy_vport_action(dev);
flow_hw_resource_release(dev);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 0b709a1bda..f3b872f59c 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1791,6 +1791,8 @@ struct mlx5_priv {
LIST_HEAD(ind_tables, mlx5_ind_table_obj) ind_tbls;
/* Standalone indirect tables. */
LIST_HEAD(stdl_ind_tables, mlx5_ind_table_obj) standalone_ind_tbls;
+ /* Objects created with indirect list action */
+ LIST_HEAD(indirect_list, mlx5_indirect_list) indirect_list_head;
/* Pointer to next element. */
rte_rwlock_t ind_tbls_lock;
uint32_t refcnt; /**< Reference counter. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 8ad85e6027..99b814d815 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -62,6 +62,30 @@ struct tunnel_default_miss_ctx {
};
};
+void
+mlx5_indirect_list_handles_release(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ while (!LIST_EMPTY(&priv->indirect_list_head)) {
+ struct mlx5_indirect_list *e =
+ LIST_FIRST(&priv->indirect_list_head);
+
+ LIST_REMOVE(e, entry);
+ switch (e->type) {
+#ifdef HAVE_MLX5_HWS_SUPPORT
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
+ mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)e, true);
+ break;
+#endif
+ default:
+ DRV_LOG(ERR, "invalid indirect list type");
+ MLX5_ASSERT(false);
+ break;
+ }
+ }
+}
+
static int
flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
struct rte_flow *flow,
@@ -1120,6 +1144,32 @@ mlx5_flow_async_action_handle_query_update
enum rte_flow_query_update_mode qu_mode,
void *user_data, struct rte_flow_error *error);
+static struct rte_flow_action_list_handle *
+mlx5_action_list_handle_create(struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error);
+
+static int
+mlx5_action_list_handle_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_action_list_handle *handle,
+ struct rte_flow_error *error);
+
+static struct rte_flow_action_list_handle *
+mlx5_flow_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct
+ rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data,
+ struct rte_flow_error *error);
+static int
+mlx5_flow_async_action_list_handle_destroy
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow_action_list_handle *action_handle,
+ void *user_data, struct rte_flow_error *error);
+
static const struct rte_flow_ops mlx5_flow_ops = {
.validate = mlx5_flow_validate,
.create = mlx5_flow_create,
@@ -1135,6 +1185,8 @@ static const struct rte_flow_ops mlx5_flow_ops = {
.action_handle_update = mlx5_action_handle_update,
.action_handle_query = mlx5_action_handle_query,
.action_handle_query_update = mlx5_action_handle_query_update,
+ .action_list_handle_create = mlx5_action_list_handle_create,
+ .action_list_handle_destroy = mlx5_action_list_handle_destroy,
.tunnel_decap_set = mlx5_flow_tunnel_decap_set,
.tunnel_match = mlx5_flow_tunnel_match,
.tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
@@ -1163,6 +1215,10 @@ static const struct rte_flow_ops mlx5_flow_ops = {
.async_action_handle_query = mlx5_flow_async_action_handle_query,
.async_action_handle_destroy = mlx5_flow_async_action_handle_destroy,
.async_actions_update = mlx5_flow_async_flow_update,
+ .async_action_list_handle_create =
+ mlx5_flow_async_action_list_handle_create,
+ .async_action_list_handle_destroy =
+ mlx5_flow_async_action_list_handle_destroy,
};
/* Tunnel information. */
@@ -10869,6 +10925,84 @@ mlx5_action_handle_query_update(struct rte_eth_dev *dev,
query, qu_mode, error);
}
+
+#define MLX5_DRV_FOPS_OR_ERR(dev, fops, drv_cb, ret) \
+{ \
+ struct rte_flow_attr attr = { .transfer = 0 }; \
+ enum mlx5_flow_drv_type drv_type = flow_get_drv_type((dev), &attr); \
+ if (drv_type == MLX5_FLOW_TYPE_MIN || \
+ drv_type == MLX5_FLOW_TYPE_MAX) { \
+ rte_flow_error_set(error, ENOTSUP, \
+ RTE_FLOW_ERROR_TYPE_ACTION, \
+ NULL, "invalid driver type"); \
+ return ret; \
+ } \
+ (fops) = flow_get_drv_ops(drv_type); \
+ if (!(fops) || !(fops)->drv_cb) { \
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, \
+ NULL, "no action_list handler"); \
+ return ret; \
+ } \
+}
+
+static struct rte_flow_action_list_handle *
+mlx5_action_list_handle_create(struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops, action_list_handle_create, NULL);
+ return fops->action_list_handle_create(dev, conf, actions, error);
+}
+
+static int
+mlx5_action_list_handle_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_action_list_handle *handle,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops, action_list_handle_destroy, ENOTSUP);
+ return fops->action_list_handle_destroy(dev, handle, error);
+}
+
+static struct rte_flow_action_list_handle *
+mlx5_flow_async_action_list_handle_create(struct rte_eth_dev *dev,
+ uint32_t queue_id,
+ const struct
+ rte_flow_op_attr *op_attr,
+ const struct
+ rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops, async_action_list_handle_create, NULL);
+ return fops->async_action_list_handle_create(dev, queue_id, op_attr,
+ conf, actions, user_data,
+ error);
+}
+
+static int
+mlx5_flow_async_action_list_handle_destroy
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow_action_list_handle *action_handle,
+ void *user_data, struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops,
+ async_action_list_handle_destroy, ENOTSUP);
+ return fops->async_action_list_handle_destroy(dev, queue_id, op_attr,
+ action_handle, user_data,
+ error);
+}
+
/**
* Destroy all indirect actions (shared RSS).
*
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 903ff66d72..580db80fd4 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -65,7 +65,7 @@ enum mlx5_rte_flow_field_id {
(((uint32_t)(uintptr_t)(handle)) & \
((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1))
-enum {
+enum mlx5_indirect_type {
MLX5_INDIRECT_ACTION_TYPE_RSS,
MLX5_INDIRECT_ACTION_TYPE_AGE,
MLX5_INDIRECT_ACTION_TYPE_COUNT,
@@ -97,6 +97,28 @@ enum {
#define MLX5_ACTION_CTX_CT_GET_OWNER MLX5_INDIRECT_ACT_CT_GET_OWNER
#define MLX5_ACTION_CTX_CT_GEN_IDX MLX5_INDIRECT_ACT_CT_GEN_IDX
+enum mlx5_indirect_list_type {
+ MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 1,
+};
+
+/*
+ * Base type for indirect list type.
+ * Actual indirect list type MUST override that type and put type spec data
+ * after the `chain`.
+ */
+struct mlx5_indirect_list {
+ /* type field MUST be the first */
+ enum mlx5_indirect_list_type type;
+ LIST_ENTRY(mlx5_indirect_list) entry;
+ /* put type specific data after chain */
+};
+
+static __rte_always_inline enum mlx5_indirect_list_type
+mlx5_get_indirect_list_type(const struct mlx5_indirect_list *obj)
+{
+ return obj->type;
+}
+
/* Matches on selected register. */
struct mlx5_rte_flow_item_tag {
enum modify_reg id;
@@ -1218,6 +1240,10 @@ struct rte_flow_hw {
#pragma GCC diagnostic error "-Wpedantic"
#endif
+struct mlx5dr_action;
+typedef struct mlx5dr_action *
+(*indirect_list_callback_t)(const struct rte_flow_action *);
+
/* rte flow action translate to DR action struct. */
struct mlx5_action_construct_data {
LIST_ENTRY(mlx5_action_construct_data) next;
@@ -1266,6 +1292,9 @@ struct mlx5_action_construct_data {
struct {
uint32_t id;
} shared_meter;
+ struct {
+ indirect_list_callback_t cb;
+ } indirect_list;
};
};
@@ -1776,6 +1805,17 @@ typedef int (*mlx5_flow_action_query_update_t)
const void *update, void *data,
enum rte_flow_query_update_mode qu_mode,
struct rte_flow_error *error);
+typedef struct rte_flow_action_list_handle *
+(*mlx5_flow_action_list_handle_create_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error);
+typedef int
+(*mlx5_flow_action_list_handle_destroy_t)
+ (struct rte_eth_dev *dev,
+ struct rte_flow_action_list_handle *handle,
+ struct rte_flow_error *error);
typedef int (*mlx5_flow_sync_domain_t)
(struct rte_eth_dev *dev,
uint32_t domains,
@@ -1964,6 +2004,20 @@ typedef int (*mlx5_flow_async_action_handle_destroy_t)
struct rte_flow_action_handle *handle,
void *user_data,
struct rte_flow_error *error);
+typedef struct rte_flow_action_list_handle *
+(*mlx5_flow_async_action_list_handle_create_t)
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data, struct rte_flow_error *error);
+typedef int
+(*mlx5_flow_async_action_list_handle_destroy_t)
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow_action_list_handle *action_handle,
+ void *user_data, struct rte_flow_error *error);
+
struct mlx5_flow_driver_ops {
mlx5_flow_validate_t validate;
@@ -1999,6 +2053,8 @@ struct mlx5_flow_driver_ops {
mlx5_flow_action_update_t action_update;
mlx5_flow_action_query_t action_query;
mlx5_flow_action_query_update_t action_query_update;
+ mlx5_flow_action_list_handle_create_t action_list_handle_create;
+ mlx5_flow_action_list_handle_destroy_t action_list_handle_destroy;
mlx5_flow_sync_domain_t sync_domain;
mlx5_flow_discover_priorities_t discover_priorities;
mlx5_flow_item_create_t item_create;
@@ -2025,6 +2081,10 @@ struct mlx5_flow_driver_ops {
mlx5_flow_async_action_handle_query_update_t async_action_query_update;
mlx5_flow_async_action_handle_query_t async_action_query;
mlx5_flow_async_action_handle_destroy_t async_action_destroy;
+ mlx5_flow_async_action_list_handle_create_t
+ async_action_list_handle_create;
+ mlx5_flow_async_action_list_handle_destroy_t
+ async_action_list_handle_destroy;
};
/* mlx5_flow.c */
@@ -2755,4 +2815,11 @@ flow_hw_get_srh_flex_parser_byte_off_from_ctx(void *dr_ctx __rte_unused)
#endif
return UINT32_MAX;
}
+void
+mlx5_indirect_list_handles_release(struct rte_eth_dev *dev);
+#ifdef HAVE_MLX5_HWS_SUPPORT
+struct mlx5_mirror;
+void
+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release);
+#endif
#endif /* RTE_PMD_MLX5_FLOW_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index b2215fb5cf..1c3d915be1 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -58,6 +58,24 @@
#define MLX5_HW_VLAN_PUSH_VID_IDX 1
#define MLX5_HW_VLAN_PUSH_PCP_IDX 2
+#define MLX5_MIRROR_MAX_CLONES_NUM 3
+#define MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN 4
+
+struct mlx5_mirror_clone {
+ enum rte_flow_action_type type;
+ void *action_ctx;
+};
+
+struct mlx5_mirror {
+ /* type field MUST be the first */
+ enum mlx5_indirect_list_type type;
+ LIST_ENTRY(mlx5_indirect_list) entry;
+
+ uint32_t clones_num;
+ struct mlx5dr_action *mirror_action;
+ struct mlx5_mirror_clone clone[MLX5_MIRROR_MAX_CLONES_NUM];
+};
+
static int flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev);
static int flow_hw_translate_group(struct rte_eth_dev *dev,
const struct mlx5_flow_template_table_cfg *cfg,
@@ -568,6 +586,22 @@ __flow_hw_act_data_general_append(struct mlx5_priv *priv,
return 0;
}
+static __rte_always_inline int
+flow_hw_act_data_indirect_list_append(struct mlx5_priv *priv,
+ struct mlx5_hw_actions *acts,
+ enum rte_flow_action_type type,
+ uint16_t action_src, uint16_t action_dst,
+ indirect_list_callback_t cb)
+{
+ struct mlx5_action_construct_data *act_data;
+
+ act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
+ if (!act_data)
+ return -1;
+ act_data->indirect_list.cb = cb;
+ LIST_INSERT_HEAD(&acts->act_list, act_data, next);
+ return 0;
+}
/**
* Append dynamic encap action to the dynamic action list.
*
@@ -1383,6 +1417,48 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev,
return 0;
}
+static struct mlx5dr_action *
+flow_hw_mirror_action(const struct rte_flow_action *action)
+{
+ struct mlx5_mirror *mirror = (void *)(uintptr_t)action->conf;
+
+ return mirror->mirror_action;
+}
+
+static int
+table_template_translate_indirect_list(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *mask,
+ struct mlx5_hw_actions *acts,
+ uint16_t action_src,
+ uint16_t action_dst)
+{
+ int ret;
+ bool is_masked = action->conf && mask->conf;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ enum mlx5_indirect_list_type type;
+
+ if (!action->conf)
+ return -EINVAL;
+ type = mlx5_get_indirect_list_type(action->conf);
+ switch (type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
+ if (is_masked) {
+ acts->rule_acts[action_dst].action = flow_hw_mirror_action(action);
+ } else {
+ ret = flow_hw_act_data_indirect_list_append
+ (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
+ action_src, action_dst, flow_hw_mirror_action);
+ if (ret)
+ return ret;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
/**
* Translate rte_flow actions to DR action.
*
@@ -1419,7 +1495,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
struct rte_flow_action *actions = at->actions;
struct rte_flow_action *action_start = actions;
struct rte_flow_action *masks = at->masks;
- enum mlx5dr_action_type refmt_type = 0;
+ enum mlx5dr_action_type refmt_type = MLX5DR_ACTION_TYP_LAST;
const struct rte_flow_action_raw_encap *raw_encap_data;
const struct rte_flow_item *enc_item = NULL, *enc_item_m = NULL;
uint16_t reformat_src = 0;
@@ -1433,7 +1509,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
uint16_t action_pos;
uint16_t jump_pos;
uint32_t ct_idx;
- int err;
+ int ret, err;
uint32_t target_grp = 0;
int table_type;
@@ -1445,7 +1521,20 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
else
type = MLX5DR_TABLE_TYPE_NIC_RX;
for (; !actions_end; actions++, masks++) {
- switch (actions->type) {
+ switch ((int)actions->type) {
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
+ action_pos = at->actions_off[actions - at->actions];
+ if (!attr->group) {
+ DRV_LOG(ERR, "Indirect action is not supported in root table.");
+ goto err;
+ }
+ ret = table_template_translate_indirect_list
+ (dev, actions, masks, acts,
+ actions - action_start,
+ action_pos);
+ if (ret)
+ goto err;
+ break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
action_pos = at->actions_off[actions - at->actions];
if (!attr->group) {
@@ -2301,7 +2390,11 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
MLX5_ASSERT(action->type ==
RTE_FLOW_ACTION_TYPE_INDIRECT ||
(int)action->type == act_data->type);
- switch (act_data->type) {
+ switch ((int)act_data->type) {
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
+ rule_acts[act_data->action_dst].action =
+ act_data->indirect_list.cb(action);
+ break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
if (flow_hw_shared_action_construct
(dev, queue, action, table, it_idx,
@@ -4366,6 +4459,8 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev,
switch (action->type) {
case RTE_FLOW_ACTION_TYPE_VOID:
break;
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
+ break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
ret = flow_hw_validate_action_indirect(dev, action,
mask,
@@ -4607,6 +4702,28 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
return 0;
}
+
+static int
+flow_hw_template_actions_list(struct rte_flow_actions_template *at,
+ unsigned int action_src,
+ enum mlx5dr_action_type *action_types,
+ uint16_t *curr_off)
+{
+ enum mlx5_indirect_list_type list_type;
+
+ list_type = mlx5_get_indirect_list_type(at->actions[action_src].conf);
+ switch (list_type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
+ action_template_set_type(at, action_types, action_src, curr_off,
+ MLX5DR_ACTION_TYP_DEST_ARRAY);
+ break;
+ default:
+ DRV_LOG(ERR, "Unsupported indirect list type");
+ return -EINVAL;
+ }
+ return 0;
+}
+
/**
* Create DR action template based on a provided sequence of flow actions.
*
@@ -4639,6 +4756,12 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
switch (at->actions[i].type) {
case RTE_FLOW_ACTION_TYPE_VOID:
break;
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
+ ret = flow_hw_template_actions_list(at, i, action_types,
+ &curr_off);
+ if (ret)
+ return NULL;
+ break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
ret = flow_hw_dr_actions_template_handle_shared
(&at->masks[i],
@@ -5119,6 +5242,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
* Need to restore the indirect action index from action conf here.
*/
case RTE_FLOW_ACTION_TYPE_INDIRECT:
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
at->actions[i].conf = actions->conf;
at->masks[i].conf = masks->conf;
break;
@@ -9354,6 +9478,483 @@ flow_hw_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
return flow_hw_get_q_aged_flows(dev, 0, contexts, nb_contexts, error);
}
+static void
+mlx5_mirror_destroy_clone(struct rte_eth_dev *dev,
+ struct mlx5_mirror_clone *clone)
+{
+ switch (clone->type) {
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ mlx5_hrxq_release(dev,
+ ((struct mlx5_hrxq *)(clone->action_ctx))->idx);
+ break;
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ flow_hw_jump_release(dev, clone->action_ctx);
+ break;
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ default:
+ break;
+ }
+}
+
+void
+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release)
+{
+ uint32_t i;
+
+ if (mirror->entry.le_prev)
+ LIST_REMOVE(mirror, entry);
+ for (i = 0; i < mirror->clones_num; i++)
+ mlx5_mirror_destroy_clone(dev, &mirror->clone[i]);
+ if (mirror->mirror_action)
+ mlx5dr_action_destroy(mirror->mirror_action);
+ if (release)
+ mlx5_free(mirror);
+}
+
+static inline enum mlx5dr_table_type
+get_mlx5dr_table_type(const struct rte_flow_attr *attr)
+{
+ enum mlx5dr_table_type type;
+
+ if (attr->transfer)
+ type = MLX5DR_TABLE_TYPE_FDB;
+ else if (attr->egress)
+ type = MLX5DR_TABLE_TYPE_NIC_TX;
+ else
+ type = MLX5DR_TABLE_TYPE_NIC_RX;
+ return type;
+}
+
+static __rte_always_inline bool
+mlx5_mirror_terminal_action(const struct rte_flow_action *action)
+{
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+static bool
+mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ if (priv->sh->esw_mode)
+ return false;
+ break;
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ if (!priv->sh->esw_mode)
+ return false;
+ if (action[0].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP &&
+ action[1].type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
+ return false;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+/**
+ * Valid mirror actions list includes one or two SAMPLE actions
+ * followed by JUMP.
+ *
+ * @return
+ *   Number of mirror clones if the *actions* list was valid.
+ * -EINVAL otherwise.
+ */
+static int
+mlx5_hw_mirror_actions_list_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions)
+{
+ if (actions[0].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
+ int i = 1;
+ bool valid;
+ const struct rte_flow_action_sample *sample = actions[0].conf;
+ valid = mlx5_mirror_validate_sample_action(dev, sample->actions);
+ if (!valid)
+ return -EINVAL;
+ if (actions[1].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
+ i = 2;
+ sample = actions[1].conf;
+ valid = mlx5_mirror_validate_sample_action(dev, sample->actions);
+ if (!valid)
+ return -EINVAL;
+ }
+ return mlx5_mirror_terminal_action(actions + i) ? i + 1 : -EINVAL;
+ }
+ return -EINVAL;
+}
+
+static int
+mirror_format_tir(struct rte_eth_dev *dev,
+ struct mlx5_mirror_clone *clone,
+ const struct mlx5_flow_template_table_cfg *table_cfg,
+ const struct rte_flow_action *action,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ struct rte_flow_error *error)
+{
+ uint32_t hws_flags;
+ enum mlx5dr_table_type table_type;
+ struct mlx5_hrxq *tir_ctx;
+
+ table_type = get_mlx5dr_table_type(&table_cfg->attr.flow_attr);
+ hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
+ tir_ctx = flow_hw_tir_action_register(dev, hws_flags, action);
+ if (!tir_ctx)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action, "failed to create QUEUE action for mirror clone");
+ dest_attr->dest = tir_ctx->action;
+ clone->action_ctx = tir_ctx;
+ return 0;
+}
+
+static int
+mirror_format_jump(struct rte_eth_dev *dev,
+ struct mlx5_mirror_clone *clone,
+ const struct mlx5_flow_template_table_cfg *table_cfg,
+ const struct rte_flow_action *action,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_jump *jump_conf = action->conf;
+ struct mlx5_hw_jump_action *jump = flow_hw_jump_action_register
+ (dev, table_cfg,
+ jump_conf->group, error);
+
+ if (!jump)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action, "failed to create JUMP action for mirror clone");
+ dest_attr->dest = jump->hws_action;
+ clone->action_ctx = jump;
+ return 0;
+}
+
+static int
+mirror_format_port(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ struct rte_flow_error __rte_unused *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_ethdev *port_action = action->conf;
+
+ dest_attr->dest = priv->hw_vport[port_action->port_id];
+ return 0;
+}
+
+#define MLX5_CONST_ENCAP_ITEM(encap_type, ptr) \
+(((const struct encap_type *)(ptr))->definition)
+
+static int
+hw_mirror_clone_reformat(const struct rte_flow_action *actions,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ enum mlx5dr_action_type *action_type, bool decap)
+{
+ int ret;
+ uint8_t encap_buf[MLX5_ENCAP_MAX_LEN];
+ const struct rte_flow_item *encap_item = NULL;
+ const struct rte_flow_action_raw_encap *encap_conf = NULL;
+ typeof(dest_attr->reformat) *reformat = &dest_attr->reformat;
+
+ switch (actions[0].type) {
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ encap_conf = actions[0].conf;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ encap_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_vxlan_encap,
+ actions);
+ break;
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ encap_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_nvgre_encap,
+ actions);
+ break;
+ default:
+ return -EINVAL;
+ }
+ *action_type = decap ?
+ MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3 :
+ MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
+ if (encap_item) {
+ ret = flow_dv_convert_encap_data(encap_item, encap_buf,
+ &reformat->reformat_data_sz, NULL);
+ if (ret)
+ return -EINVAL;
+ reformat->reformat_data = (void *)(uintptr_t)encap_buf;
+ } else {
+ reformat->reformat_data = (void *)(uintptr_t)encap_conf->data;
+ reformat->reformat_data_sz = encap_conf->size;
+ }
+ return 0;
+}
+
+static int
+hw_mirror_format_clone(struct rte_eth_dev *dev,
+ struct mlx5_mirror_clone *clone,
+ const struct mlx5_flow_template_table_cfg *table_cfg,
+ const struct rte_flow_action *actions,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ struct rte_flow_error *error)
+{
+ int ret;
+ uint32_t i;
+ bool decap_seen = false;
+
+ for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
+ dest_attr->action_type[i] = mlx5_hw_dr_action_types[actions[i].type];
+ switch (actions[i].type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ ret = mirror_format_tir(dev, clone, table_cfg,
+ &actions[i], dest_attr, error);
+ if (ret)
+ return ret;
+ break;
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ ret = mirror_format_port(dev, &actions[i],
+ dest_attr, error);
+ if (ret)
+ return ret;
+ break;
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ ret = mirror_format_jump(dev, clone, table_cfg,
+ &actions[i], dest_attr, error);
+ if (ret)
+ return ret;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ decap_seen = true;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ ret = hw_mirror_clone_reformat(&actions[i], dest_attr,
+ &dest_attr->action_type[i],
+ decap_seen);
+ if (ret < 0)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ &actions[i],
+ "failed to create reformat action");
+ break;
+ default:
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ &actions[i], "unsupported sample action");
+ }
+ clone->type = actions->type;
+ }
+ dest_attr->action_type[i] = MLX5DR_ACTION_TYP_LAST;
+ return 0;
+}
+
+static struct rte_flow_action_list_handle *
+mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
+ const struct mlx5_flow_template_table_cfg *table_cfg,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ uint32_t hws_flags;
+ int ret = 0, i, clones_num;
+ struct mlx5_mirror *mirror;
+ enum mlx5dr_table_type table_type;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5dr_action_dest_attr mirror_attr[MLX5_MIRROR_MAX_CLONES_NUM + 1];
+ enum mlx5dr_action_type array_action_types[MLX5_MIRROR_MAX_CLONES_NUM + 1]
+ [MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN + 1];
+
+ memset(mirror_attr, 0, sizeof(mirror_attr));
+ memset(array_action_types, 0, sizeof(array_action_types));
+ table_type = get_mlx5dr_table_type(&table_cfg->attr.flow_attr);
+ hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
+ clones_num = mlx5_hw_mirror_actions_list_validate(dev, actions);
+ if (clones_num < 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Invalid mirror list format");
+ return NULL;
+ }
+ mirror = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mirror),
+ 0, SOCKET_ID_ANY);
+ if (!mirror) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Failed to allocate mirror context");
+ return NULL;
+ }
+ mirror->type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
+ mirror->clones_num = clones_num;
+ for (i = 0; i < clones_num; i++) {
+ const struct rte_flow_action *clone_actions;
+
+ mirror_attr[i].action_type = array_action_types[i];
+ if (actions[i].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
+ const struct rte_flow_action_sample *sample = actions[i].conf;
+
+ clone_actions = sample->actions;
+ } else {
+ clone_actions = &actions[i];
+ }
+ ret = hw_mirror_format_clone(dev, &mirror->clone[i], table_cfg,
+ clone_actions, &mirror_attr[i],
+ error);
+
+ if (ret)
+ goto error;
+ }
+ hws_flags |= MLX5DR_ACTION_FLAG_SHARED;
+ mirror->mirror_action = mlx5dr_action_create_dest_array(priv->dr_ctx,
+ clones_num,
+ mirror_attr,
+ hws_flags);
+ if (!mirror->mirror_action) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Failed to create HWS mirror action");
+ goto error;
+ }
+
+ LIST_INSERT_HEAD(&priv->indirect_list_head,
+ (struct mlx5_indirect_list *)mirror, entry);
+ return (struct rte_flow_action_list_handle *)mirror;
+
+error:
+ mlx5_hw_mirror_destroy(dev, mirror, true);
+ return NULL;
+}
+
+static struct rte_flow_action_list_handle *
+flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data,
+ struct rte_flow_error *error)
+{
+ struct mlx5_hw_q_job *job = NULL;
+ bool push = flow_hw_action_push(attr);
+ struct rte_flow_action_list_handle *handle;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct mlx5_flow_template_table_cfg table_cfg = {
+ .external = true,
+ .attr = {
+ .flow_attr = {
+ .ingress = conf->ingress,
+ .egress = conf->egress,
+ .transfer = conf->transfer
+ }
+ }
+ };
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "No action list");
+ return NULL;
+ }
+ if (attr) {
+ job = flow_hw_action_job_init(priv, queue, NULL, user_data,
+ NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
+ error);
+ if (!job)
+ return NULL;
+ }
+ switch (actions[0].type) {
+ case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ handle = mlx5_hw_mirror_handle_create(dev, &table_cfg,
+ actions, error);
+ break;
+ default:
+ handle = NULL;
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Invalid list");
+ }
+ if (job) {
+ job->action = handle;
+ flow_hw_action_finalize(dev, queue, job, push, false,
+ handle != NULL);
+ }
+ return handle;
+}
+
+static struct rte_flow_action_list_handle *
+flow_hw_action_list_handle_create(struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ return flow_hw_async_action_list_handle_create(dev, MLX5_HW_INV_QUEUE,
+ NULL, conf, actions,
+ NULL, error);
+}
+
+static int
+flow_hw_async_action_list_handle_destroy
+ (struct rte_eth_dev *dev, uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ struct rte_flow_action_list_handle *handle,
+ void *user_data, struct rte_flow_error *error)
+{
+ int ret = 0;
+ struct mlx5_hw_q_job *job = NULL;
+ bool push = flow_hw_action_push(attr);
+ struct mlx5_priv *priv = dev->data->dev_private;
+ enum mlx5_indirect_list_type type =
+ mlx5_get_indirect_list_type((void *)handle);
+
+ if (attr) {
+ job = flow_hw_action_job_init(priv, queue, NULL, user_data,
+ NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
+ error);
+ if (!job)
+ return rte_errno;
+ }
+ switch (type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
+ mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle, false);
+ break;
+ default:
+ handle = NULL;
+ ret = rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "Invalid indirect list handle");
+ }
+ if (job) {
+ job->action = handle;
+ flow_hw_action_finalize(dev, queue, job, push, false,
+ handle != NULL);
+ }
+ mlx5_free(handle);
+ return ret;
+}
+
+static int
+flow_hw_action_list_handle_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_action_list_handle *handle,
+ struct rte_flow_error *error)
+{
+ return flow_hw_async_action_list_handle_destroy(dev, MLX5_HW_INV_QUEUE,
+ NULL, handle, NULL,
+ error);
+}
+
const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.info_get = flow_hw_info_get,
.configure = flow_hw_configure,
@@ -9382,6 +9983,12 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.action_update = flow_hw_action_update,
.action_query = flow_hw_action_query,
.action_query_update = flow_hw_action_query_update,
+ .action_list_handle_create = flow_hw_action_list_handle_create,
+ .action_list_handle_destroy = flow_hw_action_list_handle_destroy,
+ .async_action_list_handle_create =
+ flow_hw_async_action_list_handle_create,
+ .async_action_list_handle_destroy =
+ flow_hw_async_action_list_handle_destroy,
.query = flow_hw_query,
.get_aged_flows = flow_hw_get_aged_flows,
.get_q_aged_flows = flow_hw_get_q_aged_flows,
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v6 09/10] net/mlx5: reformat HWS code for indirect list actions
2023-10-25 11:22 ` [PATCH v6 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (7 preceding siblings ...)
2023-10-25 11:22 ` [PATCH v6 08/10] net/mlx5: support " Gregory Etelson
@ 2023-10-25 11:22 ` Gregory Etelson
2023-10-25 11:22 ` [PATCH v6 10/10] net/mlx5: support indirect list METER_MARK action Gregory Etelson
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-25 11:22 UTC (permalink / raw)
To: dev
Cc: getelson, rasland, Suanming Mou, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam
Reformat HWS code for indirect list actions.
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/mlx5_flow.h | 4 +-
drivers/net/mlx5/mlx5_flow_hw.c | 250 +++++++++++++++++---------------
2 files changed, 139 insertions(+), 115 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 580db80fd4..653f83cf55 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1331,11 +1331,11 @@ struct rte_flow_actions_template {
uint64_t action_flags; /* Bit-map of all valid action in template. */
uint16_t dr_actions_num; /* Amount of DR rules actions. */
uint16_t actions_num; /* Amount of flow actions */
- uint16_t *actions_off; /* DR action offset for given rte action offset. */
+ uint16_t *dr_off; /* DR action offset for given rte action offset. */
+ uint16_t *src_off; /* RTE action displacement from app. template */
uint16_t reformat_off; /* Offset of DR reformat action. */
uint16_t mhdr_off; /* Offset of DR modify header action. */
uint32_t refcnt; /* Reference counter. */
- uint16_t rx_cpy_pos; /* Action position of Rx metadata to be copied. */
uint8_t flex_item; /* flex item index. */
};
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 1c3d915be1..f9f735ba75 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -1015,11 +1015,11 @@ flow_hw_modify_field_init(struct mlx5_hw_modify_header_action *mhdr,
static __rte_always_inline int
flow_hw_modify_field_compile(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
- const struct rte_flow_action *action_start, /* Start of AT actions. */
const struct rte_flow_action *action, /* Current action from AT. */
const struct rte_flow_action *action_mask, /* Current mask from AT. */
struct mlx5_hw_actions *acts,
struct mlx5_hw_modify_header_action *mhdr,
+ uint16_t src_pos,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
@@ -1122,7 +1122,7 @@ flow_hw_modify_field_compile(struct rte_eth_dev *dev,
if (shared)
return 0;
ret = __flow_hw_act_data_hdr_modify_append(priv, acts, RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
- action - action_start, mhdr->pos,
+ src_pos, mhdr->pos,
cmds_start, cmds_end, shared,
field, dcopy, mask);
if (ret)
@@ -1181,11 +1181,10 @@ flow_hw_validate_compiled_modify_field(struct rte_eth_dev *dev,
static int
flow_hw_represented_port_compile(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
- const struct rte_flow_action *action_start,
const struct rte_flow_action *action,
const struct rte_flow_action *action_mask,
struct mlx5_hw_actions *acts,
- uint16_t action_dst,
+ uint16_t action_src, uint16_t action_dst,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
@@ -1241,7 +1240,7 @@ flow_hw_represented_port_compile(struct rte_eth_dev *dev,
} else {
ret = __flow_hw_act_data_general_append
(priv, acts, action->type,
- action - action_start, action_dst);
+ action_src, action_dst);
if (ret)
return rte_flow_error_set
(error, ENOMEM,
@@ -1493,7 +1492,6 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
const struct rte_flow_attr *attr = &table_attr->flow_attr;
struct rte_flow_action *actions = at->actions;
- struct rte_flow_action *action_start = actions;
struct rte_flow_action *masks = at->masks;
enum mlx5dr_action_type refmt_type = MLX5DR_ACTION_TYP_LAST;
const struct rte_flow_action_raw_encap *raw_encap_data;
@@ -1506,7 +1504,6 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
uint32_t type;
bool reformat_used = false;
unsigned int of_vlan_offset;
- uint16_t action_pos;
uint16_t jump_pos;
uint32_t ct_idx;
int ret, err;
@@ -1521,71 +1518,69 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
else
type = MLX5DR_TABLE_TYPE_NIC_RX;
for (; !actions_end; actions++, masks++) {
+ uint64_t pos = actions - at->actions;
+ uint16_t src_pos = pos - at->src_off[pos];
+ uint16_t dr_pos = at->dr_off[pos];
+
switch ((int)actions->type) {
case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
- action_pos = at->actions_off[actions - at->actions];
if (!attr->group) {
DRV_LOG(ERR, "Indirect action is not supported in root table.");
goto err;
}
ret = table_template_translate_indirect_list
- (dev, actions, masks, acts,
- actions - action_start,
- action_pos);
+ (dev, actions, masks, acts, src_pos, dr_pos);
if (ret)
goto err;
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
- action_pos = at->actions_off[actions - at->actions];
if (!attr->group) {
DRV_LOG(ERR, "Indirect action is not supported in root table.");
goto err;
}
if (actions->conf && masks->conf) {
if (flow_hw_shared_action_translate
- (dev, actions, acts, actions - action_start, action_pos))
+ (dev, actions, acts, src_pos, dr_pos))
goto err;
} else if (__flow_hw_act_data_general_append
- (priv, acts, actions->type,
- actions - action_start, action_pos)){
+ (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT,
+ src_pos, dr_pos)){
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_VOID:
break;
case RTE_FLOW_ACTION_TYPE_DROP:
- action_pos = at->actions_off[actions - at->actions];
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
priv->hw_drop[!!attr->group];
break;
case RTE_FLOW_ACTION_TYPE_MARK:
- action_pos = at->actions_off[actions - at->actions];
acts->mark = true;
if (masks->conf &&
((const struct rte_flow_action_mark *)
masks->conf)->id)
- acts->rule_acts[action_pos].tag.value =
+ acts->rule_acts[dr_pos].tag.value =
mlx5_flow_mark_set
(((const struct rte_flow_action_mark *)
(actions->conf))->id);
else if (__flow_hw_act_data_general_append(priv, acts,
- actions->type, actions - action_start, action_pos))
+ actions->type,
+ src_pos, dr_pos))
goto err;
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
priv->hw_tag[!!attr->group];
__atomic_fetch_add(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED);
flow_hw_rxq_flag_set(dev, true);
break;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
- action_pos = at->actions_off[actions - at->actions];
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
priv->hw_push_vlan[type];
if (is_template_masked_push_vlan(masks->conf))
- acts->rule_acts[action_pos].push_vlan.vlan_hdr =
+ acts->rule_acts[dr_pos].push_vlan.vlan_hdr =
vlan_hdr_to_be32(actions);
else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos))
+ src_pos, dr_pos))
goto err;
of_vlan_offset = is_of_vlan_pcp_present(actions) ?
MLX5_HW_VLAN_PUSH_PCP_IDX :
@@ -1594,12 +1589,10 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
masks += of_vlan_offset;
break;
case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
- action_pos = at->actions_off[actions - at->actions];
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
priv->hw_pop_vlan[type];
break;
case RTE_FLOW_ACTION_TYPE_JUMP:
- action_pos = at->actions_off[actions - at->actions];
if (masks->conf &&
((const struct rte_flow_action_jump *)
masks->conf)->group) {
@@ -1610,17 +1603,16 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
(dev, cfg, jump_group, error);
if (!acts->jump)
goto err;
- acts->rule_acts[action_pos].action = (!!attr->group) ?
- acts->jump->hws_action :
- acts->jump->root_action;
+ acts->rule_acts[dr_pos].action = (!!attr->group) ?
+ acts->jump->hws_action :
+ acts->jump->root_action;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)){
+ src_pos, dr_pos)){
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_QUEUE:
- action_pos = at->actions_off[actions - at->actions];
if (masks->conf &&
((const struct rte_flow_action_queue *)
masks->conf)->index) {
@@ -1630,16 +1622,15 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
actions);
if (!acts->tir)
goto err;
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
acts->tir->action;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)) {
+ src_pos, dr_pos)) {
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_RSS:
- action_pos = at->actions_off[actions - at->actions];
if (actions->conf && masks->conf) {
acts->tir = flow_hw_tir_action_register
(dev,
@@ -1647,11 +1638,11 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
actions);
if (!acts->tir)
goto err;
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
acts->tir->action;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)) {
+ src_pos, dr_pos)) {
goto err;
}
break;
@@ -1663,7 +1654,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
enc_item_m = ((const struct rte_flow_action_vxlan_encap *)
masks->conf)->definition;
reformat_used = true;
- reformat_src = actions - action_start;
+ reformat_src = src_pos;
refmt_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
break;
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
@@ -1674,7 +1665,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
enc_item_m = ((const struct rte_flow_action_nvgre_encap *)
masks->conf)->definition;
reformat_used = true;
- reformat_src = actions - action_start;
+ reformat_src = src_pos;
refmt_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
@@ -1704,7 +1695,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
refmt_type =
MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
}
- reformat_src = actions - action_start;
+ reformat_src = src_pos;
break;
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
reformat_used = true;
@@ -1720,34 +1711,22 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
NULL,
"Send to kernel action on root table is not supported in HW steering mode");
}
- action_pos = at->actions_off[actions - at->actions];
table_type = attr->ingress ? MLX5DR_TABLE_TYPE_NIC_RX :
((attr->egress) ? MLX5DR_TABLE_TYPE_NIC_TX :
- MLX5DR_TABLE_TYPE_FDB);
- acts->rule_acts[action_pos].action = priv->hw_send_to_kernel[table_type];
+ MLX5DR_TABLE_TYPE_FDB);
+ acts->rule_acts[dr_pos].action = priv->hw_send_to_kernel[table_type];
break;
case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
- err = flow_hw_modify_field_compile(dev, attr, action_start,
- actions, masks, acts, &mhdr,
- error);
+ err = flow_hw_modify_field_compile(dev, attr, actions,
+ masks, acts, &mhdr,
+ src_pos, error);
if (err)
goto err;
- /*
- * Adjust the action source position for the following.
- * ... / MODIFY_FIELD: rx_cpy_pos / (QUEUE|RSS) / ...
- * The next action will be Q/RSS, there will not be
- * another adjustment and the real source position of
- * the following actions will be decreased by 1.
- * No change of the total actions in the new template.
- */
- if ((actions - action_start) == at->rx_cpy_pos)
- action_start += 1;
break;
case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
- action_pos = at->actions_off[actions - at->actions];
if (flow_hw_represented_port_compile
- (dev, attr, action_start, actions,
- masks, acts, action_pos, error))
+ (dev, attr, actions,
+ masks, acts, src_pos, dr_pos, error))
goto err;
break;
case RTE_FLOW_ACTION_TYPE_METER:
@@ -1756,19 +1735,18 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
* Calculated DR offset is stored only for ASO_METER and FT
* is assumed to be the next action.
*/
- action_pos = at->actions_off[actions - at->actions];
- jump_pos = action_pos + 1;
+ jump_pos = dr_pos + 1;
if (actions->conf && masks->conf &&
((const struct rte_flow_action_meter *)
masks->conf)->mtr_id) {
err = flow_hw_meter_compile(dev, cfg,
- action_pos, jump_pos, actions, acts, error);
+ dr_pos, jump_pos, actions, acts, error);
if (err)
goto err;
} else if (__flow_hw_act_data_general_append(priv, acts,
- actions->type,
- actions - action_start,
- action_pos))
+ actions->type,
+ src_pos,
+ dr_pos))
goto err;
break;
case RTE_FLOW_ACTION_TYPE_AGE:
@@ -1781,11 +1759,10 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
NULL,
"Age action on root table is not supported in HW steering mode");
}
- action_pos = at->actions_off[actions - at->actions];
if (__flow_hw_act_data_general_append(priv, acts,
- actions->type,
- actions - action_start,
- action_pos))
+ actions->type,
+ src_pos,
+ dr_pos))
goto err;
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
@@ -1806,49 +1783,46 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
* counter.
*/
break;
- action_pos = at->actions_off[actions - at->actions];
if (masks->conf &&
((const struct rte_flow_action_count *)
masks->conf)->id) {
- err = flow_hw_cnt_compile(dev, action_pos, acts);
+ err = flow_hw_cnt_compile(dev, dr_pos, acts);
if (err)
goto err;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)) {
+ src_pos, dr_pos)) {
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
- action_pos = at->actions_off[actions - at->actions];
if (masks->conf) {
ct_idx = MLX5_ACTION_CTX_CT_GET_IDX
((uint32_t)(uintptr_t)actions->conf);
if (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE, ct_idx,
- &acts->rule_acts[action_pos]))
+ &acts->rule_acts[dr_pos]))
goto err;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)) {
+ src_pos, dr_pos)) {
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_METER_MARK:
- action_pos = at->actions_off[actions - at->actions];
if (actions->conf && masks->conf &&
((const struct rte_flow_action_meter_mark *)
masks->conf)->profile) {
err = flow_hw_meter_mark_compile(dev,
- action_pos, actions,
- acts->rule_acts,
- &acts->mtr_id,
- MLX5_HW_INV_QUEUE);
+ dr_pos, actions,
+ acts->rule_acts,
+ &acts->mtr_id,
+ MLX5_HW_INV_QUEUE);
if (err)
goto err;
} else if (__flow_hw_act_data_general_append(priv, acts,
- actions->type,
- actions - action_start,
- action_pos))
+ actions->type,
+ src_pos,
+ dr_pos))
goto err;
break;
case RTE_FLOW_ACTION_TYPE_END:
@@ -1931,7 +1905,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
if (shared_rfmt)
acts->rule_acts[at->reformat_off].reformat.offset = 0;
else if (__flow_hw_act_data_encap_append(priv, acts,
- (action_start + reformat_src)->type,
+ (at->actions + reformat_src)->type,
reformat_src, at->reformat_off, data_size))
goto err;
acts->encap_decap->shared = shared_rfmt;
@@ -4283,6 +4257,31 @@ flow_hw_validate_action_raw_encap(struct rte_eth_dev *dev __rte_unused,
return 0;
}
+/**
+ * Process `... / raw_decap / raw_encap / ...` actions sequence.
+ * The PMD handles the sequence as a single encap or decap reformat action,
+ * depending on the raw_encap configuration.
+ *
+ * The function assumes that the raw_decap / raw_encap placement
+ * in the actions template list follows the relative HWS actions order
+ * required for the reformat configuration:
+ * - an ENCAP configuration must appear before [JUMP|DROP|PORT];
+ * - a DECAP configuration must appear at the template head.
+ */
+static uint64_t
+mlx5_decap_encap_reformat_type(const struct rte_flow_action *actions,
+ uint32_t encap_ind, uint64_t flags)
+{
+ const struct rte_flow_action_raw_encap *encap = actions[encap_ind].conf;
+
+ if ((flags & MLX5_FLOW_ACTION_DECAP) == 0)
+ return MLX5_FLOW_ACTION_ENCAP;
+ if (actions[encap_ind - 1].type != RTE_FLOW_ACTION_TYPE_RAW_DECAP)
+ return MLX5_FLOW_ACTION_ENCAP;
+ return encap->size >= MLX5_ENCAPSULATION_DECISION_SIZE ?
+ MLX5_FLOW_ACTION_ENCAP : MLX5_FLOW_ACTION_DECAP;
+}
+
static inline uint16_t
flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
struct rte_flow_action masks[],
@@ -4320,13 +4319,13 @@ flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
*/
for (i = act_num - 2; (int)i >= 0; i--) {
enum rte_flow_action_type type = actions[i].type;
+ uint64_t reformat_type;
if (type == RTE_FLOW_ACTION_TYPE_INDIRECT)
type = masks[i].type;
switch (type) {
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
- case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
case RTE_FLOW_ACTION_TYPE_DROP:
case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
case RTE_FLOW_ACTION_TYPE_JUMP:
@@ -4337,10 +4336,20 @@ flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
case RTE_FLOW_ACTION_TYPE_VOID:
case RTE_FLOW_ACTION_TYPE_END:
break;
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ reformat_type =
+ mlx5_decap_encap_reformat_type(actions, i,
+ flags);
+ if (reformat_type == MLX5_FLOW_ACTION_DECAP) {
+ i++;
+ goto insert;
+ }
+ if (actions[i - 1].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP)
+ i--;
+ break;
default:
i++; /* new MF inserted AFTER actions[i] */
goto insert;
- break;
}
}
i = 0;
@@ -4649,7 +4658,7 @@ action_template_set_type(struct rte_flow_actions_template *at,
unsigned int action_src, uint16_t *curr_off,
enum mlx5dr_action_type type)
{
- at->actions_off[action_src] = *curr_off;
+ at->dr_off[action_src] = *curr_off;
action_types[*curr_off] = type;
*curr_off = *curr_off + 1;
}
@@ -4680,11 +4689,13 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
* Both AGE and COUNT action need counter, the first one fills
* the action_types array, and the second only saves the offset.
*/
- if (*cnt_off == UINT16_MAX)
+ if (*cnt_off == UINT16_MAX) {
+ *cnt_off = *curr_off;
action_template_set_type(at, action_types,
action_src, curr_off,
MLX5DR_ACTION_TYP_CTR);
- at->actions_off[action_src] = *cnt_off;
+ }
+ at->dr_off[action_src] = *cnt_off;
break;
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
action_template_set_type(at, action_types, action_src, curr_off,
@@ -4804,7 +4815,7 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
}
break;
case RTE_FLOW_ACTION_TYPE_METER:
- at->actions_off[i] = curr_off;
+ at->dr_off[i] = curr_off;
action_types[curr_off++] = MLX5DR_ACTION_TYP_ASO_METER;
if (curr_off >= MLX5_HW_MAX_ACTS)
goto err_actions_num;
@@ -4812,14 +4823,14 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
break;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
type = mlx5_hw_dr_action_types[at->actions[i].type];
- at->actions_off[i] = curr_off;
+ at->dr_off[i] = curr_off;
action_types[curr_off++] = type;
i += is_of_vlan_pcp_present(at->actions + i) ?
MLX5_HW_VLAN_PUSH_PCP_IDX :
MLX5_HW_VLAN_PUSH_VID_IDX;
break;
case RTE_FLOW_ACTION_TYPE_METER_MARK:
- at->actions_off[i] = curr_off;
+ at->dr_off[i] = curr_off;
action_types[curr_off++] = MLX5DR_ACTION_TYP_ASO_METER;
if (curr_off >= MLX5_HW_MAX_ACTS)
goto err_actions_num;
@@ -4835,11 +4846,11 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
cnt_off = curr_off++;
action_types[cnt_off] = MLX5DR_ACTION_TYP_CTR;
}
- at->actions_off[i] = cnt_off;
+ at->dr_off[i] = cnt_off;
break;
default:
type = mlx5_hw_dr_action_types[at->actions[i].type];
- at->actions_off[i] = curr_off;
+ at->dr_off[i] = curr_off;
action_types[curr_off++] = type;
break;
}
@@ -5112,6 +5123,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
struct rte_flow_action mf_actions[MLX5_HW_MAX_ACTS];
struct rte_flow_action mf_masks[MLX5_HW_MAX_ACTS];
uint32_t expand_mf_num = 0;
+ uint16_t src_off[MLX5_HW_MAX_ACTS] = {0, };
if (mlx5_flow_hw_actions_validate(dev, attr, actions, masks,
&action_flags, error))
@@ -5190,6 +5202,8 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
act_num,
expand_mf_num);
act_num += expand_mf_num;
+ for (i = pos + expand_mf_num; i < act_num; i++)
+ src_off[i] += expand_mf_num;
action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
}
act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, ra, error);
@@ -5200,7 +5214,8 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
if (mask_len <= 0)
return NULL;
len += RTE_ALIGN(mask_len, 16);
- len += RTE_ALIGN(act_num * sizeof(*at->actions_off), 16);
+ len += RTE_ALIGN(act_num * sizeof(*at->dr_off), 16);
+ len += RTE_ALIGN(act_num * sizeof(*at->src_off), 16);
at = mlx5_malloc(MLX5_MEM_ZERO, len + sizeof(*at),
RTE_CACHE_LINE_SIZE, rte_socket_id());
if (!at) {
@@ -5224,13 +5239,15 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
if (mask_len <= 0)
goto error;
/* DR actions offsets in the third part. */
- at->actions_off = (uint16_t *)((uint8_t *)at->masks + mask_len);
+ at->dr_off = (uint16_t *)((uint8_t *)at->masks + mask_len);
+ at->src_off = RTE_PTR_ADD(at->dr_off,
+ RTE_ALIGN(act_num * sizeof(*at->dr_off), 16));
+ memcpy(at->src_off, src_off, act_num * sizeof(at->src_off[0]));
at->actions_num = act_num;
for (i = 0; i < at->actions_num; ++i)
- at->actions_off[i] = UINT16_MAX;
+ at->dr_off[i] = UINT16_MAX;
at->reformat_off = UINT16_MAX;
at->mhdr_off = UINT16_MAX;
- at->rx_cpy_pos = pos;
for (i = 0; actions->type != RTE_FLOW_ACTION_TYPE_END;
actions++, masks++, i++) {
const struct rte_flow_action_modify_field *info;
@@ -9547,14 +9564,15 @@ mlx5_mirror_terminal_action(const struct rte_flow_action *action)
static bool
mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
- const struct rte_flow_action *action)
+ const struct rte_flow_attr *flow_attr,
+ const struct rte_flow_action *action)
{
struct mlx5_priv *priv = dev->data->dev_private;
switch (action->type) {
case RTE_FLOW_ACTION_TYPE_QUEUE:
case RTE_FLOW_ACTION_TYPE_RSS:
- if (priv->sh->esw_mode)
+ if (flow_attr->transfer)
return false;
break;
case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
@@ -9562,7 +9580,7 @@ mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
- if (!priv->sh->esw_mode)
+ if (!priv->sh->esw_mode && !flow_attr->transfer)
return false;
if (action[0].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP &&
action[1].type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
@@ -9584,19 +9602,22 @@ mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
*/
static int
mlx5_hw_mirror_actions_list_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *flow_attr,
const struct rte_flow_action *actions)
{
if (actions[0].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
int i = 1;
bool valid;
const struct rte_flow_action_sample *sample = actions[0].conf;
- valid = mlx5_mirror_validate_sample_action(dev, sample->actions);
+ valid = mlx5_mirror_validate_sample_action(dev, flow_attr,
+ sample->actions);
if (!valid)
return -EINVAL;
if (actions[1].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
i = 2;
sample = actions[1].conf;
- valid = mlx5_mirror_validate_sample_action(dev, sample->actions);
+ valid = mlx5_mirror_validate_sample_action(dev, flow_attr,
+ sample->actions);
if (!valid)
return -EINVAL;
}
@@ -9670,10 +9691,10 @@ mirror_format_port(struct rte_eth_dev *dev,
static int
hw_mirror_clone_reformat(const struct rte_flow_action *actions,
struct mlx5dr_action_dest_attr *dest_attr,
- enum mlx5dr_action_type *action_type, bool decap)
+ enum mlx5dr_action_type *action_type,
+ uint8_t *reformat_buf, bool decap)
{
int ret;
- uint8_t encap_buf[MLX5_ENCAP_MAX_LEN];
const struct rte_flow_item *encap_item = NULL;
const struct rte_flow_action_raw_encap *encap_conf = NULL;
typeof(dest_attr->reformat) *reformat = &dest_attr->reformat;
@@ -9697,11 +9718,11 @@ hw_mirror_clone_reformat(const struct rte_flow_action *actions,
MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3 :
MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
if (encap_item) {
- ret = flow_dv_convert_encap_data(encap_item, encap_buf,
+ ret = flow_dv_convert_encap_data(encap_item, reformat_buf,
&reformat->reformat_data_sz, NULL);
if (ret)
return -EINVAL;
- reformat->reformat_data = (void *)(uintptr_t)encap_buf;
+ reformat->reformat_data = reformat_buf;
} else {
reformat->reformat_data = (void *)(uintptr_t)encap_conf->data;
reformat->reformat_data_sz = encap_conf->size;
@@ -9715,7 +9736,7 @@ hw_mirror_format_clone(struct rte_eth_dev *dev,
const struct mlx5_flow_template_table_cfg *table_cfg,
const struct rte_flow_action *actions,
struct mlx5dr_action_dest_attr *dest_attr,
- struct rte_flow_error *error)
+ uint8_t *reformat_buf, struct rte_flow_error *error)
{
int ret;
uint32_t i;
@@ -9751,7 +9772,7 @@ hw_mirror_format_clone(struct rte_eth_dev *dev,
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
ret = hw_mirror_clone_reformat(&actions[i], dest_attr,
&dest_attr->action_type[i],
- decap_seen);
+ reformat_buf, decap_seen);
if (ret < 0)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -9780,15 +9801,18 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
struct mlx5_mirror *mirror;
enum mlx5dr_table_type table_type;
struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_attr *flow_attr = &table_cfg->attr.flow_attr;
+ uint8_t reformat_buf[MLX5_MIRROR_MAX_CLONES_NUM][MLX5_ENCAP_MAX_LEN];
struct mlx5dr_action_dest_attr mirror_attr[MLX5_MIRROR_MAX_CLONES_NUM + 1];
enum mlx5dr_action_type array_action_types[MLX5_MIRROR_MAX_CLONES_NUM + 1]
[MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN + 1];
memset(mirror_attr, 0, sizeof(mirror_attr));
memset(array_action_types, 0, sizeof(array_action_types));
- table_type = get_mlx5dr_table_type(&table_cfg->attr.flow_attr);
+ table_type = get_mlx5dr_table_type(flow_attr);
hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
- clones_num = mlx5_hw_mirror_actions_list_validate(dev, actions);
+ clones_num = mlx5_hw_mirror_actions_list_validate(dev, flow_attr,
+ actions);
if (clones_num < 0) {
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
actions, "Invalid mirror list format");
@@ -9816,7 +9840,7 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
}
ret = hw_mirror_format_clone(dev, &mirror->clone[i], table_cfg,
clone_actions, &mirror_attr[i],
- error);
+ reformat_buf[i], error);
if (ret)
goto error;
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v6 10/10] net/mlx5: support indirect list METER_MARK action
2023-10-25 11:22 ` [PATCH v6 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (8 preceding siblings ...)
2023-10-25 11:22 ` [PATCH v6 09/10] net/mlx5: reformat HWS code for indirect list actions Gregory Etelson
@ 2023-10-25 11:22 ` Gregory Etelson
9 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-25 11:22 UTC (permalink / raw)
To: dev
Cc: getelson,
rasland, Suanming Mou, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/mlx5_flow.c | 69 ++++-
drivers/net/mlx5/mlx5_flow.h | 70 ++++--
drivers/net/mlx5/mlx5_flow_hw.c | 430 +++++++++++++++++++++++++++-----
3 files changed, 484 insertions(+), 85 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 99b814d815..34252d66c0 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -75,8 +75,11 @@ mlx5_indirect_list_handles_release(struct rte_eth_dev *dev)
switch (e->type) {
#ifdef HAVE_MLX5_HWS_SUPPORT
case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
- mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)e, true);
+ mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)e);
break;
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
+ mlx5_destroy_legacy_indirect(dev, e);
+ break;
#endif
default:
DRV_LOG(ERR, "invalid indirect list type");
@@ -1169,7 +1172,24 @@ mlx5_flow_async_action_list_handle_destroy
const struct rte_flow_op_attr *op_attr,
struct rte_flow_action_list_handle *action_handle,
void *user_data, struct rte_flow_error *error);
-
+static int
+mlx5_flow_action_list_handle_query_update(struct rte_eth_dev *dev,
+ const
+ struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ struct rte_flow_error *error);
+static int
+mlx5_flow_async_action_list_handle_query_update(struct rte_eth_dev *dev,
+ uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct
+ rte_flow_action_list_handle *handle,
+ const void **update,
+ void **query,
+ enum rte_flow_query_update_mode mode,
+ void *user_data,
+ struct rte_flow_error *error);
static const struct rte_flow_ops mlx5_flow_ops = {
.validate = mlx5_flow_validate,
.create = mlx5_flow_create,
@@ -1219,6 +1239,10 @@ static const struct rte_flow_ops mlx5_flow_ops = {
mlx5_flow_async_action_list_handle_create,
.async_action_list_handle_destroy =
mlx5_flow_async_action_list_handle_destroy,
+ .action_list_handle_query_update =
+ mlx5_flow_action_list_handle_query_update,
+ .async_action_list_handle_query_update =
+ mlx5_flow_async_action_list_handle_query_update,
};
/* Tunnel information. */
@@ -11003,6 +11027,47 @@ mlx5_flow_async_action_list_handle_destroy
error);
}
+static int
+mlx5_flow_action_list_handle_query_update(struct rte_eth_dev *dev,
+ const
+ struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops,
+ action_list_handle_query_update, ENOTSUP);
+ return fops->action_list_handle_query_update(dev, handle, update, query,
+ mode, error);
+}
+
+static int
+mlx5_flow_async_action_list_handle_query_update(struct rte_eth_dev *dev,
+ uint32_t queue_id,
+ const
+ struct rte_flow_op_attr *op_attr,
+ const struct
+ rte_flow_action_list_handle *handle,
+ const void **update,
+ void **query,
+ enum
+ rte_flow_query_update_mode mode,
+ void *user_data,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops,
+ async_action_list_handle_query_update, ENOTSUP);
+ return fops->async_action_list_handle_query_update(dev, queue_id, op_attr,
+ handle, update,
+ query, mode,
+ user_data, error);
+}
+
+
/**
* Destroy all indirect actions (shared RSS).
*
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 653f83cf55..3ea2548d2b 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -98,25 +98,40 @@ enum mlx5_indirect_type {
#define MLX5_ACTION_CTX_CT_GEN_IDX MLX5_INDIRECT_ACT_CT_GEN_IDX
enum mlx5_indirect_list_type {
- MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 1,
+ MLX5_INDIRECT_ACTION_LIST_TYPE_ERR = 0,
+ MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY = 1,
+ MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 2,
};
-/*
+/**
* Base type for indirect list type.
- * Actual indirect list type MUST override that type and put type spec data
- * after the `chain`.
*/
struct mlx5_indirect_list {
- /* type field MUST be the first */
+ /* Indirect list type. */
enum mlx5_indirect_list_type type;
+ /* Optional storage list entry */
LIST_ENTRY(mlx5_indirect_list) entry;
- /* put type specific data after chain */
};
+static __rte_always_inline void
+mlx5_indirect_list_add_entry(void *head, struct mlx5_indirect_list *elem)
+{
+ LIST_HEAD(, mlx5_indirect_list) *h = head;
+
+ LIST_INSERT_HEAD(h, elem, entry);
+}
+
+static __rte_always_inline void
+mlx5_indirect_list_remove_entry(struct mlx5_indirect_list *elem)
+{
+ if (elem->entry.le_prev)
+ LIST_REMOVE(elem, entry);
+}
+
static __rte_always_inline enum mlx5_indirect_list_type
-mlx5_get_indirect_list_type(const struct mlx5_indirect_list *obj)
+mlx5_get_indirect_list_type(const struct rte_flow_action_list_handle *obj)
{
- return obj->type;
+ return ((const struct mlx5_indirect_list *)obj)->type;
}
/* Matches on selected register. */
@@ -1240,9 +1255,12 @@ struct rte_flow_hw {
#pragma GCC diagnostic error "-Wpedantic"
#endif
-struct mlx5dr_action;
-typedef struct mlx5dr_action *
-(*indirect_list_callback_t)(const struct rte_flow_action *);
+struct mlx5_action_construct_data;
+typedef int
+(*indirect_list_callback_t)(struct rte_eth_dev *,
+ const struct mlx5_action_construct_data *,
+ const struct rte_flow_action *,
+ struct mlx5dr_rule_action *);
/* rte flow action translate to DR action struct. */
struct mlx5_action_construct_data {
@@ -1252,6 +1270,7 @@ struct mlx5_action_construct_data {
uint32_t idx; /* Data index. */
uint16_t action_src; /* rte_flow_action src offset. */
uint16_t action_dst; /* mlx5dr_rule_action dst offset. */
+ indirect_list_callback_t indirect_list_cb;
union {
struct {
/* encap data len. */
@@ -1291,10 +1310,8 @@ struct mlx5_action_construct_data {
} shared_counter;
struct {
uint32_t id;
+ uint32_t conf_masked:1;
} shared_meter;
- struct {
- indirect_list_callback_t cb;
- } indirect_list;
};
};
@@ -2017,7 +2034,21 @@ typedef int
const struct rte_flow_op_attr *op_attr,
struct rte_flow_action_list_handle *action_handle,
void *user_data, struct rte_flow_error *error);
-
+typedef int
+(*mlx5_flow_action_list_handle_query_update_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ struct rte_flow_error *error);
+typedef int
+(*mlx5_flow_async_action_list_handle_query_update_t)
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ void *user_data, struct rte_flow_error *error);
struct mlx5_flow_driver_ops {
mlx5_flow_validate_t validate;
@@ -2085,6 +2116,10 @@ struct mlx5_flow_driver_ops {
async_action_list_handle_create;
mlx5_flow_async_action_list_handle_destroy_t
async_action_list_handle_destroy;
+ mlx5_flow_action_list_handle_query_update_t
+ action_list_handle_query_update;
+ mlx5_flow_async_action_list_handle_query_update_t
+ async_action_list_handle_query_update;
};
/* mlx5_flow.c */
@@ -2820,6 +2855,9 @@ mlx5_indirect_list_handles_release(struct rte_eth_dev *dev);
#ifdef HAVE_MLX5_HWS_SUPPORT
struct mlx5_mirror;
void
-mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release);
+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror);
+void
+mlx5_destroy_legacy_indirect(struct rte_eth_dev *dev,
+ struct mlx5_indirect_list *ptr);
#endif
#endif /* RTE_PMD_MLX5_FLOW_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index f9f735ba75..b6a474021a 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -61,16 +61,23 @@
#define MLX5_MIRROR_MAX_CLONES_NUM 3
#define MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN 4
+#define MLX5_HW_PORT_IS_PROXY(priv) \
+ (!!((priv)->sh->esw_mode && (priv)->master))
+
+
+struct mlx5_indlst_legacy {
+ struct mlx5_indirect_list indirect;
+ struct rte_flow_action_handle *handle;
+ enum rte_flow_action_type legacy_type;
+};
+
struct mlx5_mirror_clone {
enum rte_flow_action_type type;
void *action_ctx;
};
struct mlx5_mirror {
- /* type field MUST be the first */
- enum mlx5_indirect_list_type type;
- LIST_ENTRY(mlx5_indirect_list) entry;
-
+ struct mlx5_indirect_list indirect;
uint32_t clones_num;
struct mlx5dr_action *mirror_action;
struct mlx5_mirror_clone clone[MLX5_MIRROR_MAX_CLONES_NUM];
@@ -598,7 +605,7 @@ flow_hw_act_data_indirect_list_append(struct mlx5_priv *priv,
act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
if (!act_data)
return -1;
- act_data->indirect_list.cb = cb;
+ act_data->indirect_list_cb = cb;
LIST_INSERT_HEAD(&acts->act_list, act_data, next);
return 0;
}
@@ -1416,46 +1423,211 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev,
return 0;
}
-static struct mlx5dr_action *
-flow_hw_mirror_action(const struct rte_flow_action *action)
+static int
+flow_hw_translate_indirect_mirror(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused const struct mlx5_action_construct_data *act_data,
+ const struct rte_flow_action *action,
+ struct mlx5dr_rule_action *dr_rule)
+{
+ const struct rte_flow_action_indirect_list *list_conf = action->conf;
+ const struct mlx5_mirror *mirror = (typeof(mirror))list_conf->handle;
+
+ dr_rule->action = mirror->mirror_action;
+ return 0;
+}
+
+/**
+ * HWS mirror implemented as FW island.
+ * The action does not support indirect list flow configuration.
+ * If template handle was masked, use handle mirror action in flow rules.
+ * Otherwise let flow rule specify mirror handle.
+ */
+static int
+hws_table_tmpl_translate_indirect_mirror(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *mask,
+ struct mlx5_hw_actions *acts,
+ uint16_t action_src, uint16_t action_dst)
+{
+ int ret = 0;
+ const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
+
+ if (mask_conf && mask_conf->handle) {
+ /**
+ * If mirror handle was masked, assign fixed DR5 mirror action.
+ */
+ flow_hw_translate_indirect_mirror(dev, NULL, action,
+ &acts->rule_acts[action_dst]);
+ } else {
+ struct mlx5_priv *priv = dev->data->dev_private;
+ ret = flow_hw_act_data_indirect_list_append
+ (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
+ action_src, action_dst,
+ flow_hw_translate_indirect_mirror);
+ }
+ return ret;
+}
+
+static int
+flow_dr_set_meter(struct mlx5_priv *priv,
+ struct mlx5dr_rule_action *dr_rule,
+ const struct rte_flow_action_indirect_list *action_conf)
+{
+ const struct mlx5_indlst_legacy *legacy_obj =
+ (typeof(legacy_obj))action_conf->handle;
+ struct mlx5_aso_mtr_pool *mtr_pool = priv->hws_mpool;
+ uint32_t act_idx = (uint32_t)(uintptr_t)legacy_obj->handle;
+ uint32_t mtr_id = act_idx & (RTE_BIT32(MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
+ struct mlx5_aso_mtr *aso_mtr = mlx5_ipool_get(mtr_pool->idx_pool, mtr_id);
+
+ if (!aso_mtr)
+ return -EINVAL;
+ dr_rule->action = mtr_pool->action;
+ dr_rule->aso_meter.offset = aso_mtr->offset;
+ return 0;
+}
+
+__rte_always_inline static void
+flow_dr_mtr_flow_color(struct mlx5dr_rule_action *dr_rule, enum rte_color init_color)
+{
+ dr_rule->aso_meter.init_color =
+ (enum mlx5dr_action_aso_meter_color)rte_col_2_mlx5_col(init_color);
+}
+
+static int
+flow_hw_translate_indirect_meter(struct rte_eth_dev *dev,
+ const struct mlx5_action_construct_data *act_data,
+ const struct rte_flow_action *action,
+ struct mlx5dr_rule_action *dr_rule)
+{
+ int ret;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_indirect_list *action_conf = action->conf;
+ const struct rte_flow_indirect_update_flow_meter_mark **flow_conf =
+ (typeof(flow_conf))action_conf->conf;
+
+ /*
+ * Masked indirect handle set dr5 action during template table
+ * translation.
+ */
+ if (!dr_rule->action) {
+ ret = flow_dr_set_meter(priv, dr_rule, action_conf);
+ if (ret)
+ return ret;
+ }
+ if (!act_data->shared_meter.conf_masked) {
+ if (flow_conf && flow_conf[0] && flow_conf[0]->init_color < RTE_COLORS)
+ flow_dr_mtr_flow_color(dr_rule, flow_conf[0]->init_color);
+ }
+ return 0;
+}
+
+static int
+hws_table_tmpl_translate_indirect_meter(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *mask,
+ struct mlx5_hw_actions *acts,
+ uint16_t action_src, uint16_t action_dst)
+{
+ int ret;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_indirect_list *action_conf = action->conf;
+ const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
+ bool is_handle_masked = mask_conf && mask_conf->handle;
+ bool is_conf_masked = mask_conf && mask_conf->conf && mask_conf->conf[0];
+ struct mlx5dr_rule_action *dr_rule = &acts->rule_acts[action_dst];
+
+ if (is_handle_masked) {
+ ret = flow_dr_set_meter(priv, dr_rule, action->conf);
+ if (ret)
+ return ret;
+ }
+ if (is_conf_masked) {
+ const struct
+ rte_flow_indirect_update_flow_meter_mark **flow_conf =
+ (typeof(flow_conf))action_conf->conf;
+ flow_dr_mtr_flow_color(dr_rule,
+ flow_conf[0]->init_color);
+ }
+ if (!is_handle_masked || !is_conf_masked) {
+ struct mlx5_action_construct_data *act_data;
+
+ ret = flow_hw_act_data_indirect_list_append
+ (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
+ action_src, action_dst, flow_hw_translate_indirect_meter);
+ if (ret)
+ return ret;
+ act_data = LIST_FIRST(&acts->act_list);
+ act_data->shared_meter.conf_masked = is_conf_masked;
+ }
+ return 0;
+}
+
+static int
+hws_table_tmpl_translate_indirect_legacy(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *mask,
+ struct mlx5_hw_actions *acts,
+ uint16_t action_src, uint16_t action_dst)
{
- struct mlx5_mirror *mirror = (void *)(uintptr_t)action->conf;
+ int ret;
+ const struct rte_flow_action_indirect_list *indlst_conf = action->conf;
+ struct mlx5_indlst_legacy *indlst_obj = (typeof(indlst_obj))indlst_conf->handle;
+ uint32_t act_idx = (uint32_t)(uintptr_t)indlst_obj->handle;
+ uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
- return mirror->mirror_action;
+ switch (type) {
+ case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
+ ret = hws_table_tmpl_translate_indirect_meter(dev, action, mask,
+ acts, action_src,
+ action_dst);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
}
+/*
+ * template .. indirect_list handle Ht conf Ct ..
+ * mask .. indirect_list handle Hm conf Cm ..
+ *
+ * PMD requires Ht != 0 to resolve handle type.
+ * If Ht was masked (Hm != 0) DR5 action will be set according to Ht and will
+ * not change. Otherwise, DR5 action will be resolved during flow rule build.
+ * If Ct was masked (Cm != 0), table template processing updates base
+ * indirect action configuration with Ct parameters.
+ */
static int
table_template_translate_indirect_list(struct rte_eth_dev *dev,
const struct rte_flow_action *action,
const struct rte_flow_action *mask,
struct mlx5_hw_actions *acts,
- uint16_t action_src,
- uint16_t action_dst)
+ uint16_t action_src, uint16_t action_dst)
{
- int ret;
- bool is_masked = action->conf && mask->conf;
- struct mlx5_priv *priv = dev->data->dev_private;
+ int ret = 0;
enum mlx5_indirect_list_type type;
+ const struct rte_flow_action_indirect_list *list_conf = action->conf;
- if (!action->conf)
+ if (!list_conf || !list_conf->handle)
return -EINVAL;
- type = mlx5_get_indirect_list_type(action->conf);
+ type = mlx5_get_indirect_list_type(list_conf->handle);
switch (type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
+ ret = hws_table_tmpl_translate_indirect_legacy(dev, action, mask,
+ acts, action_src,
+ action_dst);
+ break;
case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
- if (is_masked) {
- acts->rule_acts[action_dst].action = flow_hw_mirror_action(action);
- } else {
- ret = flow_hw_act_data_indirect_list_append
- (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
- action_src, action_dst, flow_hw_mirror_action);
- if (ret)
- return ret;
- }
+ ret = hws_table_tmpl_translate_indirect_mirror(dev, action, mask,
+ acts, action_src,
+ action_dst);
break;
default:
return -EINVAL;
}
- return 0;
+ return ret;
}
/**
@@ -2366,8 +2538,8 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
(int)action->type == act_data->type);
switch ((int)act_data->type) {
case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
- rule_acts[act_data->action_dst].action =
- act_data->indirect_list.cb(action);
+ act_data->indirect_list_cb(dev, act_data, actions,
+ &rule_acts[act_data->action_dst]);
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
if (flow_hw_shared_action_construct
@@ -4664,20 +4836,11 @@ action_template_set_type(struct rte_flow_actions_template *at,
}
static int
-flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
- unsigned int action_src,
+flow_hw_dr_actions_template_handle_shared(int type, uint32_t action_src,
enum mlx5dr_action_type *action_types,
uint16_t *curr_off, uint16_t *cnt_off,
struct rte_flow_actions_template *at)
{
- uint32_t type;
-
- if (!mask) {
- DRV_LOG(WARNING, "Unable to determine indirect action type "
- "without a mask specified");
- return -EINVAL;
- }
- type = mask->type;
switch (type) {
case RTE_FLOW_ACTION_TYPE_RSS:
action_template_set_type(at, action_types, action_src, curr_off,
@@ -4718,12 +4881,24 @@ static int
flow_hw_template_actions_list(struct rte_flow_actions_template *at,
unsigned int action_src,
enum mlx5dr_action_type *action_types,
- uint16_t *curr_off)
+ uint16_t *curr_off, uint16_t *cnt_off)
{
- enum mlx5_indirect_list_type list_type;
+ int ret;
+ const struct rte_flow_action_indirect_list *indlst_conf = at->actions[action_src].conf;
+ enum mlx5_indirect_list_type list_type = mlx5_get_indirect_list_type(indlst_conf->handle);
+ const union {
+ struct mlx5_indlst_legacy *legacy;
+ struct rte_flow_action_list_handle *handle;
+ } indlst_obj = { .handle = indlst_conf->handle };
- list_type = mlx5_get_indirect_list_type(at->actions[action_src].conf);
switch (list_type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
+ ret = flow_hw_dr_actions_template_handle_shared
+ (indlst_obj.legacy->legacy_type, action_src,
+ action_types, curr_off, cnt_off, at);
+ if (ret)
+ return ret;
+ break;
case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
action_template_set_type(at, action_types, action_src, curr_off,
MLX5DR_ACTION_TYP_DEST_ARRAY);
@@ -4769,17 +4944,14 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
ret = flow_hw_template_actions_list(at, i, action_types,
- &curr_off);
+ &curr_off, &cnt_off);
if (ret)
return NULL;
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
ret = flow_hw_dr_actions_template_handle_shared
- (&at->masks[i],
- i,
- action_types,
- &curr_off,
- &cnt_off, at);
+ (at->masks[i].type, i, action_types,
+ &curr_off, &cnt_off, at);
if (ret)
return NULL;
break;
@@ -5259,9 +5431,8 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
* Need to restore the indirect action index from action conf here.
*/
case RTE_FLOW_ACTION_TYPE_INDIRECT:
- case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
- at->actions[i].conf = actions->conf;
- at->masks[i].conf = masks->conf;
+ at->actions[i].conf = ra[i].conf;
+ at->masks[i].conf = rm[i].conf;
break;
case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
info = actions->conf;
@@ -9519,18 +9690,16 @@ mlx5_mirror_destroy_clone(struct rte_eth_dev *dev,
}
void
-mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release)
+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror)
{
uint32_t i;
- if (mirror->entry.le_prev)
- LIST_REMOVE(mirror, entry);
+ mlx5_indirect_list_remove_entry(&mirror->indirect);
for (i = 0; i < mirror->clones_num; i++)
mlx5_mirror_destroy_clone(dev, &mirror->clone[i]);
if (mirror->mirror_action)
mlx5dr_action_destroy(mirror->mirror_action);
- if (release)
- mlx5_free(mirror);
+ mlx5_free(mirror);
}
static inline enum mlx5dr_table_type
@@ -9825,7 +9994,8 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
actions, "Failed to allocate mirror context");
return NULL;
}
- mirror->type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
+
+ mirror->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
mirror->clones_num = clones_num;
for (i = 0; i < clones_num; i++) {
const struct rte_flow_action *clone_actions;
@@ -9856,15 +10026,72 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
goto error;
}
- LIST_INSERT_HEAD(&priv->indirect_list_head,
- (struct mlx5_indirect_list *)mirror, entry);
+ mlx5_indirect_list_add_entry(&priv->indirect_list_head, &mirror->indirect);
return (struct rte_flow_action_list_handle *)mirror;
error:
- mlx5_hw_mirror_destroy(dev, mirror, true);
+ mlx5_hw_mirror_destroy(dev, mirror);
return NULL;
}
+void
+mlx5_destroy_legacy_indirect(__rte_unused struct rte_eth_dev *dev,
+ struct mlx5_indirect_list *ptr)
+{
+ struct mlx5_indlst_legacy *obj = (typeof(obj))ptr;
+
+ switch (obj->legacy_type) {
+ case RTE_FLOW_ACTION_TYPE_METER_MARK:
+ break; /* ASO meters were released in mlx5_flow_meter_flush() */
+ default:
+ break;
+ }
+ mlx5_free(obj);
+}
+
+static struct rte_flow_action_list_handle *
+mlx5_create_legacy_indlst(struct rte_eth_dev *dev, uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data, struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_indlst_legacy *indlst_obj = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(*indlst_obj),
+ 0, SOCKET_ID_ANY);
+
+ if (!indlst_obj)
+ return NULL;
+ indlst_obj->handle = flow_hw_action_handle_create(dev, queue, attr, conf,
+ actions, user_data,
+ error);
+ if (!indlst_obj->handle) {
+ mlx5_free(indlst_obj);
+ return NULL;
+ }
+ indlst_obj->legacy_type = actions[0].type;
+ indlst_obj->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY;
+ mlx5_indirect_list_add_entry(&priv->indirect_list_head, &indlst_obj->indirect);
+ return (struct rte_flow_action_list_handle *)indlst_obj;
+}
+
+static __rte_always_inline enum mlx5_indirect_list_type
+flow_hw_inlist_type_get(const struct rte_flow_action *actions)
+{
+ switch (actions[0].type) {
+ case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ return MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
+ case RTE_FLOW_ACTION_TYPE_METER_MARK:
+ return actions[1].type == RTE_FLOW_ACTION_TYPE_END ?
+ MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY :
+ MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;
+ default:
+ break;
+ }
+ return MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;
+}
+
static struct rte_flow_action_list_handle *
flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
const struct rte_flow_op_attr *attr,
@@ -9875,6 +10102,7 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
{
struct mlx5_hw_q_job *job = NULL;
bool push = flow_hw_action_push(attr);
+ enum mlx5_indirect_list_type list_type;
struct rte_flow_action_list_handle *handle;
struct mlx5_priv *priv = dev->data->dev_private;
const struct mlx5_flow_template_table_cfg table_cfg = {
@@ -9893,6 +10121,16 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
NULL, "No action list");
return NULL;
}
+ list_type = flow_hw_inlist_type_get(actions);
+ if (list_type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
+ /*
+ * Legacy indirect actions already have
+ * async resources management. No need to do it twice.
+ */
+ handle = mlx5_create_legacy_indlst(dev, queue, attr, conf,
+ actions, user_data, error);
+ goto end;
+ }
if (attr) {
job = flow_hw_action_job_init(priv, queue, NULL, user_data,
NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
@@ -9900,8 +10138,8 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
if (!job)
return NULL;
}
- switch (actions[0].type) {
- case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ switch (list_type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
handle = mlx5_hw_mirror_handle_create(dev, &table_cfg,
actions, error);
break;
@@ -9915,6 +10153,7 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
flow_hw_action_finalize(dev, queue, job, push, false,
handle != NULL);
}
+end:
return handle;
}
@@ -9943,6 +10182,15 @@ flow_hw_async_action_list_handle_destroy
enum mlx5_indirect_list_type type =
mlx5_get_indirect_list_type((void *)handle);
+ if (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
+ struct mlx5_indlst_legacy *legacy = (typeof(legacy))handle;
+
+ ret = flow_hw_action_handle_destroy(dev, queue, attr,
+ legacy->handle,
+ user_data, error);
+ mlx5_indirect_list_remove_entry(&legacy->indirect);
+ goto end;
+ }
if (attr) {
job = flow_hw_action_job_init(priv, queue, NULL, user_data,
NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
@@ -9952,20 +10200,17 @@ flow_hw_async_action_list_handle_destroy
}
switch (type) {
case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
- mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle, false);
+ mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle);
break;
default:
- handle = NULL;
ret = rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "Invalid indirect list handle");
}
if (job) {
- job->action = handle;
- flow_hw_action_finalize(dev, queue, job, push, false,
- handle != NULL);
+ flow_hw_action_finalize(dev, queue, job, push, false, true);
}
- mlx5_free(handle);
+end:
return ret;
}
@@ -9979,6 +10224,53 @@ flow_hw_action_list_handle_destroy(struct rte_eth_dev *dev,
error);
}
+static int
+flow_hw_async_action_list_handle_query_update
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ void *user_data, struct rte_flow_error *error)
+{
+ enum mlx5_indirect_list_type type =
+ mlx5_get_indirect_list_type((const void *)handle);
+
+ if (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
+ struct mlx5_indlst_legacy *legacy = (void *)(uintptr_t)handle;
+
+ if (update && query)
+ return flow_hw_async_action_handle_query_update
+ (dev, queue_id, attr, legacy->handle,
+ update, query, mode, user_data, error);
+ else if (update && update[0])
+ return flow_hw_action_handle_update(dev, queue_id, attr,
+ legacy->handle, update[0],
+ user_data, error);
+ else if (query && query[0])
+ return flow_hw_action_handle_query(dev, queue_id, attr,
+ legacy->handle, query[0],
+ user_data, error);
+ else
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "invalid legacy handle query_update parameters");
+ }
+ return -ENOTSUP;
+}
+
+static int
+flow_hw_action_list_handle_query_update(struct rte_eth_dev *dev,
+ const struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ struct rte_flow_error *error)
+{
+ return flow_hw_async_action_list_handle_query_update
+ (dev, MLX5_HW_INV_QUEUE, NULL, handle,
+ update, query, mode, NULL, error);
+}
+
const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.info_get = flow_hw_info_get,
.configure = flow_hw_configure,
@@ -10009,10 +10301,14 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.action_query_update = flow_hw_action_query_update,
.action_list_handle_create = flow_hw_action_list_handle_create,
.action_list_handle_destroy = flow_hw_action_list_handle_destroy,
+ .action_list_handle_query_update =
+ flow_hw_action_list_handle_query_update,
.async_action_list_handle_create =
flow_hw_async_action_list_handle_create,
.async_action_list_handle_destroy =
flow_hw_async_action_list_handle_destroy,
+ .async_action_list_handle_query_update =
+ flow_hw_async_action_list_handle_query_update,
.query = flow_hw_query,
.get_aged_flows = flow_hw_get_aged_flows,
.get_q_aged_flows = flow_hw_get_q_aged_flows,
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v7 00/10] net/mlx5: support indirect actions list
2023-10-17 8:09 ` [PATCH v3 " Gregory Etelson
` (18 preceding siblings ...)
2023-10-25 11:22 ` [PATCH v6 00/10] net/mlx5: support indirect actions list Gregory Etelson
@ 2023-10-26 7:12 ` Gregory Etelson
2023-10-26 7:12 ` [PATCH v7 01/10] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
` (10 more replies)
19 siblings, 11 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-26 7:12 UTC (permalink / raw)
To: dev; +Cc: getelson, , rasland
Add MLX5 PMD support for indirect actions list.
Erez Shitrit (1):
net/mlx5/hws: allow destination into default miss FT
Gregory Etelson (4):
net/mlx5: reformat HWS code for HWS mirror action
net/mlx5: support HWS mirror action
net/mlx5: reformat HWS code for indirect list actions
net/mlx5: support indirect list METER_MARK action
Haifei Luo (1):
net/mlx5/hws: support reformat for hws mirror
Hamdan Igbaria (3):
net/mlx5/hws: add support for reformat DevX object
net/mlx5/hws: support creating of dynamic forward table and FTE
net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action
Shun Hao (1):
net/mlx5/hws: add support for mirroring
doc/guides/nics/features/mlx5.ini | 1 +
doc/guides/rel_notes/release_23_11.rst | 1 +
drivers/common/mlx5/mlx5_prm.h | 81 +-
drivers/net/mlx5/hws/mlx5dr.h | 34 +
drivers/net/mlx5/hws/mlx5dr_action.c | 210 +++-
drivers/net/mlx5/hws/mlx5dr_action.h | 8 +
drivers/net/mlx5/hws/mlx5dr_cmd.c | 143 ++-
drivers/net/mlx5/hws/mlx5dr_cmd.h | 49 +-
drivers/net/mlx5/hws/mlx5dr_debug.c | 1 +
drivers/net/mlx5/hws/mlx5dr_internal.h | 5 +
drivers/net/mlx5/hws/mlx5dr_send.c | 5 -
drivers/net/mlx5/hws/mlx5dr_table.c | 8 +-
drivers/net/mlx5/mlx5.c | 1 +
drivers/net/mlx5/mlx5.h | 2 +
drivers/net/mlx5/mlx5_flow.c | 199 ++++
drivers/net/mlx5/mlx5_flow.h | 111 ++-
drivers/net/mlx5/mlx5_flow_hw.c | 1217 +++++++++++++++++++++---
17 files changed, 1908 insertions(+), 168 deletions(-)
--
v3: Add ACK to patches in the series.
v4: Squash reformat patches.
v5: Update release notes.
Fix code style.
v6: Fix code style.
v7: Fix incremental compilation failure.
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v7 01/10] net/mlx5/hws: add support for reformat DevX object
2023-10-26 7:12 ` [PATCH v7 00/10] net/mlx5: support indirect actions list Gregory Etelson
@ 2023-10-26 7:12 ` Gregory Etelson
2023-10-26 7:12 ` [PATCH v7 02/10] net/mlx5/hws: support creating of dynamic forward table and FTE Gregory Etelson
` (9 subsequent siblings)
10 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-26 7:12 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Hamdan Igbaria, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Hamdan Igbaria <hamdani@nvidia.com>
Add support for creation of packet reformat object,
via the ALLOC_PACKET_REFORMAT_CONTEXT command.
Signed-off-by: Hamdan Igbaria <hamdani@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/common/mlx5/mlx5_prm.h | 39 +++++++++++++++++
drivers/net/mlx5/hws/mlx5dr_cmd.c | 60 ++++++++++++++++++++++++++
drivers/net/mlx5/hws/mlx5dr_cmd.h | 11 +++++
drivers/net/mlx5/hws/mlx5dr_internal.h | 5 +++
drivers/net/mlx5/hws/mlx5dr_send.c | 5 ---
5 files changed, 115 insertions(+), 5 deletions(-)
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index 6e181a0eca..4192fff55b 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -1218,6 +1218,8 @@ enum {
MLX5_CMD_OP_CREATE_FLOW_GROUP = 0x933,
MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936,
MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
+ MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT = 0x93d,
+ MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT = 0x93e,
MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939,
MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
MLX5_CMD_OP_CREATE_GENERAL_OBJECT = 0xa00,
@@ -5191,6 +5193,43 @@ struct mlx5_ifc_modify_flow_table_out_bits {
u8 reserved_at_40[0x60];
};
+struct mlx5_ifc_packet_reformat_context_in_bits {
+ u8 reformat_type[0x8];
+ u8 reserved_at_8[0x4];
+ u8 reformat_param_0[0x4];
+ u8 reserved_at_16[0x6];
+ u8 reformat_data_size[0xa];
+
+ u8 reformat_param_1[0x8];
+ u8 reserved_at_40[0x8];
+ u8 reformat_data[6][0x8];
+
+ u8 more_reformat_data[][0x8];
+};
+
+struct mlx5_ifc_alloc_packet_reformat_context_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0xa0];
+
+ u8 packet_reformat_context[];
+};
+
+struct mlx5_ifc_alloc_packet_reformat_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 packet_reformat_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
/* CQE format mask. */
#define MLX5E_CQE_FORMAT_MASK 0xc
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c
index 594c59aee3..0ccbaee961 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.c
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c
@@ -780,6 +780,66 @@ mlx5dr_cmd_sq_create(struct ibv_context *ctx,
return devx_obj;
}
+struct mlx5dr_devx_obj *
+mlx5dr_cmd_packet_reformat_create(struct ibv_context *ctx,
+ struct mlx5dr_cmd_packet_reformat_create_attr *attr)
+{
+ uint32_t out[MLX5_ST_SZ_DW(alloc_packet_reformat_out)] = {0};
+ size_t insz, cmd_data_sz, cmd_total_sz;
+ struct mlx5dr_devx_obj *devx_obj;
+ void *prctx;
+ void *pdata;
+ void *in;
+
+ cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
+ cmd_total_sz += MLX5_ST_SZ_BYTES(packet_reformat_context_in);
+ cmd_data_sz = MLX5_FLD_SZ_BYTES(packet_reformat_context_in, reformat_data);
+ insz = align(cmd_total_sz + attr->data_sz - cmd_data_sz, DW_SIZE);
+ in = simple_calloc(1, insz);
+ if (!in) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
+
+ prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in,
+ packet_reformat_context);
+ pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);
+
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_type, attr->type);
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, attr->reformat_param_0);
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, attr->data_sz);
+ memcpy(pdata, attr->data, attr->data_sz);
+
+ devx_obj = simple_malloc(sizeof(*devx_obj));
+ if (!devx_obj) {
+ DR_LOG(ERR, "Failed to allocate memory for packet reformat object");
+ rte_errno = ENOMEM;
+ goto out_free_in;
+ }
+
+ devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, insz, out, sizeof(out));
+ if (!devx_obj->obj) {
+ DR_LOG(ERR, "Failed to create packet reformat");
+ rte_errno = errno;
+ goto out_free_devx;
+ }
+
+ devx_obj->id = MLX5_GET(alloc_packet_reformat_out, out, packet_reformat_id);
+
+ simple_free(in);
+
+ return devx_obj;
+
+out_free_devx:
+ simple_free(devx_obj);
+out_free_in:
+ simple_free(in);
+ return NULL;
+}
+
int mlx5dr_cmd_sq_modify_rdy(struct mlx5dr_devx_obj *devx_obj)
{
uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h
index 8a495db9b3..f45b6c6b07 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.h
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h
@@ -155,6 +155,13 @@ struct mlx5dr_cmd_allow_other_vhca_access_attr {
uint8_t access_key[ACCESS_KEY_LEN];
};
+struct mlx5dr_cmd_packet_reformat_create_attr {
+ uint8_t type;
+ size_t data_sz;
+ void *data;
+ uint8_t reformat_param_0;
+};
+
struct mlx5dr_cmd_query_ft_caps {
uint8_t max_level;
uint8_t reparse;
@@ -285,6 +292,10 @@ mlx5dr_cmd_forward_tbl_create(struct ibv_context *ctx,
void mlx5dr_cmd_forward_tbl_destroy(struct mlx5dr_cmd_forward_tbl *tbl);
+struct mlx5dr_devx_obj *
+mlx5dr_cmd_packet_reformat_create(struct ibv_context *ctx,
+ struct mlx5dr_cmd_packet_reformat_create_attr *attr);
+
struct mlx5dr_devx_obj *
mlx5dr_cmd_alias_obj_create(struct ibv_context *ctx,
struct mlx5dr_cmd_alias_obj_create_attr *alias_attr);
diff --git a/drivers/net/mlx5/hws/mlx5dr_internal.h b/drivers/net/mlx5/hws/mlx5dr_internal.h
index c3c077667d..3770d28e62 100644
--- a/drivers/net/mlx5/hws/mlx5dr_internal.h
+++ b/drivers/net/mlx5/hws/mlx5dr_internal.h
@@ -91,4 +91,9 @@ static inline uint64_t roundup_pow_of_two(uint64_t n)
return n == 1 ? 1 : 1ULL << log2above(n);
}
+static inline unsigned long align(unsigned long val, unsigned long align)
+{
+ return (val + align - 1) & ~(align - 1);
+}
+
#endif /* MLX5DR_INTERNAL_H_ */
diff --git a/drivers/net/mlx5/hws/mlx5dr_send.c b/drivers/net/mlx5/hws/mlx5dr_send.c
index e58fdeb117..622d574bfa 100644
--- a/drivers/net/mlx5/hws/mlx5dr_send.c
+++ b/drivers/net/mlx5/hws/mlx5dr_send.c
@@ -668,11 +668,6 @@ static int mlx5dr_send_ring_create_sq_obj(struct mlx5dr_context *ctx,
return err;
}
-static inline unsigned long align(unsigned long val, unsigned long align)
-{
- return (val + align - 1) & ~(align - 1);
-}
-
static int mlx5dr_send_ring_open_sq(struct mlx5dr_context *ctx,
struct mlx5dr_send_engine *queue,
struct mlx5dr_send_ring_sq *sq,
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v7 02/10] net/mlx5/hws: support creating of dynamic forward table and FTE
2023-10-26 7:12 ` [PATCH v7 00/10] net/mlx5: support indirect actions list Gregory Etelson
2023-10-26 7:12 ` [PATCH v7 01/10] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
@ 2023-10-26 7:12 ` Gregory Etelson
2023-10-26 7:12 ` [PATCH v7 03/10] net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action Gregory Etelson
` (8 subsequent siblings)
10 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-26 7:12 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Hamdan Igbaria, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Hamdan Igbaria <hamdani@nvidia.com>
Add the ability to create a forward table and FTE.
Signed-off-by: Hamdan Igbaria <hamdani@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/common/mlx5/mlx5_prm.h | 4 ++++
drivers/net/mlx5/hws/mlx5dr_cmd.c | 13 +++++++++++++
drivers/net/mlx5/hws/mlx5dr_cmd.h | 19 +++++++++++++++++++
3 files changed, 36 insertions(+)
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index 4192fff55b..df621b19af 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -5048,7 +5048,11 @@ enum mlx5_flow_destination_type {
};
enum mlx5_flow_context_action {
+ MLX5_FLOW_CONTEXT_ACTION_DROP = 1 << 1,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 1 << 2,
+ MLX5_FLOW_CONTEXT_ACTION_REFORMAT = 1 << 4,
+ MLX5_FLOW_CONTEXT_ACTION_DECRYPT = 1 << 12,
+ MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 13,
};
enum mlx5_flow_context_flow_source {
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c
index 0ccbaee961..8f407f9bce 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.c
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c
@@ -42,6 +42,7 @@ mlx5dr_cmd_flow_table_create(struct ibv_context *ctx,
ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level);
MLX5_SET(flow_table_context, ft_ctx, rtc_valid, ft_attr->rtc_valid);
+ MLX5_SET(flow_table_context, ft_ctx, reformat_en, ft_attr->reformat_en);
devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
if (!devx_obj->obj) {
@@ -182,12 +183,24 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
action_flags = fte_attr->action_flags;
MLX5_SET(flow_context, in_flow_context, action, action_flags);
+ if (action_flags & MLX5_FLOW_CONTEXT_ACTION_REFORMAT)
+ MLX5_SET(flow_context, in_flow_context,
+ packet_reformat_id, fte_attr->packet_reformat_id);
+
+ if (action_flags & (MLX5_FLOW_CONTEXT_ACTION_DECRYPT | MLX5_FLOW_CONTEXT_ACTION_ENCRYPT)) {
+ MLX5_SET(flow_context, in_flow_context,
+ encrypt_decrypt_type, fte_attr->encrypt_decrypt_type);
+ MLX5_SET(flow_context, in_flow_context,
+ encrypt_decrypt_obj_id, fte_attr->encrypt_decrypt_obj_id);
+ }
+
if (action_flags & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
/* Only destination_list_size of size 1 is supported */
MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
MLX5_SET(dest_format, in_dests, destination_type, fte_attr->destination_type);
MLX5_SET(dest_format, in_dests, destination_id, fte_attr->destination_id);
+ MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
}
devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h
index f45b6c6b07..bf3a362300 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.h
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h
@@ -7,8 +7,12 @@
struct mlx5dr_cmd_set_fte_attr {
uint32_t action_flags;
+ uint8_t encrypt_decrypt_type;
+ uint32_t encrypt_decrypt_obj_id;
+ uint32_t packet_reformat_id;
uint8_t destination_type;
uint32_t destination_id;
+ uint8_t ignore_flow_level;
uint8_t flow_source;
};
@@ -16,6 +20,7 @@ struct mlx5dr_cmd_ft_create_attr {
uint8_t type;
uint8_t level;
bool rtc_valid;
+ uint8_t reformat_en;
};
#define ACCESS_KEY_LEN 32
@@ -296,6 +301,20 @@ struct mlx5dr_devx_obj *
mlx5dr_cmd_packet_reformat_create(struct ibv_context *ctx,
struct mlx5dr_cmd_packet_reformat_create_attr *attr);
+struct mlx5dr_devx_obj *
+mlx5dr_cmd_set_fte(struct ibv_context *ctx,
+ uint32_t table_type,
+ uint32_t table_id,
+ uint32_t group_id,
+ struct mlx5dr_cmd_set_fte_attr *fte_attr);
+
+struct mlx5dr_cmd_forward_tbl *
+mlx5dr_cmd_forward_tbl_create(struct ibv_context *ctx,
+ struct mlx5dr_cmd_ft_create_attr *ft_attr,
+ struct mlx5dr_cmd_set_fte_attr *fte_attr);
+
+void mlx5dr_cmd_forward_tbl_destroy(struct mlx5dr_cmd_forward_tbl *tbl);
+
struct mlx5dr_devx_obj *
mlx5dr_cmd_alias_obj_create(struct ibv_context *ctx,
struct mlx5dr_cmd_alias_obj_create_attr *alias_attr);
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v7 03/10] net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action
2023-10-26 7:12 ` [PATCH v7 00/10] net/mlx5: support indirect actions list Gregory Etelson
2023-10-26 7:12 ` [PATCH v7 01/10] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
2023-10-26 7:12 ` [PATCH v7 02/10] net/mlx5/hws: support creating of dynamic forward table and FTE Gregory Etelson
@ 2023-10-26 7:12 ` Gregory Etelson
2023-10-26 7:12 ` [PATCH v7 04/10] net/mlx5/hws: add support for mirroring Gregory Etelson
` (7 subsequent siblings)
10 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-26 7:12 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Hamdan Igbaria, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Hamdan Igbaria <hamdani@nvidia.com>
Add the mlx5dr_devx_obj struct to mlx5dr_action, so we can hold
the FT object in the dest table action.
Signed-off-by: Hamdan Igbaria <hamdani@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/hws/mlx5dr_action.c | 4 ++++
drivers/net/mlx5/hws/mlx5dr_action.h | 3 +++
drivers/net/mlx5/hws/mlx5dr_table.c | 1 -
3 files changed, 7 insertions(+), 1 deletion(-)
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c
index ea9fc23732..55ec4f71c9 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.c
+++ b/drivers/net/mlx5/hws/mlx5dr_action.c
@@ -787,6 +787,8 @@ mlx5dr_action_create_dest_table(struct mlx5dr_context *ctx,
ret = mlx5dr_action_create_stcs(action, tbl->ft);
if (ret)
goto free_action;
+
+ action->devx_dest.devx_obj = tbl->ft;
}
return action;
@@ -864,6 +866,8 @@ mlx5dr_action_create_dest_tir(struct mlx5dr_context *ctx,
ret = mlx5dr_action_create_stcs(action, cur_obj);
if (ret)
goto clean_obj;
+
+ action->devx_dest.devx_obj = cur_obj;
}
return action;
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.h b/drivers/net/mlx5/hws/mlx5dr_action.h
index 314e289780..104c6880c1 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.h
+++ b/drivers/net/mlx5/hws/mlx5dr_action.h
@@ -148,6 +148,9 @@ struct mlx5dr_action {
struct {
struct mlx5dv_steering_anchor *sa;
} root_tbl;
+ struct {
+ struct mlx5dr_devx_obj *devx_obj;
+ } devx_dest;
};
};
diff --git a/drivers/net/mlx5/hws/mlx5dr_table.c b/drivers/net/mlx5/hws/mlx5dr_table.c
index e1150cd75d..91eb92db78 100644
--- a/drivers/net/mlx5/hws/mlx5dr_table.c
+++ b/drivers/net/mlx5/hws/mlx5dr_table.c
@@ -68,7 +68,6 @@ static void mlx5dr_table_down_default_fdb_miss_tbl(struct mlx5dr_table *tbl)
return;
mlx5dr_cmd_forward_tbl_destroy(default_miss);
-
ctx->common_res[tbl_type].default_miss = NULL;
}
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v7 04/10] net/mlx5/hws: add support for mirroring
2023-10-26 7:12 ` [PATCH v7 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (2 preceding siblings ...)
2023-10-26 7:12 ` [PATCH v7 03/10] net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action Gregory Etelson
@ 2023-10-26 7:12 ` Gregory Etelson
2023-10-26 7:12 ` [PATCH v7 05/10] net/mlx5/hws: allow destination into default miss FT Gregory Etelson
` (6 subsequent siblings)
10 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-26 7:12 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Shun Hao, Alex Vesker, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Shun Hao <shunh@nvidia.com>
This patch supports mirroring by adding a dest_array action. The action
accepts a list containing multiple destination actions, and can duplicate a
packet and forward it to each destination in the list.
Signed-off-by: Shun Hao <shunh@nvidia.com>
Acked-by: Alex Vesker <valex@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/common/mlx5/mlx5_prm.h | 23 ++++-
drivers/net/mlx5/hws/mlx5dr.h | 34 +++++++
drivers/net/mlx5/hws/mlx5dr_action.c | 130 ++++++++++++++++++++++++++-
drivers/net/mlx5/hws/mlx5dr_action.h | 3 +
drivers/net/mlx5/hws/mlx5dr_cmd.c | 64 ++++++++++---
drivers/net/mlx5/hws/mlx5dr_cmd.h | 21 ++++-
drivers/net/mlx5/hws/mlx5dr_debug.c | 1 +
drivers/net/mlx5/hws/mlx5dr_table.c | 7 +-
8 files changed, 262 insertions(+), 21 deletions(-)
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index df621b19af..aa0b622ca2 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -2320,7 +2320,11 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
};
struct mlx5_ifc_esw_cap_bits {
- u8 reserved_at_0[0x60];
+ u8 reserved_at_0[0x1d];
+ u8 merged_eswitch[0x1];
+ u8 reserved_at_1e[0x2];
+
+ u8 reserved_at_20[0x40];
u8 esw_manager_vport_number_valid[0x1];
u8 reserved_at_61[0xf];
@@ -5045,6 +5049,7 @@ struct mlx5_ifc_query_flow_table_out_bits {
enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0,
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
+ MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
};
enum mlx5_flow_context_action {
@@ -5088,6 +5093,19 @@ union mlx5_ifc_dest_format_flow_counter_list_auto_bits {
u8 reserved_at_0[0x40];
};
+struct mlx5_ifc_extended_dest_format_bits {
+ struct mlx5_ifc_dest_format_bits destination_entry;
+
+ u8 packet_reformat_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+#define MLX5_IFC_MULTI_PATH_FT_MAX_LEVEL 64
+
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
struct mlx5_ifc_flow_context_bits {
u8 reserved_at_00[0x20];
u8 group_id[0x20];
@@ -5106,8 +5124,7 @@ struct mlx5_ifc_flow_context_bits {
u8 reserved_at_e0[0x40];
u8 encrypt_decrypt_obj_id[0x20];
u8 reserved_at_140[0x16c0];
- /* Currently only one destnation */
- union mlx5_ifc_dest_format_flow_counter_list_auto_bits destination[1];
+ union mlx5_ifc_dest_format_flow_counter_list_auto_bits destination[0];
};
struct mlx5_ifc_set_fte_in_bits {
diff --git a/drivers/net/mlx5/hws/mlx5dr.h b/drivers/net/mlx5/hws/mlx5dr.h
index ea8bf683f3..1995c55132 100644
--- a/drivers/net/mlx5/hws/mlx5dr.h
+++ b/drivers/net/mlx5/hws/mlx5dr.h
@@ -46,6 +46,7 @@ enum mlx5dr_action_type {
MLX5DR_ACTION_TYP_ASO_METER,
MLX5DR_ACTION_TYP_ASO_CT,
MLX5DR_ACTION_TYP_DEST_ROOT,
+ MLX5DR_ACTION_TYP_DEST_ARRAY,
MLX5DR_ACTION_TYP_MAX,
};
@@ -213,6 +214,20 @@ struct mlx5dr_rule_action {
};
};
+struct mlx5dr_action_dest_attr {
+ /* Required action combination */
+ enum mlx5dr_action_type *action_type;
+
+ /* Required destination action to forward the packet */
+ struct mlx5dr_action *dest;
+
+ /* Optional reformat data */
+ struct {
+ size_t reformat_data_sz;
+ void *reformat_data;
+ } reformat;
+};
+
/* Open a context used for direct rule insertion using hardware steering.
* Each context can contain multiple tables of different types.
*
@@ -616,6 +631,25 @@ mlx5dr_action_create_pop_vlan(struct mlx5dr_context *ctx, uint32_t flags);
struct mlx5dr_action *
mlx5dr_action_create_push_vlan(struct mlx5dr_context *ctx, uint32_t flags);
+/* Create a dest array action, this action can duplicate packets and forward to
+ * multiple destinations in the destination list.
+ * @param[in] ctx
+ * The context in which the new action will be created.
+ * @param[in] num_dest
+ * The number of dests attributes.
+ * @param[in] dests
+ * The destination array. Each contains a destination action and can have
+ * additional actions.
+ * @param[in] flags
+ * Action creation flags. (enum mlx5dr_action_flags)
+ * @return pointer to mlx5dr_action on success NULL otherwise.
+ */
+struct mlx5dr_action *
+mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
+ size_t num_dest,
+ struct mlx5dr_action_dest_attr *dests,
+ uint32_t flags);
+
/* Create dest root table, this action will jump to root table according
* the given priority.
* @param[in] ctx
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c
index 55ec4f71c9..f068bc7e9c 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.c
+++ b/drivers/net/mlx5/hws/mlx5dr_action.c
@@ -34,7 +34,8 @@ static const uint32_t action_order_arr[MLX5DR_TABLE_TYPE_MAX][MLX5DR_ACTION_TYP_
BIT(MLX5DR_ACTION_TYP_MISS) |
BIT(MLX5DR_ACTION_TYP_TIR) |
BIT(MLX5DR_ACTION_TYP_DROP) |
- BIT(MLX5DR_ACTION_TYP_DEST_ROOT),
+ BIT(MLX5DR_ACTION_TYP_DEST_ROOT) |
+ BIT(MLX5DR_ACTION_TYP_DEST_ARRAY),
BIT(MLX5DR_ACTION_TYP_LAST),
},
[MLX5DR_TABLE_TYPE_NIC_TX] = {
@@ -71,7 +72,8 @@ static const uint32_t action_order_arr[MLX5DR_TABLE_TYPE_MAX][MLX5DR_ACTION_TYP_
BIT(MLX5DR_ACTION_TYP_MISS) |
BIT(MLX5DR_ACTION_TYP_VPORT) |
BIT(MLX5DR_ACTION_TYP_DROP) |
- BIT(MLX5DR_ACTION_TYP_DEST_ROOT),
+ BIT(MLX5DR_ACTION_TYP_DEST_ROOT) |
+ BIT(MLX5DR_ACTION_TYP_DEST_ARRAY),
BIT(MLX5DR_ACTION_TYP_LAST),
},
};
@@ -535,6 +537,7 @@ static void mlx5dr_action_fill_stc_attr(struct mlx5dr_action *action,
}
break;
case MLX5DR_ACTION_TYP_TBL:
+ case MLX5DR_ACTION_TYP_DEST_ARRAY:
attr->action_type = MLX5_IFC_STC_ACTION_TYPE_JUMP_TO_FT;
attr->action_offset = MLX5DR_ACTION_OFFSET_HIT;
attr->dest_table_id = obj->id;
@@ -1700,6 +1703,124 @@ mlx5dr_action_create_modify_header(struct mlx5dr_context *ctx,
return NULL;
}
+struct mlx5dr_action *
+mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
+ size_t num_dest,
+ struct mlx5dr_action_dest_attr *dests,
+ uint32_t flags)
+{
+ struct mlx5dr_cmd_set_fte_dest *dest_list = NULL;
+ struct mlx5dr_cmd_ft_create_attr ft_attr = {0};
+ struct mlx5dr_cmd_set_fte_attr fte_attr = {0};
+ struct mlx5dr_cmd_forward_tbl *fw_island;
+ enum mlx5dr_table_type table_type;
+ struct mlx5dr_action *action;
+ uint32_t i;
+ int ret;
+
+ if (num_dest <= 1) {
+ rte_errno = EINVAL;
+ DR_LOG(ERR, "Action must have multiple dests");
+ return NULL;
+ }
+
+ if (flags == (MLX5DR_ACTION_FLAG_HWS_RX | MLX5DR_ACTION_FLAG_SHARED)) {
+ ft_attr.type = FS_FT_NIC_RX;
+ ft_attr.level = MLX5_IFC_MULTI_PATH_FT_MAX_LEVEL - 1;
+ table_type = MLX5DR_TABLE_TYPE_NIC_RX;
+ } else if (flags == (MLX5DR_ACTION_FLAG_HWS_FDB | MLX5DR_ACTION_FLAG_SHARED)) {
+ ft_attr.type = FS_FT_FDB;
+ ft_attr.level = ctx->caps->fdb_ft.max_level - 1;
+ table_type = MLX5DR_TABLE_TYPE_FDB;
+ } else {
+ DR_LOG(ERR, "Action flags not supported");
+ rte_errno = ENOTSUP;
+ return NULL;
+ }
+
+ if (mlx5dr_context_shared_gvmi_used(ctx)) {
+ DR_LOG(ERR, "Cannot use this action in shared GVMI context");
+ rte_errno = ENOTSUP;
+ return NULL;
+ }
+
+ dest_list = simple_calloc(num_dest, sizeof(*dest_list));
+ if (!dest_list) {
+ DR_LOG(ERR, "Failed to allocate memory for destinations");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ for (i = 0; i < num_dest; i++) {
+ enum mlx5dr_action_type *action_type = dests[i].action_type;
+
+ if (!mlx5dr_action_check_combo(dests[i].action_type, table_type)) {
+ DR_LOG(ERR, "Invalid combination of actions");
+ rte_errno = EINVAL;
+ goto free_dest_list;
+ }
+
+ for (; *action_type != MLX5DR_ACTION_TYP_LAST; action_type++) {
+ switch (*action_type) {
+ case MLX5DR_ACTION_TYP_TBL:
+ dest_list[i].destination_type =
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest_list[i].destination_id = dests[i].dest->devx_dest.devx_obj->id;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ fte_attr.ignore_flow_level = 1;
+ break;
+ case MLX5DR_ACTION_TYP_VPORT:
+ dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest_list[i].destination_id = dests[i].dest->vport.vport_num;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ if (ctx->caps->merged_eswitch) {
+ dest_list[i].ext_flags |=
+ MLX5DR_CMD_EXT_DEST_ESW_OWNER_VHCA_ID;
+ dest_list[i].esw_owner_vhca_id =
+ dests[i].dest->vport.esw_owner_vhca_id;
+ }
+ break;
+ case MLX5DR_ACTION_TYP_TIR:
+ dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest_list[i].destination_id = dests[i].dest->devx_dest.devx_obj->id;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ break;
+ default:
+ DR_LOG(ERR, "Unsupported action in dest_array");
+ rte_errno = ENOTSUP;
+ goto free_dest_list;
+ }
+ }
+ }
+ fte_attr.dests_num = num_dest;
+ fte_attr.dests = dest_list;
+
+ fw_island = mlx5dr_cmd_forward_tbl_create(ctx->ibv_ctx, &ft_attr, &fte_attr);
+ if (!fw_island)
+ goto free_dest_list;
+
+ action = mlx5dr_action_create_generic(ctx, flags, MLX5DR_ACTION_TYP_DEST_ARRAY);
+ if (!action)
+ goto destroy_fw_island;
+
+ ret = mlx5dr_action_create_stcs(action, fw_island->ft);
+ if (ret)
+ goto free_action;
+
+ action->dest_array.fw_island = fw_island;
+
+ simple_free(dest_list);
+ return action;
+
+free_action:
+ simple_free(action);
+destroy_fw_island:
+ mlx5dr_cmd_forward_tbl_destroy(fw_island);
+free_dest_list:
+ simple_free(dest_list);
+ return NULL;
+}
+
struct mlx5dr_action *
mlx5dr_action_create_dest_root(struct mlx5dr_context *ctx,
uint16_t priority,
@@ -1782,6 +1903,10 @@ static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action)
mlx5dr_action_destroy_stcs(action);
mlx5dr_action_put_shared_stc(action, MLX5DR_CONTEXT_SHARED_STC_POP);
break;
+ case MLX5DR_ACTION_TYP_DEST_ARRAY:
+ mlx5dr_action_destroy_stcs(action);
+ mlx5dr_cmd_forward_tbl_destroy(action->dest_array.fw_island);
+ break;
case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
case MLX5DR_ACTION_TYP_MODIFY_HDR:
for (i = 0; i < action->modify_header.num_of_patterns; i++) {
@@ -2291,6 +2416,7 @@ int mlx5dr_action_template_process(struct mlx5dr_action_template *at)
case MLX5DR_ACTION_TYP_TIR:
case MLX5DR_ACTION_TYP_TBL:
case MLX5DR_ACTION_TYP_DEST_ROOT:
+ case MLX5DR_ACTION_TYP_DEST_ARRAY:
case MLX5DR_ACTION_TYP_VPORT:
case MLX5DR_ACTION_TYP_MISS:
/* Hit action */
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.h b/drivers/net/mlx5/hws/mlx5dr_action.h
index 104c6880c1..efe07c9d47 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.h
+++ b/drivers/net/mlx5/hws/mlx5dr_action.h
@@ -151,6 +151,9 @@ struct mlx5dr_action {
struct {
struct mlx5dr_devx_obj *devx_obj;
} devx_dest;
+ struct {
+ struct mlx5dr_cmd_forward_tbl *fw_island;
+ } dest_array;
};
};
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c
index 8f407f9bce..22f7c6b019 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.c
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c
@@ -158,18 +158,31 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
uint32_t group_id,
struct mlx5dr_cmd_set_fte_attr *fte_attr)
{
- uint32_t in[MLX5_ST_SZ_DW(set_fte_in) + MLX5_ST_SZ_DW(dest_format)] = {0};
uint32_t out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
struct mlx5dr_devx_obj *devx_obj;
+ uint32_t dest_entry_sz;
+ uint32_t total_dest_sz;
void *in_flow_context;
uint32_t action_flags;
- void *in_dests;
+ uint8_t *in_dests;
+ uint32_t inlen;
+ uint32_t *in;
+ uint32_t i;
+
+ dest_entry_sz = MLX5_ST_SZ_BYTES(dest_format);
+ total_dest_sz = dest_entry_sz * fte_attr->dests_num;
+ inlen = align((MLX5_ST_SZ_BYTES(set_fte_in) + total_dest_sz), DW_SIZE);
+ in = simple_calloc(1, inlen);
+ if (!in) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
devx_obj = simple_malloc(sizeof(*devx_obj));
if (!devx_obj) {
DR_LOG(ERR, "Failed to allocate memory for fte object");
rte_errno = ENOMEM;
- return NULL;
+ goto free_in;
}
MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
@@ -179,6 +192,7 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_source, fte_attr->flow_source);
+ MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
action_flags = fte_attr->action_flags;
MLX5_SET(flow_context, in_flow_context, action, action_flags);
@@ -195,15 +209,39 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
}
if (action_flags & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- /* Only destination_list_size of size 1 is supported */
- MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
- in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
- MLX5_SET(dest_format, in_dests, destination_type, fte_attr->destination_type);
- MLX5_SET(dest_format, in_dests, destination_id, fte_attr->destination_id);
- MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
+ in_dests = (uint8_t *)MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+
+ for (i = 0; i < fte_attr->dests_num; i++) {
+ struct mlx5dr_cmd_set_fte_dest *dest = &fte_attr->dests[i];
+
+ switch (dest->destination_type) {
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+ if (dest->ext_flags & MLX5DR_CMD_EXT_DEST_ESW_OWNER_VHCA_ID) {
+ MLX5_SET(dest_format, in_dests,
+ destination_eswitch_owner_vhca_id_valid, 1);
+ MLX5_SET(dest_format, in_dests,
+ destination_eswitch_owner_vhca_id,
+ dest->esw_owner_vhca_id);
+ }
+ /* Fall through */
+ case MLX5_FLOW_DESTINATION_TYPE_TIR:
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ MLX5_SET(dest_format, in_dests, destination_type,
+ dest->destination_type);
+ MLX5_SET(dest_format, in_dests, destination_id,
+ dest->destination_id);
+ break;
+ default:
+ rte_errno = EOPNOTSUPP;
+ goto free_devx;
+ }
+
+ in_dests = in_dests + dest_entry_sz;
+ }
+ MLX5_SET(flow_context, in_flow_context, destination_list_size, fte_attr->dests_num);
}
- devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
+ devx_obj->obj = mlx5_glue->devx_obj_create(ctx, in, inlen, out, sizeof(out));
if (!devx_obj->obj) {
DR_LOG(ERR, "Failed to create FTE (syndrome: %#x)",
mlx5dr_cmd_get_syndrome(out));
@@ -211,10 +249,13 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
goto free_devx;
}
+ simple_free(in);
return devx_obj;
free_devx:
simple_free(devx_obj);
+free_in:
+ simple_free(in);
return NULL;
}
@@ -1244,6 +1285,9 @@ int mlx5dr_cmd_query_caps(struct ibv_context *ctx,
caps->eswitch_manager_vport_number =
MLX5_GET(query_hca_cap_out, out,
capability.esw_cap.esw_manager_vport_number);
+
+ caps->merged_eswitch = MLX5_GET(query_hca_cap_out, out,
+ capability.esw_cap.merged_eswitch);
}
ret = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h
index bf3a362300..6f4aa3e320 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.h
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h
@@ -5,15 +5,27 @@
#ifndef MLX5DR_CMD_H_
#define MLX5DR_CMD_H_
+enum mlx5dr_cmd_ext_dest_flags {
+ MLX5DR_CMD_EXT_DEST_REFORMAT = 1 << 0,
+ MLX5DR_CMD_EXT_DEST_ESW_OWNER_VHCA_ID = 1 << 1,
+};
+
+struct mlx5dr_cmd_set_fte_dest {
+ uint8_t destination_type;
+ uint32_t destination_id;
+ enum mlx5dr_cmd_ext_dest_flags ext_flags;
+ uint16_t esw_owner_vhca_id;
+};
+
struct mlx5dr_cmd_set_fte_attr {
uint32_t action_flags;
+ uint8_t ignore_flow_level;
+ uint8_t flow_source;
uint8_t encrypt_decrypt_type;
uint32_t encrypt_decrypt_obj_id;
uint32_t packet_reformat_id;
- uint8_t destination_type;
- uint32_t destination_id;
- uint8_t ignore_flow_level;
- uint8_t flow_source;
+ uint32_t dests_num;
+ struct mlx5dr_cmd_set_fte_dest *dests;
};
struct mlx5dr_cmd_ft_create_attr {
@@ -216,6 +228,7 @@ struct mlx5dr_cmd_query_caps {
struct mlx5dr_cmd_query_ft_caps nic_ft;
struct mlx5dr_cmd_query_ft_caps fdb_ft;
bool eswitch_manager;
+ uint8_t merged_eswitch;
uint32_t eswitch_manager_vport_number;
uint8_t log_header_modify_argument_granularity;
uint8_t log_header_modify_argument_max_alloc;
diff --git a/drivers/net/mlx5/hws/mlx5dr_debug.c b/drivers/net/mlx5/hws/mlx5dr_debug.c
index 89529944a3..e7b1f2cc32 100644
--- a/drivers/net/mlx5/hws/mlx5dr_debug.c
+++ b/drivers/net/mlx5/hws/mlx5dr_debug.c
@@ -23,6 +23,7 @@ const char *mlx5dr_debug_action_type_str[] = {
[MLX5DR_ACTION_TYP_ASO_METER] = "ASO_METER",
[MLX5DR_ACTION_TYP_ASO_CT] = "ASO_CT",
[MLX5DR_ACTION_TYP_DEST_ROOT] = "DEST_ROOT",
+ [MLX5DR_ACTION_TYP_DEST_ARRAY] = "DEST_ARRAY",
};
static_assert(ARRAY_SIZE(mlx5dr_debug_action_type_str) == MLX5DR_ACTION_TYP_MAX,
diff --git a/drivers/net/mlx5/hws/mlx5dr_table.c b/drivers/net/mlx5/hws/mlx5dr_table.c
index 91eb92db78..55b9b20150 100644
--- a/drivers/net/mlx5/hws/mlx5dr_table.c
+++ b/drivers/net/mlx5/hws/mlx5dr_table.c
@@ -22,6 +22,7 @@ mlx5dr_table_up_default_fdb_miss_tbl(struct mlx5dr_table *tbl)
struct mlx5dr_cmd_ft_create_attr ft_attr = {0};
struct mlx5dr_cmd_set_fte_attr fte_attr = {0};
struct mlx5dr_cmd_forward_tbl *default_miss;
+ struct mlx5dr_cmd_set_fte_dest dest = {0};
struct mlx5dr_context *ctx = tbl->ctx;
uint8_t tbl_type = tbl->type;
@@ -37,9 +38,11 @@ mlx5dr_table_up_default_fdb_miss_tbl(struct mlx5dr_table *tbl)
ft_attr.level = tbl->ctx->caps->fdb_ft.max_level; /* The last level */
ft_attr.rtc_valid = false;
+ dest.destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.destination_id = ctx->caps->eswitch_manager_vport_number;
fte_attr.action_flags = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- fte_attr.destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- fte_attr.destination_id = ctx->caps->eswitch_manager_vport_number;
+ fte_attr.dests_num = 1;
+ fte_attr.dests = &dest;
default_miss = mlx5dr_cmd_forward_tbl_create(mlx5dr_context_get_local_ibv(ctx),
&ft_attr, &fte_attr);
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v7 05/10] net/mlx5/hws: allow destination into default miss FT
2023-10-26 7:12 ` [PATCH v7 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (3 preceding siblings ...)
2023-10-26 7:12 ` [PATCH v7 04/10] net/mlx5/hws: add support for mirroring Gregory Etelson
@ 2023-10-26 7:12 ` Gregory Etelson
2023-10-26 7:12 ` [PATCH v7 06/10] net/mlx5/hws: support reformat for hws mirror Gregory Etelson
` (5 subsequent siblings)
10 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-26 7:12 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Erez Shitrit, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Erez Shitrit <erezsh@nvidia.com>
In FDB it will direct the packet into the hypervisor vport.
That allows the user to mirror packets into the default-miss vport.
Signed-off-by: Erez Shitrit <erezsh@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/hws/mlx5dr_action.c | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c
index f068bc7e9c..6b62111593 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.c
+++ b/drivers/net/mlx5/hws/mlx5dr_action.c
@@ -1769,6 +1769,17 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
fte_attr.ignore_flow_level = 1;
break;
+ case MLX5DR_ACTION_TYP_MISS:
+ if (table_type != MLX5DR_TABLE_TYPE_FDB) {
+ DR_LOG(ERR, "Miss action supported for FDB only");
+ rte_errno = ENOTSUP;
+ goto free_dest_list;
+ }
+ dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest_list[i].destination_id =
+ ctx->caps->eswitch_manager_vport_number;
+ fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ break;
case MLX5DR_ACTION_TYP_VPORT:
dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest_list[i].destination_id = dests[i].dest->vport.vport_num;
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v7 06/10] net/mlx5/hws: support reformat for hws mirror
2023-10-26 7:12 ` [PATCH v7 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (4 preceding siblings ...)
2023-10-26 7:12 ` [PATCH v7 05/10] net/mlx5/hws: allow destination into default miss FT Gregory Etelson
@ 2023-10-26 7:12 ` Gregory Etelson
2023-10-26 7:12 ` [PATCH v7 07/10] net/mlx5: reformat HWS code for HWS mirror action Gregory Etelson
` (4 subsequent siblings)
10 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-26 7:12 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Haifei Luo, Shun Hao, Suanming Mou, Matan Azrad,
Viacheslav Ovsiienko, Ori Kam
From: Haifei Luo <haifeil@nvidia.com>
In dest_array action, an optional reformat action can be applied to each
destination. This patch supports this by using the extended destination
entry.
Signed-off-by: Haifei Luo <haifeil@nvidia.com>
Signed-off-by: Shun Hao <shunh@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/common/mlx5/mlx5_prm.h | 15 +++++++
drivers/net/mlx5/hws/mlx5dr_action.c | 67 +++++++++++++++++++++++++++-
drivers/net/mlx5/hws/mlx5dr_action.h | 2 +
drivers/net/mlx5/hws/mlx5dr_cmd.c | 10 ++++-
drivers/net/mlx5/hws/mlx5dr_cmd.h | 2 +
5 files changed, 94 insertions(+), 2 deletions(-)
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index aa0b622ca2..bced5a59dd 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -5352,6 +5352,21 @@ enum mlx5_parse_graph_arc_node_index {
MLX5_GRAPH_ARC_NODE_PROGRAMMABLE = 0x1f,
};
+enum mlx5_packet_reformat_context_reformat_type {
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4 = 0x5,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L3_ESP_TUNNEL = 0x6,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4 = 0x7,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_DEL_ESP_TRANSPORT = 0x8,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L3_ESP_TUNNEL_TO_L2 = 0x9,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP = 0xA,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6 = 0xB,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6 = 0xC,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_ADD_NISP_TNL = 0xD,
+ MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_REMOVE_NISP_TNL = 0xE,
+};
+
#define MLX5_PARSE_GRAPH_FLOW_SAMPLE_MAX 8
#define MLX5_PARSE_GRAPH_IN_ARC_MAX 8
#define MLX5_PARSE_GRAPH_OUT_ARC_MAX 8
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.c b/drivers/net/mlx5/hws/mlx5dr_action.c
index 6b62111593..11a7c58925 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.c
+++ b/drivers/net/mlx5/hws/mlx5dr_action.c
@@ -1703,6 +1703,44 @@ mlx5dr_action_create_modify_header(struct mlx5dr_context *ctx,
return NULL;
}
+static struct mlx5dr_devx_obj *
+mlx5dr_action_dest_array_process_reformat(struct mlx5dr_context *ctx,
+ enum mlx5dr_action_type type,
+ void *reformat_data,
+ size_t reformat_data_sz)
+{
+ struct mlx5dr_cmd_packet_reformat_create_attr pr_attr = {0};
+ struct mlx5dr_devx_obj *reformat_devx_obj;
+
+ if (!reformat_data || !reformat_data_sz) {
+ DR_LOG(ERR, "Empty reformat action or data");
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ switch (type) {
+ case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ pr_attr.type = MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L2_TUNNEL;
+ break;
+ case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ pr_attr.type = MLX5_PACKET_REFORMAT_CONTEXT_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
+ break;
+ default:
+ DR_LOG(ERR, "Invalid value for reformat type");
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ pr_attr.reformat_param_0 = 0;
+ pr_attr.data_sz = reformat_data_sz;
+ pr_attr.data = reformat_data;
+
+ reformat_devx_obj = mlx5dr_cmd_packet_reformat_create(ctx->ibv_ctx, &pr_attr);
+ if (!reformat_devx_obj)
+ return NULL;
+
+ return reformat_devx_obj;
+}
+
struct mlx5dr_action *
mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
size_t num_dest,
@@ -1710,6 +1748,7 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
uint32_t flags)
{
struct mlx5dr_cmd_set_fte_dest *dest_list = NULL;
+ struct mlx5dr_devx_obj *packet_reformat = NULL;
struct mlx5dr_cmd_ft_create_attr ft_attr = {0};
struct mlx5dr_cmd_set_fte_attr fte_attr = {0};
struct mlx5dr_cmd_forward_tbl *fw_island;
@@ -1796,6 +1835,21 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
dest_list[i].destination_id = dests[i].dest->devx_dest.devx_obj->id;
fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
break;
+ case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2:
+ case MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3:
+ packet_reformat = mlx5dr_action_dest_array_process_reformat
+ (ctx,
+ *action_type,
+ dests[i].reformat.reformat_data,
+ dests[i].reformat.reformat_data_sz);
+ if (!packet_reformat)
+ goto free_dest_list;
+
+ dest_list[i].ext_flags |= MLX5DR_CMD_EXT_DEST_REFORMAT;
+ dest_list[i].ext_reformat = packet_reformat;
+ ft_attr.reformat_en = true;
+ fte_attr.extended_dest = 1;
+ break;
default:
DR_LOG(ERR, "Unsupported action in dest_array");
rte_errno = ENOTSUP;
@@ -1819,8 +1873,9 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
goto free_action;
action->dest_array.fw_island = fw_island;
+ action->dest_array.num_dest = num_dest;
+ action->dest_array.dest_list = dest_list;
- simple_free(dest_list);
return action;
free_action:
@@ -1828,6 +1883,10 @@ mlx5dr_action_create_dest_array(struct mlx5dr_context *ctx,
destroy_fw_island:
mlx5dr_cmd_forward_tbl_destroy(fw_island);
free_dest_list:
+ for (i = 0; i < num_dest; i++) {
+ if (dest_list[i].ext_reformat)
+ mlx5dr_cmd_destroy_obj(dest_list[i].ext_reformat);
+ }
simple_free(dest_list);
return NULL;
}
@@ -1917,6 +1976,12 @@ static void mlx5dr_action_destroy_hws(struct mlx5dr_action *action)
case MLX5DR_ACTION_TYP_DEST_ARRAY:
mlx5dr_action_destroy_stcs(action);
mlx5dr_cmd_forward_tbl_destroy(action->dest_array.fw_island);
+ for (i = 0; i < action->dest_array.num_dest; i++) {
+ if (action->dest_array.dest_list[i].ext_reformat)
+ mlx5dr_cmd_destroy_obj
+ (action->dest_array.dest_list[i].ext_reformat);
+ }
+ simple_free(action->dest_array.dest_list);
break;
case MLX5DR_ACTION_TYP_REFORMAT_TNL_L3_TO_L2:
case MLX5DR_ACTION_TYP_MODIFY_HDR:
diff --git a/drivers/net/mlx5/hws/mlx5dr_action.h b/drivers/net/mlx5/hws/mlx5dr_action.h
index efe07c9d47..582a38bebc 100644
--- a/drivers/net/mlx5/hws/mlx5dr_action.h
+++ b/drivers/net/mlx5/hws/mlx5dr_action.h
@@ -153,6 +153,8 @@ struct mlx5dr_action {
} devx_dest;
struct {
struct mlx5dr_cmd_forward_tbl *fw_island;
+ size_t num_dest;
+ struct mlx5dr_cmd_set_fte_dest *dest_list;
} dest_array;
};
};
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.c b/drivers/net/mlx5/hws/mlx5dr_cmd.c
index 22f7c6b019..781de40c02 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.c
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.c
@@ -169,7 +169,9 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
uint32_t *in;
uint32_t i;
- dest_entry_sz = MLX5_ST_SZ_BYTES(dest_format);
+ dest_entry_sz = fte_attr->extended_dest ?
+ MLX5_ST_SZ_BYTES(extended_dest_format) :
+ MLX5_ST_SZ_BYTES(dest_format);
total_dest_sz = dest_entry_sz * fte_attr->dests_num;
inlen = align((MLX5_ST_SZ_BYTES(set_fte_in) + total_dest_sz), DW_SIZE);
in = simple_calloc(1, inlen);
@@ -192,6 +194,7 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_source, fte_attr->flow_source);
+ MLX5_SET(flow_context, in_flow_context, extended_destination, fte_attr->extended_dest);
MLX5_SET(set_fte_in, in, ignore_flow_level, fte_attr->ignore_flow_level);
action_flags = fte_attr->action_flags;
@@ -230,6 +233,11 @@ mlx5dr_cmd_set_fte(struct ibv_context *ctx,
dest->destination_type);
MLX5_SET(dest_format, in_dests, destination_id,
dest->destination_id);
+ if (dest->ext_flags & MLX5DR_CMD_EXT_DEST_REFORMAT) {
+ MLX5_SET(dest_format, in_dests, packet_reformat, 1);
+ MLX5_SET(extended_dest_format, in_dests, packet_reformat_id,
+ dest->ext_reformat->id);
+ }
break;
default:
rte_errno = EOPNOTSUPP;
diff --git a/drivers/net/mlx5/hws/mlx5dr_cmd.h b/drivers/net/mlx5/hws/mlx5dr_cmd.h
index 6f4aa3e320..28e5ea4726 100644
--- a/drivers/net/mlx5/hws/mlx5dr_cmd.h
+++ b/drivers/net/mlx5/hws/mlx5dr_cmd.h
@@ -14,6 +14,7 @@ struct mlx5dr_cmd_set_fte_dest {
uint8_t destination_type;
uint32_t destination_id;
enum mlx5dr_cmd_ext_dest_flags ext_flags;
+ struct mlx5dr_devx_obj *ext_reformat;
uint16_t esw_owner_vhca_id;
};
@@ -21,6 +22,7 @@ struct mlx5dr_cmd_set_fte_attr {
uint32_t action_flags;
uint8_t ignore_flow_level;
uint8_t flow_source;
+ uint8_t extended_dest;
uint8_t encrypt_decrypt_type;
uint32_t encrypt_decrypt_obj_id;
uint32_t packet_reformat_id;
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v7 07/10] net/mlx5: reformat HWS code for HWS mirror action
2023-10-26 7:12 ` [PATCH v7 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (5 preceding siblings ...)
2023-10-26 7:12 ` [PATCH v7 06/10] net/mlx5/hws: support reformat for hws mirror Gregory Etelson
@ 2023-10-26 7:12 ` Gregory Etelson
2023-10-26 7:12 ` [PATCH v7 08/10] net/mlx5: support " Gregory Etelson
` (3 subsequent siblings)
10 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-26 7:12 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Suanming Mou, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam
Reformat HWS code for HWS mirror action.
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/mlx5_flow_hw.c | 70 ++++++++++++++++++---------------
1 file changed, 39 insertions(+), 31 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 6fcf654e4a..b2215fb5cf 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -4548,6 +4548,17 @@ static enum mlx5dr_action_type mlx5_hw_dr_action_types[] = {
[RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL] = MLX5DR_ACTION_TYP_DEST_ROOT,
};
+static inline void
+action_template_set_type(struct rte_flow_actions_template *at,
+ enum mlx5dr_action_type *action_types,
+ unsigned int action_src, uint16_t *curr_off,
+ enum mlx5dr_action_type type)
+{
+ at->actions_off[action_src] = *curr_off;
+ action_types[*curr_off] = type;
+ *curr_off = *curr_off + 1;
+}
+
static int
flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
unsigned int action_src,
@@ -4565,9 +4576,8 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
type = mask->type;
switch (type) {
case RTE_FLOW_ACTION_TYPE_RSS:
- at->actions_off[action_src] = *curr_off;
- action_types[*curr_off] = MLX5DR_ACTION_TYP_TIR;
- *curr_off = *curr_off + 1;
+ action_template_set_type(at, action_types, action_src, curr_off,
+ MLX5DR_ACTION_TYP_TIR);
break;
case RTE_FLOW_ACTION_TYPE_AGE:
case RTE_FLOW_ACTION_TYPE_COUNT:
@@ -4575,23 +4585,20 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
* Both AGE and COUNT action need counter, the first one fills
* the action_types array, and the second only saves the offset.
*/
- if (*cnt_off == UINT16_MAX) {
- *cnt_off = *curr_off;
- action_types[*cnt_off] = MLX5DR_ACTION_TYP_CTR;
- *curr_off = *curr_off + 1;
- }
+ if (*cnt_off == UINT16_MAX)
+ action_template_set_type(at, action_types,
+ action_src, curr_off,
+ MLX5DR_ACTION_TYP_CTR);
at->actions_off[action_src] = *cnt_off;
break;
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
- at->actions_off[action_src] = *curr_off;
- action_types[*curr_off] = MLX5DR_ACTION_TYP_ASO_CT;
- *curr_off = *curr_off + 1;
+ action_template_set_type(at, action_types, action_src, curr_off,
+ MLX5DR_ACTION_TYP_ASO_CT);
break;
case RTE_FLOW_ACTION_TYPE_QUOTA:
case RTE_FLOW_ACTION_TYPE_METER_MARK:
- at->actions_off[action_src] = *curr_off;
- action_types[*curr_off] = MLX5DR_ACTION_TYP_ASO_METER;
- *curr_off = *curr_off + 1;
+ action_template_set_type(at, action_types, action_src, curr_off,
+ MLX5DR_ACTION_TYP_ASO_METER);
break;
default:
DRV_LOG(WARNING, "Unsupported shared action type: %d", type);
@@ -5101,31 +5108,32 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
at->reformat_off = UINT16_MAX;
at->mhdr_off = UINT16_MAX;
at->rx_cpy_pos = pos;
- /*
- * mlx5 PMD hacks indirect action index directly to the action conf.
- * The rte_flow_conv() function copies the content from conf pointer.
- * Need to restore the indirect action index from action conf here.
- */
for (i = 0; actions->type != RTE_FLOW_ACTION_TYPE_END;
actions++, masks++, i++) {
- if (actions->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
+ const struct rte_flow_action_modify_field *info;
+
+ switch (actions->type) {
+ /*
+ * mlx5 PMD hacks indirect action index directly to the action conf.
+ * The rte_flow_conv() function copies the content from conf pointer.
+ * Need to restore the indirect action index from action conf here.
+ */
+ case RTE_FLOW_ACTION_TYPE_INDIRECT:
at->actions[i].conf = actions->conf;
at->masks[i].conf = masks->conf;
- }
- if (actions->type == RTE_FLOW_ACTION_TYPE_MODIFY_FIELD) {
- const struct rte_flow_action_modify_field *info = actions->conf;
-
+ break;
+ case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
+ info = actions->conf;
if ((info->dst.field == RTE_FLOW_FIELD_FLEX_ITEM &&
flow_hw_flex_item_acquire(dev, info->dst.flex_handle,
&at->flex_item)) ||
- (info->src.field == RTE_FLOW_FIELD_FLEX_ITEM &&
- flow_hw_flex_item_acquire(dev, info->src.flex_handle,
- &at->flex_item))) {
- rte_flow_error_set(error, rte_errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "Failed to acquire flex item");
+ (info->src.field == RTE_FLOW_FIELD_FLEX_ITEM &&
+ flow_hw_flex_item_acquire(dev, info->src.flex_handle,
+ &at->flex_item)))
goto error;
- }
+ break;
+ default:
+ break;
}
}
at->tmpl = flow_hw_dr_actions_template_create(at);
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v7 08/10] net/mlx5: support HWS mirror action
2023-10-26 7:12 ` [PATCH v7 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (6 preceding siblings ...)
2023-10-26 7:12 ` [PATCH v7 07/10] net/mlx5: reformat HWS code for HWS mirror action Gregory Etelson
@ 2023-10-26 7:12 ` Gregory Etelson
2023-10-26 7:12 ` [PATCH v7 09/10] net/mlx5: reformat HWS code for indirect list actions Gregory Etelson
` (2 subsequent siblings)
10 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-26 7:12 UTC (permalink / raw)
To: dev
Cc: getelson, ,
rasland, Suanming Mou, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam
HWS mirror clones original packet to one or two destinations and
proceeds with the original packet path.
The mirror has no dedicated RTE flow action type.
Mirror object is referenced by INDIRECT_LIST action.
INDIRECT_LIST for a mirror built from actions list:
SAMPLE [/ SAMPLE] / <Orig. packet destination> / END
Mirror SAMPLE action defines packet clone. It specifies the clone
destination and optional clone reformat action.
Destination action for both clone and original packet depends on HCA
domain:
- for NIC RX, destination is either RSS or QUEUE
- for FDB, destination is PORT
HWS mirror was implemented with the INDIRECT_LIST flow action.
MLX5 PMD defines a general `struct mlx5_indirect_list` type for all
INDIRECT_LIST handler objects:
struct mlx5_indirect_list {
enum mlx5_indirect_list_type type;
LIST_ENTRY(mlx5_indirect_list) chain;
char data[];
};
Specific INDIRECT_LIST type must overload `mlx5_indirect_list::data`
and provide unique `type` value.
PMD returns a pointer to `mlx5_indirect_list` object.
Existing non-masked actions template API cannot identify flow actions
in INDIRECT_LIST handler because INDIRECT_LIST handler can represent
several flow actions.
For example:
A: SAMPLE / JUMP
B: SAMPLE / SAMPLE / RSS
Actions template command
template indirect_list / end mask indirect_list 0 / end
does not provide any information to differentiate between flow
actions in A and B.
MLX5 PMD requires INDIRECT_LIST configuration parameter in the
template section:
Non-masked INDIRECT_LIST API:
=============================
template indirect_list X / end mask indirect_list 0 / end
PMD identifies type of X handler and will use the same type in
template creation. Actual parameters for actions in the list will
be extracted from flow configuration
Masked INDIRECT_LIST API:
=========================
template indirect_list X / end mask indirect_list -1UL / end
PMD creates action template from actions types and configurations
referenced by X.
INDIRECT_LIST action without configuration is invalid and will be
rejected by PMD.
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
doc/guides/nics/features/mlx5.ini | 1 +
doc/guides/rel_notes/release_23_11.rst | 1 +
drivers/net/mlx5/mlx5.c | 1 +
drivers/net/mlx5/mlx5.h | 2 +
drivers/net/mlx5/mlx5_flow.c | 134 ++++++
drivers/net/mlx5/mlx5_flow.h | 69 ++-
drivers/net/mlx5/mlx5_flow_hw.c | 616 ++++++++++++++++++++++++-
7 files changed, 819 insertions(+), 5 deletions(-)
diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini
index fc67415c6c..a85d755734 100644
--- a/doc/guides/nics/features/mlx5.ini
+++ b/doc/guides/nics/features/mlx5.ini
@@ -106,6 +106,7 @@ drop = Y
flag = Y
inc_tcp_ack = Y
inc_tcp_seq = Y
+indirect_list = Y
jump = Y
mark = Y
meter = Y
diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 0a6fc76a9d..81d606e773 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -143,6 +143,7 @@ New Features
* **Updated NVIDIA mlx5 net driver.**
* Added support for Network Service Header (NSH) flow matching.
+ * Added support for ``RTE_FLOW_ACTION_TYPE_INDIRECT_LIST`` flow action.
* **Updated Solarflare net driver.**
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 997df595d0..08b7b03365 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -2168,6 +2168,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
/* Free the eCPRI flex parser resource. */
mlx5_flex_parser_ecpri_release(dev);
mlx5_flex_item_port_cleanup(dev);
+ mlx5_indirect_list_handles_release(dev);
#ifdef HAVE_MLX5_HWS_SUPPORT
flow_hw_destroy_vport_action(dev);
flow_hw_resource_release(dev);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 0b709a1bda..f3b872f59c 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1791,6 +1791,8 @@ struct mlx5_priv {
LIST_HEAD(ind_tables, mlx5_ind_table_obj) ind_tbls;
/* Standalone indirect tables. */
LIST_HEAD(stdl_ind_tables, mlx5_ind_table_obj) standalone_ind_tbls;
+ /* Objects created with indirect list action */
+ LIST_HEAD(indirect_list, mlx5_indirect_list) indirect_list_head;
/* Pointer to next element. */
rte_rwlock_t ind_tbls_lock;
uint32_t refcnt; /**< Reference counter. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 8ad85e6027..99b814d815 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -62,6 +62,30 @@ struct tunnel_default_miss_ctx {
};
};
+void
+mlx5_indirect_list_handles_release(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ while (!LIST_EMPTY(&priv->indirect_list_head)) {
+ struct mlx5_indirect_list *e =
+ LIST_FIRST(&priv->indirect_list_head);
+
+ LIST_REMOVE(e, entry);
+ switch (e->type) {
+#ifdef HAVE_MLX5_HWS_SUPPORT
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
+ mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)e, true);
+ break;
+#endif
+ default:
+ DRV_LOG(ERR, "invalid indirect list type");
+ MLX5_ASSERT(false);
+ break;
+ }
+ }
+}
+
static int
flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
struct rte_flow *flow,
@@ -1120,6 +1144,32 @@ mlx5_flow_async_action_handle_query_update
enum rte_flow_query_update_mode qu_mode,
void *user_data, struct rte_flow_error *error);
+static struct rte_flow_action_list_handle *
+mlx5_action_list_handle_create(struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error);
+
+static int
+mlx5_action_list_handle_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_action_list_handle *handle,
+ struct rte_flow_error *error);
+
+static struct rte_flow_action_list_handle *
+mlx5_flow_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct
+ rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data,
+ struct rte_flow_error *error);
+static int
+mlx5_flow_async_action_list_handle_destroy
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow_action_list_handle *action_handle,
+ void *user_data, struct rte_flow_error *error);
+
static const struct rte_flow_ops mlx5_flow_ops = {
.validate = mlx5_flow_validate,
.create = mlx5_flow_create,
@@ -1135,6 +1185,8 @@ static const struct rte_flow_ops mlx5_flow_ops = {
.action_handle_update = mlx5_action_handle_update,
.action_handle_query = mlx5_action_handle_query,
.action_handle_query_update = mlx5_action_handle_query_update,
+ .action_list_handle_create = mlx5_action_list_handle_create,
+ .action_list_handle_destroy = mlx5_action_list_handle_destroy,
.tunnel_decap_set = mlx5_flow_tunnel_decap_set,
.tunnel_match = mlx5_flow_tunnel_match,
.tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
@@ -1163,6 +1215,10 @@ static const struct rte_flow_ops mlx5_flow_ops = {
.async_action_handle_query = mlx5_flow_async_action_handle_query,
.async_action_handle_destroy = mlx5_flow_async_action_handle_destroy,
.async_actions_update = mlx5_flow_async_flow_update,
+ .async_action_list_handle_create =
+ mlx5_flow_async_action_list_handle_create,
+ .async_action_list_handle_destroy =
+ mlx5_flow_async_action_list_handle_destroy,
};
/* Tunnel information. */
@@ -10869,6 +10925,84 @@ mlx5_action_handle_query_update(struct rte_eth_dev *dev,
query, qu_mode, error);
}
+
+#define MLX5_DRV_FOPS_OR_ERR(dev, fops, drv_cb, ret) \
+{ \
+ struct rte_flow_attr attr = { .transfer = 0 }; \
+ enum mlx5_flow_drv_type drv_type = flow_get_drv_type((dev), &attr); \
+ if (drv_type == MLX5_FLOW_TYPE_MIN || \
+ drv_type == MLX5_FLOW_TYPE_MAX) { \
+ rte_flow_error_set(error, ENOTSUP, \
+ RTE_FLOW_ERROR_TYPE_ACTION, \
+ NULL, "invalid driver type"); \
+ return ret; \
+ } \
+ (fops) = flow_get_drv_ops(drv_type); \
+ if (!(fops) || !(fops)->drv_cb) { \
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, \
+ NULL, "no action_list handler"); \
+ return ret; \
+ } \
+}
+
+static struct rte_flow_action_list_handle *
+mlx5_action_list_handle_create(struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops, action_list_handle_create, NULL);
+ return fops->action_list_handle_create(dev, conf, actions, error);
+}
+
+static int
+mlx5_action_list_handle_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_action_list_handle *handle,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops, action_list_handle_destroy, ENOTSUP);
+ return fops->action_list_handle_destroy(dev, handle, error);
+}
+
+static struct rte_flow_action_list_handle *
+mlx5_flow_async_action_list_handle_create(struct rte_eth_dev *dev,
+ uint32_t queue_id,
+ const struct
+ rte_flow_op_attr *op_attr,
+ const struct
+ rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops, async_action_list_handle_create, NULL);
+ return fops->async_action_list_handle_create(dev, queue_id, op_attr,
+ conf, actions, user_data,
+ error);
+}
+
+static int
+mlx5_flow_async_action_list_handle_destroy
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow_action_list_handle *action_handle,
+ void *user_data, struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops,
+ async_action_list_handle_destroy, ENOTSUP);
+ return fops->async_action_list_handle_destroy(dev, queue_id, op_attr,
+ action_handle, user_data,
+ error);
+}
+
/**
* Destroy all indirect actions (shared RSS).
*
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 903ff66d72..580db80fd4 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -65,7 +65,7 @@ enum mlx5_rte_flow_field_id {
(((uint32_t)(uintptr_t)(handle)) & \
((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1))
-enum {
+enum mlx5_indirect_type {
MLX5_INDIRECT_ACTION_TYPE_RSS,
MLX5_INDIRECT_ACTION_TYPE_AGE,
MLX5_INDIRECT_ACTION_TYPE_COUNT,
@@ -97,6 +97,28 @@ enum {
#define MLX5_ACTION_CTX_CT_GET_OWNER MLX5_INDIRECT_ACT_CT_GET_OWNER
#define MLX5_ACTION_CTX_CT_GEN_IDX MLX5_INDIRECT_ACT_CT_GEN_IDX
+enum mlx5_indirect_list_type {
+ MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 1,
+};
+
+/*
+ * Base type for indirect list type.
+ * Actual indirect list type MUST override that type and put type spec data
+ * after the `entry` field.
+ */
+struct mlx5_indirect_list {
+ /* type field MUST be the first */
+ enum mlx5_indirect_list_type type;
+ LIST_ENTRY(mlx5_indirect_list) entry;
+ /* put type specific data after the entry field */
+};
+
+static __rte_always_inline enum mlx5_indirect_list_type
+mlx5_get_indirect_list_type(const struct mlx5_indirect_list *obj)
+{
+ return obj->type;
+}
+
/* Matches on selected register. */
struct mlx5_rte_flow_item_tag {
enum modify_reg id;
@@ -1218,6 +1240,10 @@ struct rte_flow_hw {
#pragma GCC diagnostic error "-Wpedantic"
#endif
+struct mlx5dr_action;
+typedef struct mlx5dr_action *
+(*indirect_list_callback_t)(const struct rte_flow_action *);
+
/* rte flow action translate to DR action struct. */
struct mlx5_action_construct_data {
LIST_ENTRY(mlx5_action_construct_data) next;
@@ -1266,6 +1292,9 @@ struct mlx5_action_construct_data {
struct {
uint32_t id;
} shared_meter;
+ struct {
+ indirect_list_callback_t cb;
+ } indirect_list;
};
};
@@ -1776,6 +1805,17 @@ typedef int (*mlx5_flow_action_query_update_t)
const void *update, void *data,
enum rte_flow_query_update_mode qu_mode,
struct rte_flow_error *error);
+typedef struct rte_flow_action_list_handle *
+(*mlx5_flow_action_list_handle_create_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error);
+typedef int
+(*mlx5_flow_action_list_handle_destroy_t)
+ (struct rte_eth_dev *dev,
+ struct rte_flow_action_list_handle *handle,
+ struct rte_flow_error *error);
typedef int (*mlx5_flow_sync_domain_t)
(struct rte_eth_dev *dev,
uint32_t domains,
@@ -1964,6 +2004,20 @@ typedef int (*mlx5_flow_async_action_handle_destroy_t)
struct rte_flow_action_handle *handle,
void *user_data,
struct rte_flow_error *error);
+typedef struct rte_flow_action_list_handle *
+(*mlx5_flow_async_action_list_handle_create_t)
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data, struct rte_flow_error *error);
+typedef int
+(*mlx5_flow_async_action_list_handle_destroy_t)
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow_action_list_handle *action_handle,
+ void *user_data, struct rte_flow_error *error);
+
struct mlx5_flow_driver_ops {
mlx5_flow_validate_t validate;
@@ -1999,6 +2053,8 @@ struct mlx5_flow_driver_ops {
mlx5_flow_action_update_t action_update;
mlx5_flow_action_query_t action_query;
mlx5_flow_action_query_update_t action_query_update;
+ mlx5_flow_action_list_handle_create_t action_list_handle_create;
+ mlx5_flow_action_list_handle_destroy_t action_list_handle_destroy;
mlx5_flow_sync_domain_t sync_domain;
mlx5_flow_discover_priorities_t discover_priorities;
mlx5_flow_item_create_t item_create;
@@ -2025,6 +2081,10 @@ struct mlx5_flow_driver_ops {
mlx5_flow_async_action_handle_query_update_t async_action_query_update;
mlx5_flow_async_action_handle_query_t async_action_query;
mlx5_flow_async_action_handle_destroy_t async_action_destroy;
+ mlx5_flow_async_action_list_handle_create_t
+ async_action_list_handle_create;
+ mlx5_flow_async_action_list_handle_destroy_t
+ async_action_list_handle_destroy;
};
/* mlx5_flow.c */
@@ -2755,4 +2815,11 @@ flow_hw_get_srh_flex_parser_byte_off_from_ctx(void *dr_ctx __rte_unused)
#endif
return UINT32_MAX;
}
+void
+mlx5_indirect_list_handles_release(struct rte_eth_dev *dev);
+#ifdef HAVE_MLX5_HWS_SUPPORT
+struct mlx5_mirror;
+void
+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release);
+#endif
#endif /* RTE_PMD_MLX5_FLOW_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index b2215fb5cf..b11348c99c 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -58,6 +58,24 @@
#define MLX5_HW_VLAN_PUSH_VID_IDX 1
#define MLX5_HW_VLAN_PUSH_PCP_IDX 2
+#define MLX5_MIRROR_MAX_CLONES_NUM 3
+#define MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN 4
+
+struct mlx5_mirror_clone {
+ enum rte_flow_action_type type;
+ void *action_ctx;
+};
+
+struct mlx5_mirror {
+ /* type field MUST be the first */
+ enum mlx5_indirect_list_type type;
+ LIST_ENTRY(mlx5_indirect_list) entry;
+
+ uint32_t clones_num;
+ struct mlx5dr_action *mirror_action;
+ struct mlx5_mirror_clone clone[MLX5_MIRROR_MAX_CLONES_NUM];
+};
+
static int flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev);
static int flow_hw_translate_group(struct rte_eth_dev *dev,
const struct mlx5_flow_template_table_cfg *cfg,
@@ -568,6 +586,22 @@ __flow_hw_act_data_general_append(struct mlx5_priv *priv,
return 0;
}
+static __rte_always_inline int
+flow_hw_act_data_indirect_list_append(struct mlx5_priv *priv,
+ struct mlx5_hw_actions *acts,
+ enum rte_flow_action_type type,
+ uint16_t action_src, uint16_t action_dst,
+ indirect_list_callback_t cb)
+{
+ struct mlx5_action_construct_data *act_data;
+
+ act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
+ if (!act_data)
+ return -1;
+ act_data->indirect_list.cb = cb;
+ LIST_INSERT_HEAD(&acts->act_list, act_data, next);
+ return 0;
+}
/**
* Append dynamic encap action to the dynamic action list.
*
@@ -1383,6 +1417,48 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev,
return 0;
}
+static struct mlx5dr_action *
+flow_hw_mirror_action(const struct rte_flow_action *action)
+{
+ struct mlx5_mirror *mirror = (void *)(uintptr_t)action->conf;
+
+ return mirror->mirror_action;
+}
+
+static int
+table_template_translate_indirect_list(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *mask,
+ struct mlx5_hw_actions *acts,
+ uint16_t action_src,
+ uint16_t action_dst)
+{
+ int ret;
+ bool is_masked = action->conf && mask->conf;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ enum mlx5_indirect_list_type type;
+
+ if (!action->conf)
+ return -EINVAL;
+ type = mlx5_get_indirect_list_type(action->conf);
+ switch (type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
+ if (is_masked) {
+ acts->rule_acts[action_dst].action = flow_hw_mirror_action(action);
+ } else {
+ ret = flow_hw_act_data_indirect_list_append
+ (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
+ action_src, action_dst, flow_hw_mirror_action);
+ if (ret)
+ return ret;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
/**
* Translate rte_flow actions to DR action.
*
@@ -1419,7 +1495,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
struct rte_flow_action *actions = at->actions;
struct rte_flow_action *action_start = actions;
struct rte_flow_action *masks = at->masks;
- enum mlx5dr_action_type refmt_type = 0;
+ enum mlx5dr_action_type refmt_type = MLX5DR_ACTION_TYP_LAST;
const struct rte_flow_action_raw_encap *raw_encap_data;
const struct rte_flow_item *enc_item = NULL, *enc_item_m = NULL;
uint16_t reformat_src = 0;
@@ -1433,7 +1509,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
uint16_t action_pos;
uint16_t jump_pos;
uint32_t ct_idx;
- int err;
+ int ret, err;
uint32_t target_grp = 0;
int table_type;
@@ -1445,7 +1521,20 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
else
type = MLX5DR_TABLE_TYPE_NIC_RX;
for (; !actions_end; actions++, masks++) {
- switch (actions->type) {
+ switch ((int)actions->type) {
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
+ action_pos = at->actions_off[actions - at->actions];
+ if (!attr->group) {
+ DRV_LOG(ERR, "Indirect action is not supported in root table.");
+ goto err;
+ }
+ ret = table_template_translate_indirect_list
+ (dev, actions, masks, acts,
+ actions - action_start,
+ action_pos);
+ if (ret)
+ goto err;
+ break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
action_pos = at->actions_off[actions - at->actions];
if (!attr->group) {
@@ -2301,7 +2390,11 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
MLX5_ASSERT(action->type ==
RTE_FLOW_ACTION_TYPE_INDIRECT ||
(int)action->type == act_data->type);
- switch (act_data->type) {
+ switch ((int)act_data->type) {
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
+ rule_acts[act_data->action_dst].action =
+ act_data->indirect_list.cb(action);
+ break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
if (flow_hw_shared_action_construct
(dev, queue, action, table, it_idx,
@@ -4366,6 +4459,8 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev,
switch (action->type) {
case RTE_FLOW_ACTION_TYPE_VOID:
break;
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
+ break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
ret = flow_hw_validate_action_indirect(dev, action,
mask,
@@ -4607,6 +4702,28 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
return 0;
}
+
+static int
+flow_hw_template_actions_list(struct rte_flow_actions_template *at,
+ unsigned int action_src,
+ enum mlx5dr_action_type *action_types,
+ uint16_t *curr_off)
+{
+ enum mlx5_indirect_list_type list_type;
+
+ list_type = mlx5_get_indirect_list_type(at->actions[action_src].conf);
+ switch (list_type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
+ action_template_set_type(at, action_types, action_src, curr_off,
+ MLX5DR_ACTION_TYP_DEST_ARRAY);
+ break;
+ default:
+ DRV_LOG(ERR, "Unsupported indirect list type");
+ return -EINVAL;
+ }
+ return 0;
+}
+
/**
* Create DR action template based on a provided sequence of flow actions.
*
@@ -4639,6 +4756,12 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
switch (at->actions[i].type) {
case RTE_FLOW_ACTION_TYPE_VOID:
break;
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
+ ret = flow_hw_template_actions_list(at, i, action_types,
+ &curr_off);
+ if (ret)
+ return NULL;
+ break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
ret = flow_hw_dr_actions_template_handle_shared
(&at->masks[i],
@@ -5119,6 +5242,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
* Need to restore the indirect action index from action conf here.
*/
case RTE_FLOW_ACTION_TYPE_INDIRECT:
+ case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
at->actions[i].conf = actions->conf;
at->masks[i].conf = masks->conf;
break;
@@ -9354,6 +9478,484 @@ flow_hw_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
return flow_hw_get_q_aged_flows(dev, 0, contexts, nb_contexts, error);
}
+static void
+mlx5_mirror_destroy_clone(struct rte_eth_dev *dev,
+ struct mlx5_mirror_clone *clone)
+{
+ switch (clone->type) {
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ mlx5_hrxq_release(dev,
+ ((struct mlx5_hrxq *)(clone->action_ctx))->idx);
+ break;
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ flow_hw_jump_release(dev, clone->action_ctx);
+ break;
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ default:
+ break;
+ }
+}
+
+void
+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release)
+{
+ uint32_t i;
+
+ if (mirror->entry.le_prev)
+ LIST_REMOVE(mirror, entry);
+ for (i = 0; i < mirror->clones_num; i++)
+ mlx5_mirror_destroy_clone(dev, &mirror->clone[i]);
+ if (mirror->mirror_action)
+ mlx5dr_action_destroy(mirror->mirror_action);
+ if (release)
+ mlx5_free(mirror);
+}
+
+static inline enum mlx5dr_table_type
+get_mlx5dr_table_type(const struct rte_flow_attr *attr)
+{
+ enum mlx5dr_table_type type;
+
+ if (attr->transfer)
+ type = MLX5DR_TABLE_TYPE_FDB;
+ else if (attr->egress)
+ type = MLX5DR_TABLE_TYPE_NIC_TX;
+ else
+ type = MLX5DR_TABLE_TYPE_NIC_RX;
+ return type;
+}
+
+static __rte_always_inline bool
+mlx5_mirror_terminal_action(const struct rte_flow_action *action)
+{
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+static bool
+mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ if (priv->sh->esw_mode)
+ return false;
+ break;
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ if (!priv->sh->esw_mode)
+ return false;
+ if (action[0].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP &&
+ action[1].type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
+ return false;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+/**
+ * Valid mirror actions list includes one or two SAMPLE actions
+ * followed by JUMP.
+ *
+ * @return
+ * Number of mirrors if the *actions* list was valid.
+ * -EINVAL otherwise.
+ */
+static int
+mlx5_hw_mirror_actions_list_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions)
+{
+ if (actions[0].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
+ int i = 1;
+ bool valid;
+ const struct rte_flow_action_sample *sample = actions[0].conf;
+ valid = mlx5_mirror_validate_sample_action(dev, sample->actions);
+ if (!valid)
+ return -EINVAL;
+ if (actions[1].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
+ i = 2;
+ sample = actions[1].conf;
+ valid = mlx5_mirror_validate_sample_action(dev, sample->actions);
+ if (!valid)
+ return -EINVAL;
+ }
+ return mlx5_mirror_terminal_action(actions + i) ? i + 1 : -EINVAL;
+ }
+ return -EINVAL;
+}
+
+static int
+mirror_format_tir(struct rte_eth_dev *dev,
+ struct mlx5_mirror_clone *clone,
+ const struct mlx5_flow_template_table_cfg *table_cfg,
+ const struct rte_flow_action *action,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ struct rte_flow_error *error)
+{
+ uint32_t hws_flags;
+ enum mlx5dr_table_type table_type;
+ struct mlx5_hrxq *tir_ctx;
+
+ table_type = get_mlx5dr_table_type(&table_cfg->attr.flow_attr);
+ hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
+ tir_ctx = flow_hw_tir_action_register(dev, hws_flags, action);
+ if (!tir_ctx)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action, "failed to create QUEUE action for mirror clone");
+ dest_attr->dest = tir_ctx->action;
+ clone->action_ctx = tir_ctx;
+ return 0;
+}
+
+static int
+mirror_format_jump(struct rte_eth_dev *dev,
+ struct mlx5_mirror_clone *clone,
+ const struct mlx5_flow_template_table_cfg *table_cfg,
+ const struct rte_flow_action *action,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_jump *jump_conf = action->conf;
+ struct mlx5_hw_jump_action *jump = flow_hw_jump_action_register
+ (dev, table_cfg,
+ jump_conf->group, error);
+
+ if (!jump)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action, "failed to create JUMP action for mirror clone");
+ dest_attr->dest = jump->hws_action;
+ clone->action_ctx = jump;
+ return 0;
+}
+
+static int
+mirror_format_port(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ struct rte_flow_error __rte_unused *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_ethdev *port_action = action->conf;
+
+ dest_attr->dest = priv->hw_vport[port_action->port_id];
+ return 0;
+}
+
+#define MLX5_CONST_ENCAP_ITEM(encap_type, ptr) \
+(((const struct encap_type *)(ptr))->definition)
+
+static int
+hw_mirror_clone_reformat(const struct rte_flow_action *actions,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ enum mlx5dr_action_type *action_type,
+ uint8_t *reformat_buf, bool decap)
+{
+ int ret;
+ const struct rte_flow_item *encap_item = NULL;
+ const struct rte_flow_action_raw_encap *encap_conf = NULL;
+ typeof(dest_attr->reformat) *reformat = &dest_attr->reformat;
+
+ switch (actions[0].type) {
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ encap_conf = actions[0].conf;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ encap_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_vxlan_encap,
+ actions);
+ break;
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ encap_item = MLX5_CONST_ENCAP_ITEM(rte_flow_action_nvgre_encap,
+ actions);
+ break;
+ default:
+ return -EINVAL;
+ }
+ *action_type = decap ?
+ MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L3 :
+ MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
+ if (encap_item) {
+ ret = flow_dv_convert_encap_data(encap_item, reformat_buf,
+ &reformat->reformat_data_sz, NULL);
+ if (ret)
+ return -EINVAL;
+ reformat->reformat_data = reformat_buf;
+ } else {
+ reformat->reformat_data = (void *)(uintptr_t)encap_conf->data;
+ reformat->reformat_data_sz = encap_conf->size;
+ }
+ return 0;
+}
+
+static int
+hw_mirror_format_clone(struct rte_eth_dev *dev,
+ struct mlx5_mirror_clone *clone,
+ const struct mlx5_flow_template_table_cfg *table_cfg,
+ const struct rte_flow_action *actions,
+ struct mlx5dr_action_dest_attr *dest_attr,
+ uint8_t *reformat_buf, struct rte_flow_error *error)
+{
+ int ret;
+ uint32_t i;
+ bool decap_seen = false;
+
+ for (i = 0; actions[i].type != RTE_FLOW_ACTION_TYPE_END; i++) {
+ dest_attr->action_type[i] = mlx5_hw_dr_action_types[actions[i].type];
+ switch (actions[i].type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ ret = mirror_format_tir(dev, clone, table_cfg,
+ &actions[i], dest_attr, error);
+ if (ret)
+ return ret;
+ break;
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ ret = mirror_format_port(dev, &actions[i],
+ dest_attr, error);
+ if (ret)
+ return ret;
+ break;
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ ret = mirror_format_jump(dev, clone, table_cfg,
+ &actions[i], dest_attr, error);
+ if (ret)
+ return ret;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ decap_seen = true;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ ret = hw_mirror_clone_reformat(&actions[i], dest_attr,
+ &dest_attr->action_type[i],
+ reformat_buf, decap_seen);
+ if (ret < 0)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ &actions[i],
+ "failed to create reformat action");
+ break;
+ default:
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ &actions[i], "unsupported sample action");
+ }
+ clone->type = actions->type;
+ }
+ dest_attr->action_type[i] = MLX5DR_ACTION_TYP_LAST;
+ return 0;
+}
+
+static struct rte_flow_action_list_handle *
+mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
+ const struct mlx5_flow_template_table_cfg *table_cfg,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ uint32_t hws_flags;
+ int ret = 0, i, clones_num;
+ struct mlx5_mirror *mirror;
+ enum mlx5dr_table_type table_type;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint8_t reformat_buf[MLX5_MIRROR_MAX_CLONES_NUM][MLX5_ENCAP_MAX_LEN];
+ struct mlx5dr_action_dest_attr mirror_attr[MLX5_MIRROR_MAX_CLONES_NUM + 1];
+ enum mlx5dr_action_type array_action_types[MLX5_MIRROR_MAX_CLONES_NUM + 1]
+ [MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN + 1];
+
+ memset(mirror_attr, 0, sizeof(mirror_attr));
+ memset(array_action_types, 0, sizeof(array_action_types));
+ table_type = get_mlx5dr_table_type(&table_cfg->attr.flow_attr);
+ hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
+ clones_num = mlx5_hw_mirror_actions_list_validate(dev, actions);
+ if (clones_num < 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Invalid mirror list format");
+ return NULL;
+ }
+ mirror = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mirror),
+ 0, SOCKET_ID_ANY);
+ if (!mirror) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Failed to allocate mirror context");
+ return NULL;
+ }
+ mirror->type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
+ mirror->clones_num = clones_num;
+ for (i = 0; i < clones_num; i++) {
+ const struct rte_flow_action *clone_actions;
+
+ mirror_attr[i].action_type = array_action_types[i];
+ if (actions[i].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
+ const struct rte_flow_action_sample *sample = actions[i].conf;
+
+ clone_actions = sample->actions;
+ } else {
+ clone_actions = &actions[i];
+ }
+ ret = hw_mirror_format_clone(dev, &mirror->clone[i], table_cfg,
+ clone_actions, &mirror_attr[i],
+ reformat_buf[i], error);
+
+ if (ret)
+ goto error;
+ }
+ hws_flags |= MLX5DR_ACTION_FLAG_SHARED;
+ mirror->mirror_action = mlx5dr_action_create_dest_array(priv->dr_ctx,
+ clones_num,
+ mirror_attr,
+ hws_flags);
+ if (!mirror->mirror_action) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Failed to create HWS mirror action");
+ goto error;
+ }
+
+ LIST_INSERT_HEAD(&priv->indirect_list_head,
+ (struct mlx5_indirect_list *)mirror, entry);
+ return (struct rte_flow_action_list_handle *)mirror;
+
+error:
+ mlx5_hw_mirror_destroy(dev, mirror, true);
+ return NULL;
+}
+
+static struct rte_flow_action_list_handle *
+flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data,
+ struct rte_flow_error *error)
+{
+ struct mlx5_hw_q_job *job = NULL;
+ bool push = flow_hw_action_push(attr);
+ struct rte_flow_action_list_handle *handle;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct mlx5_flow_template_table_cfg table_cfg = {
+ .external = true,
+ .attr = {
+ .flow_attr = {
+ .ingress = conf->ingress,
+ .egress = conf->egress,
+ .transfer = conf->transfer
+ }
+ }
+ };
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "No action list");
+ return NULL;
+ }
+ if (attr) {
+ job = flow_hw_action_job_init(priv, queue, NULL, user_data,
+ NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
+ error);
+ if (!job)
+ return NULL;
+ }
+ switch (actions[0].type) {
+ case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ handle = mlx5_hw_mirror_handle_create(dev, &table_cfg,
+ actions, error);
+ break;
+ default:
+ handle = NULL;
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Invalid list");
+ }
+ if (job) {
+ job->action = handle;
+ flow_hw_action_finalize(dev, queue, job, push, false,
+ handle != NULL);
+ }
+ return handle;
+}
+
+static struct rte_flow_action_list_handle *
+flow_hw_action_list_handle_create(struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ return flow_hw_async_action_list_handle_create(dev, MLX5_HW_INV_QUEUE,
+ NULL, conf, actions,
+ NULL, error);
+}
+
+static int
+flow_hw_async_action_list_handle_destroy
+ (struct rte_eth_dev *dev, uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ struct rte_flow_action_list_handle *handle,
+ void *user_data, struct rte_flow_error *error)
+{
+ int ret = 0;
+ struct mlx5_hw_q_job *job = NULL;
+ bool push = flow_hw_action_push(attr);
+ struct mlx5_priv *priv = dev->data->dev_private;
+ enum mlx5_indirect_list_type type =
+ mlx5_get_indirect_list_type((void *)handle);
+
+ if (attr) {
+ job = flow_hw_action_job_init(priv, queue, NULL, user_data,
+ NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
+ error);
+ if (!job)
+ return rte_errno;
+ }
+ switch (type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
+ mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle, false);
+ break;
+ default:
+ handle = NULL;
+ ret = rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "Invalid indirect list handle");
+ }
+ if (job) {
+ job->action = handle;
+ flow_hw_action_finalize(dev, queue, job, push, false,
+ handle != NULL);
+ }
+ mlx5_free(handle);
+ return ret;
+}
+
+static int
+flow_hw_action_list_handle_destroy(struct rte_eth_dev *dev,
+ struct rte_flow_action_list_handle *handle,
+ struct rte_flow_error *error)
+{
+ return flow_hw_async_action_list_handle_destroy(dev, MLX5_HW_INV_QUEUE,
+ NULL, handle, NULL,
+ error);
+}
+
const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.info_get = flow_hw_info_get,
.configure = flow_hw_configure,
@@ -9382,6 +9984,12 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.action_update = flow_hw_action_update,
.action_query = flow_hw_action_query,
.action_query_update = flow_hw_action_query_update,
+ .action_list_handle_create = flow_hw_action_list_handle_create,
+ .action_list_handle_destroy = flow_hw_action_list_handle_destroy,
+ .async_action_list_handle_create =
+ flow_hw_async_action_list_handle_create,
+ .async_action_list_handle_destroy =
+ flow_hw_async_action_list_handle_destroy,
.query = flow_hw_query,
.get_aged_flows = flow_hw_get_aged_flows,
.get_q_aged_flows = flow_hw_get_q_aged_flows,
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v7 09/10] net/mlx5: reformat HWS code for indirect list actions
2023-10-26 7:12 ` [PATCH v7 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (7 preceding siblings ...)
2023-10-26 7:12 ` [PATCH v7 08/10] net/mlx5: support " Gregory Etelson
@ 2023-10-26 7:12 ` Gregory Etelson
2023-10-26 7:12 ` [PATCH v7 10/10] net/mlx5: support indirect list METER_MARK action Gregory Etelson
2023-10-29 7:53 ` [PATCH v7 00/10] net/mlx5: support indirect actions list Raslan Darawsheh
10 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-26 7:12 UTC (permalink / raw)
To: dev
Cc: getelson,
rasland, Suanming Mou, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam
Reformat HWS code for indirect list actions.
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/mlx5_flow.h | 4 +-
drivers/net/mlx5/mlx5_flow_hw.c | 235 ++++++++++++++++++--------------
2 files changed, 131 insertions(+), 108 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 580db80fd4..653f83cf55 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1331,11 +1331,11 @@ struct rte_flow_actions_template {
uint64_t action_flags; /* Bit-map of all valid action in template. */
uint16_t dr_actions_num; /* Amount of DR rules actions. */
uint16_t actions_num; /* Amount of flow actions */
- uint16_t *actions_off; /* DR action offset for given rte action offset. */
+ uint16_t *dr_off; /* DR action offset for given rte action offset. */
+ uint16_t *src_off; /* RTE action displacement from app. template */
uint16_t reformat_off; /* Offset of DR reformat action. */
uint16_t mhdr_off; /* Offset of DR modify header action. */
uint32_t refcnt; /* Reference counter. */
- uint16_t rx_cpy_pos; /* Action position of Rx metadata to be copied. */
uint8_t flex_item; /* flex item index. */
};
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index b11348c99c..f9f735ba75 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -1015,11 +1015,11 @@ flow_hw_modify_field_init(struct mlx5_hw_modify_header_action *mhdr,
static __rte_always_inline int
flow_hw_modify_field_compile(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
- const struct rte_flow_action *action_start, /* Start of AT actions. */
const struct rte_flow_action *action, /* Current action from AT. */
const struct rte_flow_action *action_mask, /* Current mask from AT. */
struct mlx5_hw_actions *acts,
struct mlx5_hw_modify_header_action *mhdr,
+ uint16_t src_pos,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
@@ -1122,7 +1122,7 @@ flow_hw_modify_field_compile(struct rte_eth_dev *dev,
if (shared)
return 0;
ret = __flow_hw_act_data_hdr_modify_append(priv, acts, RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
- action - action_start, mhdr->pos,
+ src_pos, mhdr->pos,
cmds_start, cmds_end, shared,
field, dcopy, mask);
if (ret)
@@ -1181,11 +1181,10 @@ flow_hw_validate_compiled_modify_field(struct rte_eth_dev *dev,
static int
flow_hw_represented_port_compile(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
- const struct rte_flow_action *action_start,
const struct rte_flow_action *action,
const struct rte_flow_action *action_mask,
struct mlx5_hw_actions *acts,
- uint16_t action_dst,
+ uint16_t action_src, uint16_t action_dst,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
@@ -1241,7 +1240,7 @@ flow_hw_represented_port_compile(struct rte_eth_dev *dev,
} else {
ret = __flow_hw_act_data_general_append
(priv, acts, action->type,
- action - action_start, action_dst);
+ action_src, action_dst);
if (ret)
return rte_flow_error_set
(error, ENOMEM,
@@ -1493,7 +1492,6 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
const struct rte_flow_template_table_attr *table_attr = &cfg->attr;
const struct rte_flow_attr *attr = &table_attr->flow_attr;
struct rte_flow_action *actions = at->actions;
- struct rte_flow_action *action_start = actions;
struct rte_flow_action *masks = at->masks;
enum mlx5dr_action_type refmt_type = MLX5DR_ACTION_TYP_LAST;
const struct rte_flow_action_raw_encap *raw_encap_data;
@@ -1506,7 +1504,6 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
uint32_t type;
bool reformat_used = false;
unsigned int of_vlan_offset;
- uint16_t action_pos;
uint16_t jump_pos;
uint32_t ct_idx;
int ret, err;
@@ -1521,71 +1518,69 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
else
type = MLX5DR_TABLE_TYPE_NIC_RX;
for (; !actions_end; actions++, masks++) {
+ uint64_t pos = actions - at->actions;
+ uint16_t src_pos = pos - at->src_off[pos];
+ uint16_t dr_pos = at->dr_off[pos];
+
switch ((int)actions->type) {
case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
- action_pos = at->actions_off[actions - at->actions];
if (!attr->group) {
DRV_LOG(ERR, "Indirect action is not supported in root table.");
goto err;
}
ret = table_template_translate_indirect_list
- (dev, actions, masks, acts,
- actions - action_start,
- action_pos);
+ (dev, actions, masks, acts, src_pos, dr_pos);
if (ret)
goto err;
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
- action_pos = at->actions_off[actions - at->actions];
if (!attr->group) {
DRV_LOG(ERR, "Indirect action is not supported in root table.");
goto err;
}
if (actions->conf && masks->conf) {
if (flow_hw_shared_action_translate
- (dev, actions, acts, actions - action_start, action_pos))
+ (dev, actions, acts, src_pos, dr_pos))
goto err;
} else if (__flow_hw_act_data_general_append
- (priv, acts, actions->type,
- actions - action_start, action_pos)){
+ (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT,
+ src_pos, dr_pos)){
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_VOID:
break;
case RTE_FLOW_ACTION_TYPE_DROP:
- action_pos = at->actions_off[actions - at->actions];
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
priv->hw_drop[!!attr->group];
break;
case RTE_FLOW_ACTION_TYPE_MARK:
- action_pos = at->actions_off[actions - at->actions];
acts->mark = true;
if (masks->conf &&
((const struct rte_flow_action_mark *)
masks->conf)->id)
- acts->rule_acts[action_pos].tag.value =
+ acts->rule_acts[dr_pos].tag.value =
mlx5_flow_mark_set
(((const struct rte_flow_action_mark *)
(actions->conf))->id);
else if (__flow_hw_act_data_general_append(priv, acts,
- actions->type, actions - action_start, action_pos))
+ actions->type,
+ src_pos, dr_pos))
goto err;
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
priv->hw_tag[!!attr->group];
__atomic_fetch_add(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED);
flow_hw_rxq_flag_set(dev, true);
break;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
- action_pos = at->actions_off[actions - at->actions];
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
priv->hw_push_vlan[type];
if (is_template_masked_push_vlan(masks->conf))
- acts->rule_acts[action_pos].push_vlan.vlan_hdr =
+ acts->rule_acts[dr_pos].push_vlan.vlan_hdr =
vlan_hdr_to_be32(actions);
else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos))
+ src_pos, dr_pos))
goto err;
of_vlan_offset = is_of_vlan_pcp_present(actions) ?
MLX5_HW_VLAN_PUSH_PCP_IDX :
@@ -1594,12 +1589,10 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
masks += of_vlan_offset;
break;
case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
- action_pos = at->actions_off[actions - at->actions];
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
priv->hw_pop_vlan[type];
break;
case RTE_FLOW_ACTION_TYPE_JUMP:
- action_pos = at->actions_off[actions - at->actions];
if (masks->conf &&
((const struct rte_flow_action_jump *)
masks->conf)->group) {
@@ -1610,17 +1603,16 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
(dev, cfg, jump_group, error);
if (!acts->jump)
goto err;
- acts->rule_acts[action_pos].action = (!!attr->group) ?
- acts->jump->hws_action :
- acts->jump->root_action;
+ acts->rule_acts[dr_pos].action = (!!attr->group) ?
+ acts->jump->hws_action :
+ acts->jump->root_action;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)){
+ src_pos, dr_pos)){
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_QUEUE:
- action_pos = at->actions_off[actions - at->actions];
if (masks->conf &&
((const struct rte_flow_action_queue *)
masks->conf)->index) {
@@ -1630,16 +1622,15 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
actions);
if (!acts->tir)
goto err;
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
acts->tir->action;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)) {
+ src_pos, dr_pos)) {
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_RSS:
- action_pos = at->actions_off[actions - at->actions];
if (actions->conf && masks->conf) {
acts->tir = flow_hw_tir_action_register
(dev,
@@ -1647,11 +1638,11 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
actions);
if (!acts->tir)
goto err;
- acts->rule_acts[action_pos].action =
+ acts->rule_acts[dr_pos].action =
acts->tir->action;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)) {
+ src_pos, dr_pos)) {
goto err;
}
break;
@@ -1663,7 +1654,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
enc_item_m = ((const struct rte_flow_action_vxlan_encap *)
masks->conf)->definition;
reformat_used = true;
- reformat_src = actions - action_start;
+ reformat_src = src_pos;
refmt_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
break;
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
@@ -1674,7 +1665,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
enc_item_m = ((const struct rte_flow_action_nvgre_encap *)
masks->conf)->definition;
reformat_used = true;
- reformat_src = actions - action_start;
+ reformat_src = src_pos;
refmt_type = MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
@@ -1704,7 +1695,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
refmt_type =
MLX5DR_ACTION_TYP_REFORMAT_L2_TO_TNL_L2;
}
- reformat_src = actions - action_start;
+ reformat_src = src_pos;
break;
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
reformat_used = true;
@@ -1720,34 +1711,22 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
NULL,
"Send to kernel action on root table is not supported in HW steering mode");
}
- action_pos = at->actions_off[actions - at->actions];
table_type = attr->ingress ? MLX5DR_TABLE_TYPE_NIC_RX :
((attr->egress) ? MLX5DR_TABLE_TYPE_NIC_TX :
- MLX5DR_TABLE_TYPE_FDB);
- acts->rule_acts[action_pos].action = priv->hw_send_to_kernel[table_type];
+ MLX5DR_TABLE_TYPE_FDB);
+ acts->rule_acts[dr_pos].action = priv->hw_send_to_kernel[table_type];
break;
case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
- err = flow_hw_modify_field_compile(dev, attr, action_start,
- actions, masks, acts, &mhdr,
- error);
+ err = flow_hw_modify_field_compile(dev, attr, actions,
+ masks, acts, &mhdr,
+ src_pos, error);
if (err)
goto err;
- /*
- * Adjust the action source position for the following.
- * ... / MODIFY_FIELD: rx_cpy_pos / (QUEUE|RSS) / ...
- * The next action will be Q/RSS, there will not be
- * another adjustment and the real source position of
- * the following actions will be decreased by 1.
- * No change of the total actions in the new template.
- */
- if ((actions - action_start) == at->rx_cpy_pos)
- action_start += 1;
break;
case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
- action_pos = at->actions_off[actions - at->actions];
if (flow_hw_represented_port_compile
- (dev, attr, action_start, actions,
- masks, acts, action_pos, error))
+ (dev, attr, actions,
+ masks, acts, src_pos, dr_pos, error))
goto err;
break;
case RTE_FLOW_ACTION_TYPE_METER:
@@ -1756,19 +1735,18 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
* Calculated DR offset is stored only for ASO_METER and FT
* is assumed to be the next action.
*/
- action_pos = at->actions_off[actions - at->actions];
- jump_pos = action_pos + 1;
+ jump_pos = dr_pos + 1;
if (actions->conf && masks->conf &&
((const struct rte_flow_action_meter *)
masks->conf)->mtr_id) {
err = flow_hw_meter_compile(dev, cfg,
- action_pos, jump_pos, actions, acts, error);
+ dr_pos, jump_pos, actions, acts, error);
if (err)
goto err;
} else if (__flow_hw_act_data_general_append(priv, acts,
- actions->type,
- actions - action_start,
- action_pos))
+ actions->type,
+ src_pos,
+ dr_pos))
goto err;
break;
case RTE_FLOW_ACTION_TYPE_AGE:
@@ -1781,11 +1759,10 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
NULL,
"Age action on root table is not supported in HW steering mode");
}
- action_pos = at->actions_off[actions - at->actions];
if (__flow_hw_act_data_general_append(priv, acts,
- actions->type,
- actions - action_start,
- action_pos))
+ actions->type,
+ src_pos,
+ dr_pos))
goto err;
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
@@ -1806,49 +1783,46 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
* counter.
*/
break;
- action_pos = at->actions_off[actions - at->actions];
if (masks->conf &&
((const struct rte_flow_action_count *)
masks->conf)->id) {
- err = flow_hw_cnt_compile(dev, action_pos, acts);
+ err = flow_hw_cnt_compile(dev, dr_pos, acts);
if (err)
goto err;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)) {
+ src_pos, dr_pos)) {
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
- action_pos = at->actions_off[actions - at->actions];
if (masks->conf) {
ct_idx = MLX5_ACTION_CTX_CT_GET_IDX
((uint32_t)(uintptr_t)actions->conf);
if (flow_hw_ct_compile(dev, MLX5_HW_INV_QUEUE, ct_idx,
- &acts->rule_acts[action_pos]))
+ &acts->rule_acts[dr_pos]))
goto err;
} else if (__flow_hw_act_data_general_append
(priv, acts, actions->type,
- actions - action_start, action_pos)) {
+ src_pos, dr_pos)) {
goto err;
}
break;
case RTE_FLOW_ACTION_TYPE_METER_MARK:
- action_pos = at->actions_off[actions - at->actions];
if (actions->conf && masks->conf &&
((const struct rte_flow_action_meter_mark *)
masks->conf)->profile) {
err = flow_hw_meter_mark_compile(dev,
- action_pos, actions,
- acts->rule_acts,
- &acts->mtr_id,
- MLX5_HW_INV_QUEUE);
+ dr_pos, actions,
+ acts->rule_acts,
+ &acts->mtr_id,
+ MLX5_HW_INV_QUEUE);
if (err)
goto err;
} else if (__flow_hw_act_data_general_append(priv, acts,
- actions->type,
- actions - action_start,
- action_pos))
+ actions->type,
+ src_pos,
+ dr_pos))
goto err;
break;
case RTE_FLOW_ACTION_TYPE_END:
@@ -1931,7 +1905,7 @@ __flow_hw_actions_translate(struct rte_eth_dev *dev,
if (shared_rfmt)
acts->rule_acts[at->reformat_off].reformat.offset = 0;
else if (__flow_hw_act_data_encap_append(priv, acts,
- (action_start + reformat_src)->type,
+ (at->actions + reformat_src)->type,
reformat_src, at->reformat_off, data_size))
goto err;
acts->encap_decap->shared = shared_rfmt;
@@ -4283,6 +4257,31 @@ flow_hw_validate_action_raw_encap(struct rte_eth_dev *dev __rte_unused,
return 0;
}
+/**
+ * Process `... / raw_decap / raw_encap / ...` actions sequence.
+ * The PMD handles the sequence as a single encap or decap reformat action,
+ * depending on the raw_encap configuration.
+ *
+ * The function assumes that the raw_decap / raw_encap locations
+ * in the actions template list comply with the relative HWS actions
+ * order required for the reformat configuration:
+ * ENCAP configuration must appear before [JUMP|DROP|PORT];
+ * DECAP configuration must appear at the template head.
+ */
+static uint64_t
+mlx5_decap_encap_reformat_type(const struct rte_flow_action *actions,
+ uint32_t encap_ind, uint64_t flags)
+{
+ const struct rte_flow_action_raw_encap *encap = actions[encap_ind].conf;
+
+ if ((flags & MLX5_FLOW_ACTION_DECAP) == 0)
+ return MLX5_FLOW_ACTION_ENCAP;
+ if (actions[encap_ind - 1].type != RTE_FLOW_ACTION_TYPE_RAW_DECAP)
+ return MLX5_FLOW_ACTION_ENCAP;
+ return encap->size >= MLX5_ENCAPSULATION_DECISION_SIZE ?
+ MLX5_FLOW_ACTION_ENCAP : MLX5_FLOW_ACTION_DECAP;
+}
+
static inline uint16_t
flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
struct rte_flow_action masks[],
@@ -4320,13 +4319,13 @@ flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
*/
for (i = act_num - 2; (int)i >= 0; i--) {
enum rte_flow_action_type type = actions[i].type;
+ uint64_t reformat_type;
if (type == RTE_FLOW_ACTION_TYPE_INDIRECT)
type = masks[i].type;
switch (type) {
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
- case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
case RTE_FLOW_ACTION_TYPE_DROP:
case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
case RTE_FLOW_ACTION_TYPE_JUMP:
@@ -4337,10 +4336,20 @@ flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
case RTE_FLOW_ACTION_TYPE_VOID:
case RTE_FLOW_ACTION_TYPE_END:
break;
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ reformat_type =
+ mlx5_decap_encap_reformat_type(actions, i,
+ flags);
+ if (reformat_type == MLX5_FLOW_ACTION_DECAP) {
+ i++;
+ goto insert;
+ }
+ if (actions[i - 1].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP)
+ i--;
+ break;
default:
i++; /* new MF inserted AFTER actions[i] */
goto insert;
- break;
}
}
i = 0;
@@ -4649,7 +4658,7 @@ action_template_set_type(struct rte_flow_actions_template *at,
unsigned int action_src, uint16_t *curr_off,
enum mlx5dr_action_type type)
{
- at->actions_off[action_src] = *curr_off;
+ at->dr_off[action_src] = *curr_off;
action_types[*curr_off] = type;
*curr_off = *curr_off + 1;
}
@@ -4680,11 +4689,13 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
* Both AGE and COUNT action need counter, the first one fills
* the action_types array, and the second only saves the offset.
*/
- if (*cnt_off == UINT16_MAX)
+ if (*cnt_off == UINT16_MAX) {
+ *cnt_off = *curr_off;
action_template_set_type(at, action_types,
action_src, curr_off,
MLX5DR_ACTION_TYP_CTR);
- at->actions_off[action_src] = *cnt_off;
+ }
+ at->dr_off[action_src] = *cnt_off;
break;
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
action_template_set_type(at, action_types, action_src, curr_off,
@@ -4804,7 +4815,7 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
}
break;
case RTE_FLOW_ACTION_TYPE_METER:
- at->actions_off[i] = curr_off;
+ at->dr_off[i] = curr_off;
action_types[curr_off++] = MLX5DR_ACTION_TYP_ASO_METER;
if (curr_off >= MLX5_HW_MAX_ACTS)
goto err_actions_num;
@@ -4812,14 +4823,14 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
break;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
type = mlx5_hw_dr_action_types[at->actions[i].type];
- at->actions_off[i] = curr_off;
+ at->dr_off[i] = curr_off;
action_types[curr_off++] = type;
i += is_of_vlan_pcp_present(at->actions + i) ?
MLX5_HW_VLAN_PUSH_PCP_IDX :
MLX5_HW_VLAN_PUSH_VID_IDX;
break;
case RTE_FLOW_ACTION_TYPE_METER_MARK:
- at->actions_off[i] = curr_off;
+ at->dr_off[i] = curr_off;
action_types[curr_off++] = MLX5DR_ACTION_TYP_ASO_METER;
if (curr_off >= MLX5_HW_MAX_ACTS)
goto err_actions_num;
@@ -4835,11 +4846,11 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
cnt_off = curr_off++;
action_types[cnt_off] = MLX5DR_ACTION_TYP_CTR;
}
- at->actions_off[i] = cnt_off;
+ at->dr_off[i] = cnt_off;
break;
default:
type = mlx5_hw_dr_action_types[at->actions[i].type];
- at->actions_off[i] = curr_off;
+ at->dr_off[i] = curr_off;
action_types[curr_off++] = type;
break;
}
@@ -5112,6 +5123,7 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
struct rte_flow_action mf_actions[MLX5_HW_MAX_ACTS];
struct rte_flow_action mf_masks[MLX5_HW_MAX_ACTS];
uint32_t expand_mf_num = 0;
+ uint16_t src_off[MLX5_HW_MAX_ACTS] = {0, };
if (mlx5_flow_hw_actions_validate(dev, attr, actions, masks,
&action_flags, error))
@@ -5190,6 +5202,8 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
act_num,
expand_mf_num);
act_num += expand_mf_num;
+ for (i = pos + expand_mf_num; i < act_num; i++)
+ src_off[i] += expand_mf_num;
action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
}
act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, ra, error);
@@ -5200,7 +5214,8 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
if (mask_len <= 0)
return NULL;
len += RTE_ALIGN(mask_len, 16);
- len += RTE_ALIGN(act_num * sizeof(*at->actions_off), 16);
+ len += RTE_ALIGN(act_num * sizeof(*at->dr_off), 16);
+ len += RTE_ALIGN(act_num * sizeof(*at->src_off), 16);
at = mlx5_malloc(MLX5_MEM_ZERO, len + sizeof(*at),
RTE_CACHE_LINE_SIZE, rte_socket_id());
if (!at) {
@@ -5224,13 +5239,15 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
if (mask_len <= 0)
goto error;
/* DR actions offsets in the third part. */
- at->actions_off = (uint16_t *)((uint8_t *)at->masks + mask_len);
+ at->dr_off = (uint16_t *)((uint8_t *)at->masks + mask_len);
+ at->src_off = RTE_PTR_ADD(at->dr_off,
+ RTE_ALIGN(act_num * sizeof(*at->dr_off), 16));
+ memcpy(at->src_off, src_off, act_num * sizeof(at->src_off[0]));
at->actions_num = act_num;
for (i = 0; i < at->actions_num; ++i)
- at->actions_off[i] = UINT16_MAX;
+ at->dr_off[i] = UINT16_MAX;
at->reformat_off = UINT16_MAX;
at->mhdr_off = UINT16_MAX;
- at->rx_cpy_pos = pos;
for (i = 0; actions->type != RTE_FLOW_ACTION_TYPE_END;
actions++, masks++, i++) {
const struct rte_flow_action_modify_field *info;
@@ -9547,14 +9564,15 @@ mlx5_mirror_terminal_action(const struct rte_flow_action *action)
static bool
mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
- const struct rte_flow_action *action)
+ const struct rte_flow_attr *flow_attr,
+ const struct rte_flow_action *action)
{
struct mlx5_priv *priv = dev->data->dev_private;
switch (action->type) {
case RTE_FLOW_ACTION_TYPE_QUEUE:
case RTE_FLOW_ACTION_TYPE_RSS:
- if (priv->sh->esw_mode)
+ if (flow_attr->transfer)
return false;
break;
case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
@@ -9562,7 +9580,7 @@ mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
- if (!priv->sh->esw_mode)
+ if (!priv->sh->esw_mode && !flow_attr->transfer)
return false;
if (action[0].type == RTE_FLOW_ACTION_TYPE_RAW_DECAP &&
action[1].type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
@@ -9584,19 +9602,22 @@ mlx5_mirror_validate_sample_action(struct rte_eth_dev *dev,
*/
static int
mlx5_hw_mirror_actions_list_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *flow_attr,
const struct rte_flow_action *actions)
{
if (actions[0].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
int i = 1;
bool valid;
const struct rte_flow_action_sample *sample = actions[0].conf;
- valid = mlx5_mirror_validate_sample_action(dev, sample->actions);
+ valid = mlx5_mirror_validate_sample_action(dev, flow_attr,
+ sample->actions);
if (!valid)
return -EINVAL;
if (actions[1].type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
i = 2;
sample = actions[1].conf;
- valid = mlx5_mirror_validate_sample_action(dev, sample->actions);
+ valid = mlx5_mirror_validate_sample_action(dev, flow_attr,
+ sample->actions);
if (!valid)
return -EINVAL;
}
@@ -9780,6 +9801,7 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
struct mlx5_mirror *mirror;
enum mlx5dr_table_type table_type;
struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_attr *flow_attr = &table_cfg->attr.flow_attr;
uint8_t reformat_buf[MLX5_MIRROR_MAX_CLONES_NUM][MLX5_ENCAP_MAX_LEN];
struct mlx5dr_action_dest_attr mirror_attr[MLX5_MIRROR_MAX_CLONES_NUM + 1];
enum mlx5dr_action_type array_action_types[MLX5_MIRROR_MAX_CLONES_NUM + 1]
@@ -9787,9 +9809,10 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
memset(mirror_attr, 0, sizeof(mirror_attr));
memset(array_action_types, 0, sizeof(array_action_types));
- table_type = get_mlx5dr_table_type(&table_cfg->attr.flow_attr);
+ table_type = get_mlx5dr_table_type(flow_attr);
hws_flags = mlx5_hw_act_flag[MLX5_HW_ACTION_FLAG_NONE_ROOT][table_type];
- clones_num = mlx5_hw_mirror_actions_list_validate(dev, actions);
+ clones_num = mlx5_hw_mirror_actions_list_validate(dev, flow_attr,
+ actions);
if (clones_num < 0) {
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
actions, "Invalid mirror list format");
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* [PATCH v7 10/10] net/mlx5: support indirect list METER_MARK action
2023-10-26 7:12 ` [PATCH v7 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (8 preceding siblings ...)
2023-10-26 7:12 ` [PATCH v7 09/10] net/mlx5: reformat HWS code for indirect list actions Gregory Etelson
@ 2023-10-26 7:12 ` Gregory Etelson
2023-10-29 7:53 ` [PATCH v7 00/10] net/mlx5: support indirect actions list Raslan Darawsheh
10 siblings, 0 replies; 81+ messages in thread
From: Gregory Etelson @ 2023-10-26 7:12 UTC (permalink / raw)
To: dev
Cc: getelson,
rasland, Suanming Mou, Matan Azrad, Viacheslav Ovsiienko,
Ori Kam
Support indirect list METER_MARK action.
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/mlx5_flow.c | 69 ++++-
drivers/net/mlx5/mlx5_flow.h | 70 ++++--
drivers/net/mlx5/mlx5_flow_hw.c | 430 +++++++++++++++++++++++++++-----
3 files changed, 484 insertions(+), 85 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 99b814d815..34252d66c0 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -75,8 +75,11 @@ mlx5_indirect_list_handles_release(struct rte_eth_dev *dev)
switch (e->type) {
#ifdef HAVE_MLX5_HWS_SUPPORT
case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
- mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)e, true);
+ mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)e);
break;
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
+ mlx5_destroy_legacy_indirect(dev, e);
+ break;
#endif
default:
DRV_LOG(ERR, "invalid indirect list type");
@@ -1169,7 +1172,24 @@ mlx5_flow_async_action_list_handle_destroy
const struct rte_flow_op_attr *op_attr,
struct rte_flow_action_list_handle *action_handle,
void *user_data, struct rte_flow_error *error);
-
+static int
+mlx5_flow_action_list_handle_query_update(struct rte_eth_dev *dev,
+ const
+ struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ struct rte_flow_error *error);
+static int
+mlx5_flow_async_action_list_handle_query_update(struct rte_eth_dev *dev,
+ uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct
+ rte_flow_action_list_handle *handle,
+ const void **update,
+ void **query,
+ enum rte_flow_query_update_mode mode,
+ void *user_data,
+ struct rte_flow_error *error);
static const struct rte_flow_ops mlx5_flow_ops = {
.validate = mlx5_flow_validate,
.create = mlx5_flow_create,
@@ -1219,6 +1239,10 @@ static const struct rte_flow_ops mlx5_flow_ops = {
mlx5_flow_async_action_list_handle_create,
.async_action_list_handle_destroy =
mlx5_flow_async_action_list_handle_destroy,
+ .action_list_handle_query_update =
+ mlx5_flow_action_list_handle_query_update,
+ .async_action_list_handle_query_update =
+ mlx5_flow_async_action_list_handle_query_update,
};
/* Tunnel information. */
@@ -11003,6 +11027,47 @@ mlx5_flow_async_action_list_handle_destroy
error);
}
+static int
+mlx5_flow_action_list_handle_query_update(struct rte_eth_dev *dev,
+ const
+ struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops,
+ action_list_handle_query_update, ENOTSUP);
+ return fops->action_list_handle_query_update(dev, handle, update, query,
+ mode, error);
+}
+
+static int
+mlx5_flow_async_action_list_handle_query_update(struct rte_eth_dev *dev,
+ uint32_t queue_id,
+ const
+ struct rte_flow_op_attr *op_attr,
+ const struct
+ rte_flow_action_list_handle *handle,
+ const void **update,
+ void **query,
+ enum
+ rte_flow_query_update_mode mode,
+ void *user_data,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+
+ MLX5_DRV_FOPS_OR_ERR(dev, fops,
+ async_action_list_handle_query_update, ENOTSUP);
+ return fops->async_action_list_handle_query_update(dev, queue_id, op_attr,
+ handle, update,
+ query, mode,
+ user_data, error);
+}
+
+
/**
* Destroy all indirect actions (shared RSS).
*
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 653f83cf55..3ea2548d2b 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -98,25 +98,40 @@ enum mlx5_indirect_type {
#define MLX5_ACTION_CTX_CT_GEN_IDX MLX5_INDIRECT_ACT_CT_GEN_IDX
enum mlx5_indirect_list_type {
- MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 1,
+ MLX5_INDIRECT_ACTION_LIST_TYPE_ERR = 0,
+ MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY = 1,
+ MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 2,
};
-/*
+/**
* Base type for indirect list type.
- * Actual indirect list type MUST override that type and put type spec data
- * after the `chain`.
*/
struct mlx5_indirect_list {
- /* type field MUST be the first */
+ /* Indirect list type. */
enum mlx5_indirect_list_type type;
+ /* Optional storage list entry */
LIST_ENTRY(mlx5_indirect_list) entry;
- /* put type specific data after chain */
};
+static __rte_always_inline void
+mlx5_indirect_list_add_entry(void *head, struct mlx5_indirect_list *elem)
+{
+ LIST_HEAD(, mlx5_indirect_list) *h = head;
+
+ LIST_INSERT_HEAD(h, elem, entry);
+}
+
+static __rte_always_inline void
+mlx5_indirect_list_remove_entry(struct mlx5_indirect_list *elem)
+{
+ if (elem->entry.le_prev)
+ LIST_REMOVE(elem, entry);
+}
+
static __rte_always_inline enum mlx5_indirect_list_type
-mlx5_get_indirect_list_type(const struct mlx5_indirect_list *obj)
+mlx5_get_indirect_list_type(const struct rte_flow_action_list_handle *obj)
{
- return obj->type;
+ return ((const struct mlx5_indirect_list *)obj)->type;
}
/* Matches on selected register. */
@@ -1240,9 +1255,12 @@ struct rte_flow_hw {
#pragma GCC diagnostic error "-Wpedantic"
#endif
-struct mlx5dr_action;
-typedef struct mlx5dr_action *
-(*indirect_list_callback_t)(const struct rte_flow_action *);
+struct mlx5_action_construct_data;
+typedef int
+(*indirect_list_callback_t)(struct rte_eth_dev *,
+ const struct mlx5_action_construct_data *,
+ const struct rte_flow_action *,
+ struct mlx5dr_rule_action *);
/* rte flow action translate to DR action struct. */
struct mlx5_action_construct_data {
@@ -1252,6 +1270,7 @@ struct mlx5_action_construct_data {
uint32_t idx; /* Data index. */
uint16_t action_src; /* rte_flow_action src offset. */
uint16_t action_dst; /* mlx5dr_rule_action dst offset. */
+ indirect_list_callback_t indirect_list_cb;
union {
struct {
/* encap data len. */
@@ -1291,10 +1310,8 @@ struct mlx5_action_construct_data {
} shared_counter;
struct {
uint32_t id;
+ uint32_t conf_masked:1;
} shared_meter;
- struct {
- indirect_list_callback_t cb;
- } indirect_list;
};
};
@@ -2017,7 +2034,21 @@ typedef int
const struct rte_flow_op_attr *op_attr,
struct rte_flow_action_list_handle *action_handle,
void *user_data, struct rte_flow_error *error);
-
+typedef int
+(*mlx5_flow_action_list_handle_query_update_t)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ struct rte_flow_error *error);
+typedef int
+(*mlx5_flow_async_action_list_handle_query_update_t)
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ void *user_data, struct rte_flow_error *error);
struct mlx5_flow_driver_ops {
mlx5_flow_validate_t validate;
@@ -2085,6 +2116,10 @@ struct mlx5_flow_driver_ops {
async_action_list_handle_create;
mlx5_flow_async_action_list_handle_destroy_t
async_action_list_handle_destroy;
+ mlx5_flow_action_list_handle_query_update_t
+ action_list_handle_query_update;
+ mlx5_flow_async_action_list_handle_query_update_t
+ async_action_list_handle_query_update;
};
/* mlx5_flow.c */
@@ -2820,6 +2855,9 @@ mlx5_indirect_list_handles_release(struct rte_eth_dev *dev);
#ifdef HAVE_MLX5_HWS_SUPPORT
struct mlx5_mirror;
void
-mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release);
+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror);
+void
+mlx5_destroy_legacy_indirect(struct rte_eth_dev *dev,
+ struct mlx5_indirect_list *ptr);
#endif
#endif /* RTE_PMD_MLX5_FLOW_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index f9f735ba75..b6a474021a 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -61,16 +61,23 @@
#define MLX5_MIRROR_MAX_CLONES_NUM 3
#define MLX5_MIRROR_MAX_SAMPLE_ACTIONS_LEN 4
+#define MLX5_HW_PORT_IS_PROXY(priv) \
+ (!!((priv)->sh->esw_mode && (priv)->master))
+
+
+struct mlx5_indlst_legacy {
+ struct mlx5_indirect_list indirect;
+ struct rte_flow_action_handle *handle;
+ enum rte_flow_action_type legacy_type;
+};
+
struct mlx5_mirror_clone {
enum rte_flow_action_type type;
void *action_ctx;
};
struct mlx5_mirror {
- /* type field MUST be the first */
- enum mlx5_indirect_list_type type;
- LIST_ENTRY(mlx5_indirect_list) entry;
-
+ struct mlx5_indirect_list indirect;
uint32_t clones_num;
struct mlx5dr_action *mirror_action;
struct mlx5_mirror_clone clone[MLX5_MIRROR_MAX_CLONES_NUM];
@@ -598,7 +605,7 @@ flow_hw_act_data_indirect_list_append(struct mlx5_priv *priv,
act_data = __flow_hw_act_data_alloc(priv, type, action_src, action_dst);
if (!act_data)
return -1;
- act_data->indirect_list.cb = cb;
+ act_data->indirect_list_cb = cb;
LIST_INSERT_HEAD(&acts->act_list, act_data, next);
return 0;
}
@@ -1416,46 +1423,211 @@ flow_hw_meter_mark_compile(struct rte_eth_dev *dev,
return 0;
}
-static struct mlx5dr_action *
-flow_hw_mirror_action(const struct rte_flow_action *action)
+static int
+flow_hw_translate_indirect_mirror(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused const struct mlx5_action_construct_data *act_data,
+ const struct rte_flow_action *action,
+ struct mlx5dr_rule_action *dr_rule)
+{
+ const struct rte_flow_action_indirect_list *list_conf = action->conf;
+ const struct mlx5_mirror *mirror = (typeof(mirror))list_conf->handle;
+
+ dr_rule->action = mirror->mirror_action;
+ return 0;
+}
+
+/**
+ * HWS mirror implemented as FW island.
+ * The action does not support indirect list flow configuration.
+ * If template handle was masked, use handle mirror action in flow rules.
+ * Otherwise let flow rule specify mirror handle.
+ */
+static int
+hws_table_tmpl_translate_indirect_mirror(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *mask,
+ struct mlx5_hw_actions *acts,
+ uint16_t action_src, uint16_t action_dst)
+{
+ int ret = 0;
+ const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
+
+ if (mask_conf && mask_conf->handle) {
+ /**
+ * If mirror handle was masked, assign fixed DR5 mirror action.
+ */
+ flow_hw_translate_indirect_mirror(dev, NULL, action,
+ &acts->rule_acts[action_dst]);
+ } else {
+ struct mlx5_priv *priv = dev->data->dev_private;
+ ret = flow_hw_act_data_indirect_list_append
+ (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
+ action_src, action_dst,
+ flow_hw_translate_indirect_mirror);
+ }
+ return ret;
+}
+
+static int
+flow_dr_set_meter(struct mlx5_priv *priv,
+ struct mlx5dr_rule_action *dr_rule,
+ const struct rte_flow_action_indirect_list *action_conf)
+{
+ const struct mlx5_indlst_legacy *legacy_obj =
+ (typeof(legacy_obj))action_conf->handle;
+ struct mlx5_aso_mtr_pool *mtr_pool = priv->hws_mpool;
+ uint32_t act_idx = (uint32_t)(uintptr_t)legacy_obj->handle;
+ uint32_t mtr_id = act_idx & (RTE_BIT32(MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
+ struct mlx5_aso_mtr *aso_mtr = mlx5_ipool_get(mtr_pool->idx_pool, mtr_id);
+
+ if (!aso_mtr)
+ return -EINVAL;
+ dr_rule->action = mtr_pool->action;
+ dr_rule->aso_meter.offset = aso_mtr->offset;
+ return 0;
+}
+
+__rte_always_inline static void
+flow_dr_mtr_flow_color(struct mlx5dr_rule_action *dr_rule, enum rte_color init_color)
+{
+ dr_rule->aso_meter.init_color =
+ (enum mlx5dr_action_aso_meter_color)rte_col_2_mlx5_col(init_color);
+}
+
+static int
+flow_hw_translate_indirect_meter(struct rte_eth_dev *dev,
+ const struct mlx5_action_construct_data *act_data,
+ const struct rte_flow_action *action,
+ struct mlx5dr_rule_action *dr_rule)
+{
+ int ret;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_indirect_list *action_conf = action->conf;
+ const struct rte_flow_indirect_update_flow_meter_mark **flow_conf =
+ (typeof(flow_conf))action_conf->conf;
+
+ /*
+ * Masked indirect handle set dr5 action during template table
+ * translation.
+ */
+ if (!dr_rule->action) {
+ ret = flow_dr_set_meter(priv, dr_rule, action_conf);
+ if (ret)
+ return ret;
+ }
+ if (!act_data->shared_meter.conf_masked) {
+ if (flow_conf && flow_conf[0] && flow_conf[0]->init_color < RTE_COLORS)
+ flow_dr_mtr_flow_color(dr_rule, flow_conf[0]->init_color);
+ }
+ return 0;
+}
+
+static int
+hws_table_tmpl_translate_indirect_meter(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *mask,
+ struct mlx5_hw_actions *acts,
+ uint16_t action_src, uint16_t action_dst)
+{
+ int ret;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_indirect_list *action_conf = action->conf;
+ const struct rte_flow_action_indirect_list *mask_conf = mask->conf;
+ bool is_handle_masked = mask_conf && mask_conf->handle;
+ bool is_conf_masked = mask_conf && mask_conf->conf && mask_conf->conf[0];
+ struct mlx5dr_rule_action *dr_rule = &acts->rule_acts[action_dst];
+
+ if (is_handle_masked) {
+ ret = flow_dr_set_meter(priv, dr_rule, action->conf);
+ if (ret)
+ return ret;
+ }
+ if (is_conf_masked) {
+ const struct
+ rte_flow_indirect_update_flow_meter_mark **flow_conf =
+ (typeof(flow_conf))action_conf->conf;
+ flow_dr_mtr_flow_color(dr_rule,
+ flow_conf[0]->init_color);
+ }
+ if (!is_handle_masked || !is_conf_masked) {
+ struct mlx5_action_construct_data *act_data;
+
+ ret = flow_hw_act_data_indirect_list_append
+ (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
+ action_src, action_dst, flow_hw_translate_indirect_meter);
+ if (ret)
+ return ret;
+ act_data = LIST_FIRST(&acts->act_list);
+ act_data->shared_meter.conf_masked = is_conf_masked;
+ }
+ return 0;
+}
+
+static int
+hws_table_tmpl_translate_indirect_legacy(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ const struct rte_flow_action *mask,
+ struct mlx5_hw_actions *acts,
+ uint16_t action_src, uint16_t action_dst)
{
- struct mlx5_mirror *mirror = (void *)(uintptr_t)action->conf;
+ int ret;
+ const struct rte_flow_action_indirect_list *indlst_conf = action->conf;
+ struct mlx5_indlst_legacy *indlst_obj = (typeof(indlst_obj))indlst_conf->handle;
+ uint32_t act_idx = (uint32_t)(uintptr_t)indlst_obj->handle;
+ uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
- return mirror->mirror_action;
+ switch (type) {
+ case MLX5_INDIRECT_ACTION_TYPE_METER_MARK:
+ ret = hws_table_tmpl_translate_indirect_meter(dev, action, mask,
+ acts, action_src,
+ action_dst);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
}
+/*
+ * template .. indirect_list handle Ht conf Ct ..
+ * mask .. indirect_list handle Hm conf Cm ..
+ *
+ * PMD requires Ht != 0 to resolve handle type.
+ * If Ht was masked (Hm != 0) DR5 action will be set according to Ht and will
+ * not change. Otherwise, DR5 action will be resolved during flow rule build.
+ * If Ct was masked (Cm != 0), table template processing updates base
+ * indirect action configuration with Ct parameters.
+ */
static int
table_template_translate_indirect_list(struct rte_eth_dev *dev,
const struct rte_flow_action *action,
const struct rte_flow_action *mask,
struct mlx5_hw_actions *acts,
- uint16_t action_src,
- uint16_t action_dst)
+ uint16_t action_src, uint16_t action_dst)
{
- int ret;
- bool is_masked = action->conf && mask->conf;
- struct mlx5_priv *priv = dev->data->dev_private;
+ int ret = 0;
enum mlx5_indirect_list_type type;
+ const struct rte_flow_action_indirect_list *list_conf = action->conf;
- if (!action->conf)
+ if (!list_conf || !list_conf->handle)
return -EINVAL;
- type = mlx5_get_indirect_list_type(action->conf);
+ type = mlx5_get_indirect_list_type(list_conf->handle);
switch (type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
+ ret = hws_table_tmpl_translate_indirect_legacy(dev, action, mask,
+ acts, action_src,
+ action_dst);
+ break;
case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
- if (is_masked) {
- acts->rule_acts[action_dst].action = flow_hw_mirror_action(action);
- } else {
- ret = flow_hw_act_data_indirect_list_append
- (priv, acts, RTE_FLOW_ACTION_TYPE_INDIRECT_LIST,
- action_src, action_dst, flow_hw_mirror_action);
- if (ret)
- return ret;
- }
+ ret = hws_table_tmpl_translate_indirect_mirror(dev, action, mask,
+ acts, action_src,
+ action_dst);
break;
default:
return -EINVAL;
}
- return 0;
+ return ret;
}
/**
@@ -2366,8 +2538,8 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
(int)action->type == act_data->type);
switch ((int)act_data->type) {
case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
- rule_acts[act_data->action_dst].action =
- act_data->indirect_list.cb(action);
+ act_data->indirect_list_cb(dev, act_data, actions,
+ &rule_acts[act_data->action_dst]);
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
if (flow_hw_shared_action_construct
@@ -4664,20 +4836,11 @@ action_template_set_type(struct rte_flow_actions_template *at,
}
static int
-flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
- unsigned int action_src,
+flow_hw_dr_actions_template_handle_shared(int type, uint32_t action_src,
enum mlx5dr_action_type *action_types,
uint16_t *curr_off, uint16_t *cnt_off,
struct rte_flow_actions_template *at)
{
- uint32_t type;
-
- if (!mask) {
- DRV_LOG(WARNING, "Unable to determine indirect action type "
- "without a mask specified");
- return -EINVAL;
- }
- type = mask->type;
switch (type) {
case RTE_FLOW_ACTION_TYPE_RSS:
action_template_set_type(at, action_types, action_src, curr_off,
@@ -4718,12 +4881,24 @@ static int
flow_hw_template_actions_list(struct rte_flow_actions_template *at,
unsigned int action_src,
enum mlx5dr_action_type *action_types,
- uint16_t *curr_off)
+ uint16_t *curr_off, uint16_t *cnt_off)
{
- enum mlx5_indirect_list_type list_type;
+ int ret;
+ const struct rte_flow_action_indirect_list *indlst_conf = at->actions[action_src].conf;
+ enum mlx5_indirect_list_type list_type = mlx5_get_indirect_list_type(indlst_conf->handle);
+ const union {
+ struct mlx5_indlst_legacy *legacy;
+ struct rte_flow_action_list_handle *handle;
+ } indlst_obj = { .handle = indlst_conf->handle };
- list_type = mlx5_get_indirect_list_type(at->actions[action_src].conf);
switch (list_type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY:
+ ret = flow_hw_dr_actions_template_handle_shared
+ (indlst_obj.legacy->legacy_type, action_src,
+ action_types, curr_off, cnt_off, at);
+ if (ret)
+ return ret;
+ break;
case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
action_template_set_type(at, action_types, action_src, curr_off,
MLX5DR_ACTION_TYP_DEST_ARRAY);
@@ -4769,17 +4944,14 @@ flow_hw_dr_actions_template_create(struct rte_flow_actions_template *at)
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
ret = flow_hw_template_actions_list(at, i, action_types,
- &curr_off);
+ &curr_off, &cnt_off);
if (ret)
return NULL;
break;
case RTE_FLOW_ACTION_TYPE_INDIRECT:
ret = flow_hw_dr_actions_template_handle_shared
- (&at->masks[i],
- i,
- action_types,
- &curr_off,
- &cnt_off, at);
+ (at->masks[i].type, i, action_types,
+ &curr_off, &cnt_off, at);
if (ret)
return NULL;
break;
@@ -5259,9 +5431,8 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
* Need to restore the indirect action index from action conf here.
*/
case RTE_FLOW_ACTION_TYPE_INDIRECT:
- case RTE_FLOW_ACTION_TYPE_INDIRECT_LIST:
- at->actions[i].conf = actions->conf;
- at->masks[i].conf = masks->conf;
+ at->actions[i].conf = ra[i].conf;
+ at->masks[i].conf = rm[i].conf;
break;
case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
info = actions->conf;
@@ -9519,18 +9690,16 @@ mlx5_mirror_destroy_clone(struct rte_eth_dev *dev,
}
void
-mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror, bool release)
+mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror)
{
uint32_t i;
- if (mirror->entry.le_prev)
- LIST_REMOVE(mirror, entry);
+ mlx5_indirect_list_remove_entry(&mirror->indirect);
for (i = 0; i < mirror->clones_num; i++)
mlx5_mirror_destroy_clone(dev, &mirror->clone[i]);
if (mirror->mirror_action)
mlx5dr_action_destroy(mirror->mirror_action);
- if (release)
- mlx5_free(mirror);
+ mlx5_free(mirror);
}
static inline enum mlx5dr_table_type
@@ -9825,7 +9994,8 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
actions, "Failed to allocate mirror context");
return NULL;
}
- mirror->type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
+
+ mirror->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
mirror->clones_num = clones_num;
for (i = 0; i < clones_num; i++) {
const struct rte_flow_action *clone_actions;
@@ -9856,15 +10026,72 @@ mlx5_hw_mirror_handle_create(struct rte_eth_dev *dev,
goto error;
}
- LIST_INSERT_HEAD(&priv->indirect_list_head,
- (struct mlx5_indirect_list *)mirror, entry);
+ mlx5_indirect_list_add_entry(&priv->indirect_list_head, &mirror->indirect);
return (struct rte_flow_action_list_handle *)mirror;
error:
- mlx5_hw_mirror_destroy(dev, mirror, true);
+ mlx5_hw_mirror_destroy(dev, mirror);
return NULL;
}
+void
+mlx5_destroy_legacy_indirect(__rte_unused struct rte_eth_dev *dev,
+ struct mlx5_indirect_list *ptr)
+{
+ struct mlx5_indlst_legacy *obj = (typeof(obj))ptr;
+
+ switch (obj->legacy_type) {
+ case RTE_FLOW_ACTION_TYPE_METER_MARK:
+ break; /* ASO meters were released in mlx5_flow_meter_flush() */
+ default:
+ break;
+ }
+ mlx5_free(obj);
+}
+
+static struct rte_flow_action_list_handle *
+mlx5_create_legacy_indlst(struct rte_eth_dev *dev, uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *actions,
+ void *user_data, struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_indlst_legacy *indlst_obj = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(*indlst_obj),
+ 0, SOCKET_ID_ANY);
+
+ if (!indlst_obj)
+ return NULL;
+ indlst_obj->handle = flow_hw_action_handle_create(dev, queue, attr, conf,
+ actions, user_data,
+ error);
+ if (!indlst_obj->handle) {
+ mlx5_free(indlst_obj);
+ return NULL;
+ }
+ indlst_obj->legacy_type = actions[0].type;
+ indlst_obj->indirect.type = MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY;
+ mlx5_indirect_list_add_entry(&priv->indirect_list_head, &indlst_obj->indirect);
+ return (struct rte_flow_action_list_handle *)indlst_obj;
+}
+
+static __rte_always_inline enum mlx5_indirect_list_type
+flow_hw_inlist_type_get(const struct rte_flow_action *actions)
+{
+ switch (actions[0].type) {
+ case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ return MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
+ case RTE_FLOW_ACTION_TYPE_METER_MARK:
+ return actions[1].type == RTE_FLOW_ACTION_TYPE_END ?
+ MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY :
+ MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;
+ default:
+ break;
+ }
+ return MLX5_INDIRECT_ACTION_LIST_TYPE_ERR;
+}
+
static struct rte_flow_action_list_handle *
flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
const struct rte_flow_op_attr *attr,
@@ -9875,6 +10102,7 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
{
struct mlx5_hw_q_job *job = NULL;
bool push = flow_hw_action_push(attr);
+ enum mlx5_indirect_list_type list_type;
struct rte_flow_action_list_handle *handle;
struct mlx5_priv *priv = dev->data->dev_private;
const struct mlx5_flow_template_table_cfg table_cfg = {
@@ -9893,6 +10121,16 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
NULL, "No action list");
return NULL;
}
+ list_type = flow_hw_inlist_type_get(actions);
+ if (list_type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
+ /*
+ * Legacy indirect actions already have
+ * async resources management. No need to do it twice.
+ */
+ handle = mlx5_create_legacy_indlst(dev, queue, attr, conf,
+ actions, user_data, error);
+ goto end;
+ }
if (attr) {
job = flow_hw_action_job_init(priv, queue, NULL, user_data,
NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
@@ -9900,8 +10138,8 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
if (!job)
return NULL;
}
- switch (actions[0].type) {
- case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ switch (list_type) {
+ case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
handle = mlx5_hw_mirror_handle_create(dev, &table_cfg,
actions, error);
break;
@@ -9915,6 +10153,7 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
flow_hw_action_finalize(dev, queue, job, push, false,
handle != NULL);
}
+end:
return handle;
}
@@ -9943,6 +10182,15 @@ flow_hw_async_action_list_handle_destroy
enum mlx5_indirect_list_type type =
mlx5_get_indirect_list_type((void *)handle);
+ if (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
+ struct mlx5_indlst_legacy *legacy = (typeof(legacy))handle;
+
+ ret = flow_hw_action_handle_destroy(dev, queue, attr,
+ legacy->handle,
+ user_data, error);
+ mlx5_indirect_list_remove_entry(&legacy->indirect);
+ goto end;
+ }
if (attr) {
job = flow_hw_action_job_init(priv, queue, NULL, user_data,
NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
@@ -9952,20 +10200,17 @@ flow_hw_async_action_list_handle_destroy
}
switch (type) {
case MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR:
- mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle, false);
+ mlx5_hw_mirror_destroy(dev, (struct mlx5_mirror *)handle);
break;
default:
- handle = NULL;
ret = rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "Invalid indirect list handle");
}
if (job) {
- job->action = handle;
- flow_hw_action_finalize(dev, queue, job, push, false,
- handle != NULL);
+ flow_hw_action_finalize(dev, queue, job, push, false, true);
}
- mlx5_free(handle);
+end:
return ret;
}
@@ -9979,6 +10224,53 @@ flow_hw_action_list_handle_destroy(struct rte_eth_dev *dev,
error);
}
+static int
+flow_hw_async_action_list_handle_query_update
+ (struct rte_eth_dev *dev, uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ const struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ void *user_data, struct rte_flow_error *error)
+{
+ enum mlx5_indirect_list_type type =
+ mlx5_get_indirect_list_type((const void *)handle);
+
+ if (type == MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY) {
+ struct mlx5_indlst_legacy *legacy = (void *)(uintptr_t)handle;
+
+ if (update && query)
+ return flow_hw_async_action_handle_query_update
+ (dev, queue_id, attr, legacy->handle,
+ update, query, mode, user_data, error);
+ else if (update && update[0])
+ return flow_hw_action_handle_update(dev, queue_id, attr,
+ legacy->handle, update[0],
+ user_data, error);
+ else if (query && query[0])
+ return flow_hw_action_handle_query(dev, queue_id, attr,
+ legacy->handle, query[0],
+ user_data, error);
+ else
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "invalid legacy handle query_update parameters");
+ }
+ return -ENOTSUP;
+}
+
+static int
+flow_hw_action_list_handle_query_update(struct rte_eth_dev *dev,
+ const struct rte_flow_action_list_handle *handle,
+ const void **update, void **query,
+ enum rte_flow_query_update_mode mode,
+ struct rte_flow_error *error)
+{
+ return flow_hw_async_action_list_handle_query_update
+ (dev, MLX5_HW_INV_QUEUE, NULL, handle,
+ update, query, mode, NULL, error);
+}
+
const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.info_get = flow_hw_info_get,
.configure = flow_hw_configure,
@@ -10009,10 +10301,14 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.action_query_update = flow_hw_action_query_update,
.action_list_handle_create = flow_hw_action_list_handle_create,
.action_list_handle_destroy = flow_hw_action_list_handle_destroy,
+ .action_list_handle_query_update =
+ flow_hw_action_list_handle_query_update,
.async_action_list_handle_create =
flow_hw_async_action_list_handle_create,
.async_action_list_handle_destroy =
flow_hw_async_action_list_handle_destroy,
+ .async_action_list_handle_query_update =
+ flow_hw_async_action_list_handle_query_update,
.query = flow_hw_query,
.get_aged_flows = flow_hw_get_aged_flows,
.get_q_aged_flows = flow_hw_get_q_aged_flows,
--
2.39.2
^ permalink raw reply [flat|nested] 81+ messages in thread
* RE: [PATCH v7 00/10] net/mlx5: support indirect actions list
2023-10-26 7:12 ` [PATCH v7 00/10] net/mlx5: support indirect actions list Gregory Etelson
` (9 preceding siblings ...)
2023-10-26 7:12 ` [PATCH v7 10/10] net/mlx5: support indirect list METER_MARK action Gregory Etelson
@ 2023-10-29 7:53 ` Raslan Darawsheh
10 siblings, 0 replies; 81+ messages in thread
From: Raslan Darawsheh @ 2023-10-29 7:53 UTC (permalink / raw)
To: Gregory Etelson, dev; +Cc: Maayan Kashani
Hi,
> -----Original Message-----
> From: Gregory Etelson <getelson@nvidia.com>
> Sent: Thursday, October 26, 2023 10:12 AM
> To: dev@dpdk.org
> Cc: Gregory Etelson <getelson@nvidia.com>; Maayan Kashani
> <mkashani@nvidia.com>; Raslan Darawsheh <rasland@nvidia.com>
> Subject: [PATCH v7 00/10] net/mlx5: support indirect actions list
>
> Add MLX5 PMD support for indirect actions list.
>
> Erez Shitrit (1):
> net/mlx5/hws: allow destination into default miss FT
>
> Gregory Etelson (4):
> net/mlx5: reformat HWS code for HWS mirror action
> net/mlx5: support HWS mirror action
> net/mlx5: reformat HWS code for indirect list actions
> net/mlx5: support indirect list METER_MARK action
>
> Haifei Luo (1):
> net/mlx5/hws: support reformat for hws mirror
>
> Hamdan Igbaria (3):
> net/mlx5/hws: add support for reformat DevX object
> net/mlx5/hws: support creating of dynamic forward table and FTE
> net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action
>
> Shun Hao (1):
> net/mlx5/hws: add support for mirroring
>
> doc/guides/nics/features/mlx5.ini | 1 +
> doc/guides/rel_notes/release_23_11.rst | 1 +
> drivers/common/mlx5/mlx5_prm.h | 81 +-
> drivers/net/mlx5/hws/mlx5dr.h | 34 +
> drivers/net/mlx5/hws/mlx5dr_action.c | 210 +++-
> drivers/net/mlx5/hws/mlx5dr_action.h | 8 +
> drivers/net/mlx5/hws/mlx5dr_cmd.c | 143 ++-
> drivers/net/mlx5/hws/mlx5dr_cmd.h | 49 +-
> drivers/net/mlx5/hws/mlx5dr_debug.c | 1 +
> drivers/net/mlx5/hws/mlx5dr_internal.h | 5 +
> drivers/net/mlx5/hws/mlx5dr_send.c | 5 -
> drivers/net/mlx5/hws/mlx5dr_table.c | 8 +-
> drivers/net/mlx5/mlx5.c | 1 +
> drivers/net/mlx5/mlx5.h | 2 +
> drivers/net/mlx5/mlx5_flow.c | 199 ++++
> drivers/net/mlx5/mlx5_flow.h | 111 ++-
> drivers/net/mlx5/mlx5_flow_hw.c | 1217 +++++++++++++++++++++---
> 17 files changed, 1908 insertions(+), 168 deletions(-)
>
> --
> v3: Add ACK to patches in the series.
> v4: Squash reformat patches.
> v5: Update release notes.
> Fix code style.
> v6: Fix code style.
> v7: Fix incremental compilation failure.
> --
> 2.39.2
Series applied to next-net-mlx,
Kindest regards,
Raslan Darawsheh
^ permalink raw reply [flat|nested] 81+ messages in thread
end of thread, other threads:[~2023-10-29 7:53 UTC | newest]
Thread overview: 81+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-09-27 19:10 [PATCH 0/3] net/mlx5: support indirect list actions Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 01/16] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 02/16] net/mlx5/hws: support creating of dynamic forward table and FTE Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 03/16] net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 04/16] net/mlx5/hws: add support for mirroring Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 05/16] net/mlx5/hws: allow destination into default miss FT Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 06/16] net/mlx5/hws: support reformat for hws mirror Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 07/16] net/mlx5: reformat HWS code Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 08/16] net/mlx5: support HWS mirror action Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 09/16] net/mlx5: fix mirror action validation Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 10/16] net/mlx5: fix in shared counter and age template action create Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 11/16] net/mlx5: fix modify field expansion for raw DECAP / ENCAP Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 12/16] net/mlx5: refactor HWS code Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 13/16] net/mlx5: fix RTE action location tracking in a template Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 14/16] net/mlx5: fix mirror redirect action Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 15/16] net/mlx5: support indirect list METER_MARK action Gregory Etelson
2023-10-16 18:42 ` [PATCH v2 16/16] net/mlx5: fix METER_MARK indirection list callback Gregory Etelson
2023-10-17 7:56 ` [PATCH v2 01/16] net/mlx5/hws: add support for reformat DevX object Suanming Mou
2023-10-17 7:31 ` [PATCH v2 00/16] net/mlx5: support indirect actions list Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 " Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 01/16] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 02/16] net/mlx5/hws: support creating of dynamic forward table and FTE Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 03/16] net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 04/16] net/mlx5/hws: add support for mirroring Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 05/16] net/mlx5/hws: allow destination into default miss FT Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 06/16] net/mlx5/hws: support reformat for hws mirror Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 07/16] net/mlx5: reformat HWS code Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 08/16] net/mlx5: support HWS mirror action Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 09/16] net/mlx5: fix mirror action validation Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 10/16] net/mlx5: fix in shared counter and age template action create Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 11/16] net/mlx5: fix modify field expansion for raw DECAP / ENCAP Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 12/16] net/mlx5: refactor HWS code Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 13/16] net/mlx5: fix RTE action location tracking in a template Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 14/16] net/mlx5: fix mirror redirect action Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 15/16] net/mlx5: support indirect list METER_MARK action Gregory Etelson
2023-10-17 8:09 ` [PATCH v3 16/16] net/mlx5: fix METER_MARK indirection list callback Gregory Etelson
2023-10-23 12:42 ` [PATCH v4 00/10] net/mlx5: support indirect actions list Gregory Etelson
2023-10-23 12:42 ` [PATCH v4 01/10] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
2023-10-23 12:42 ` [PATCH v4 02/10] net/mlx5/hws: support creating of dynamic forward table and FTE Gregory Etelson
2023-10-23 12:42 ` [PATCH v4 03/10] net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action Gregory Etelson
2023-10-23 12:42 ` [PATCH v4 04/10] net/mlx5/hws: add support for mirroring Gregory Etelson
2023-10-23 12:42 ` [PATCH v4 05/10] net/mlx5/hws: allow destination into default miss FT Gregory Etelson
2023-10-23 12:42 ` [PATCH v4 06/10] net/mlx5/hws: support reformat for hws mirror Gregory Etelson
2023-10-23 12:42 ` [PATCH v4 07/10] net/mlx5: reformat HWS code for HWS mirror action Gregory Etelson
2023-10-23 12:42 ` [PATCH v4 08/10] net/mlx5: support " Gregory Etelson
2023-10-23 12:42 ` [PATCH v4 09/10] net/mlx5: reformat HWS code for indirect list actions Gregory Etelson
2023-10-23 12:42 ` [PATCH v4 10/10] net/mlx5: support indirect list METER_MARK action Gregory Etelson
2023-10-25 10:27 ` [PATCH v5 00/10] net/mlx5: support indirect actions list Gregory Etelson
2023-10-25 10:27 ` [PATCH v5 01/10] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
2023-10-25 10:27 ` [PATCH v5 02/10] net/mlx5/hws: support creating of dynamic forward table and FTE Gregory Etelson
2023-10-25 10:27 ` [PATCH v5 03/10] net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action Gregory Etelson
2023-10-25 10:27 ` [PATCH v5 04/10] net/mlx5/hws: add support for mirroring Gregory Etelson
2023-10-25 10:27 ` [PATCH v5 05/10] net/mlx5/hws: allow destination into default miss FT Gregory Etelson
2023-10-25 10:27 ` [PATCH v5 06/10] net/mlx5/hws: support reformat for hws mirror Gregory Etelson
2023-10-25 10:27 ` [PATCH v5 07/10] net/mlx5: reformat HWS code for HWS mirror action Gregory Etelson
2023-10-25 10:27 ` [PATCH v5 08/10] net/mlx5: support " Gregory Etelson
2023-10-25 10:27 ` [PATCH v5 09/10] net/mlx5: reformat HWS code for indirect list actions Gregory Etelson
2023-10-25 10:27 ` [PATCH v5 10/10] net/mlx5: support indirect list METER_MARK action Gregory Etelson
2023-10-25 11:22 ` [PATCH v6 00/10] net/mlx5: support indirect actions list Gregory Etelson
2023-10-25 11:22 ` [PATCH v6 01/10] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
2023-10-25 11:22 ` [PATCH v6 02/10] net/mlx5/hws: support creating of dynamic forward table and FTE Gregory Etelson
2023-10-25 11:22 ` [PATCH v6 03/10] net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action Gregory Etelson
2023-10-25 11:22 ` [PATCH v6 04/10] net/mlx5/hws: add support for mirroring Gregory Etelson
2023-10-25 11:22 ` [PATCH v6 05/10] net/mlx5/hws: allow destination into default miss FT Gregory Etelson
2023-10-25 11:22 ` [PATCH v6 06/10] net/mlx5/hws: support reformat for hws mirror Gregory Etelson
2023-10-25 11:22 ` [PATCH v6 07/10] net/mlx5: reformat HWS code for HWS mirror action Gregory Etelson
2023-10-25 11:22 ` [PATCH v6 08/10] net/mlx5: support " Gregory Etelson
2023-10-25 11:22 ` [PATCH v6 09/10] net/mlx5: reformat HWS code for indirect list actions Gregory Etelson
2023-10-25 11:22 ` [PATCH v6 10/10] net/mlx5: support indirect list METER_MARK action Gregory Etelson
2023-10-26 7:12 ` [PATCH v7 00/10] net/mlx5: support indirect actions list Gregory Etelson
2023-10-26 7:12 ` [PATCH v7 01/10] net/mlx5/hws: add support for reformat DevX object Gregory Etelson
2023-10-26 7:12 ` [PATCH v7 02/10] net/mlx5/hws: support creating of dynamic forward table and FTE Gregory Etelson
2023-10-26 7:12 ` [PATCH v7 03/10] net/mlx5/hws: add mlx5dr DevX object struct to mlx5dr action Gregory Etelson
2023-10-26 7:12 ` [PATCH v7 04/10] net/mlx5/hws: add support for mirroring Gregory Etelson
2023-10-26 7:12 ` [PATCH v7 05/10] net/mlx5/hws: allow destination into default miss FT Gregory Etelson
2023-10-26 7:12 ` [PATCH v7 06/10] net/mlx5/hws: support reformat for hws mirror Gregory Etelson
2023-10-26 7:12 ` [PATCH v7 07/10] net/mlx5: reformat HWS code for HWS mirror action Gregory Etelson
2023-10-26 7:12 ` [PATCH v7 08/10] net/mlx5: support " Gregory Etelson
2023-10-26 7:12 ` [PATCH v7 09/10] net/mlx5: reformat HWS code for indirect list actions Gregory Etelson
2023-10-26 7:12 ` [PATCH v7 10/10] net/mlx5: support indirect list METER_MARK action Gregory Etelson
2023-10-29 7:53 ` [PATCH v7 00/10] net/mlx5: support indirect actions list Raslan Darawsheh
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).