* [PATCH 2/4] net/mlx5: add flow rule insertion by index
2023-01-26 23:40 [PATCH 0/4] net/mlx5: add template table insertion and matching types Alexander Kozyrev
2023-01-26 23:40 ` [PATCH 1/4] net/mlx5: add table insertion type and hash function Alexander Kozyrev
@ 2023-01-26 23:40 ` Alexander Kozyrev
2023-03-06 15:17 ` Slava Ovsiienko
2023-01-26 23:40 ` [PATCH 3/4] net/mlx5: add hash result metadata to modify field Alexander Kozyrev
` (2 subsequent siblings)
4 siblings, 1 reply; 10+ messages in thread
From: Alexander Kozyrev @ 2023-01-26 23:40 UTC (permalink / raw)
To: dev; +Cc: thomas, orika, rasland, matan, viacheslavo
The new Flow API allows inserting flow rules at a specified
index for tables with the index-based insertion type.
Implement rte_flow_async_create_by_index API in mlx5 PMD.
Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
---
drivers/net/mlx5/mlx5_flow.c | 61 +++++++++++++++++
drivers/net/mlx5/mlx5_flow.h | 12 ++++
drivers/net/mlx5/mlx5_flow_hw.c | 114 ++++++++++++++++++++++++++++++++
3 files changed, 187 insertions(+)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index f5e2831480..ba1eb5309b 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1027,6 +1027,16 @@ mlx5_flow_async_flow_create(struct rte_eth_dev *dev,
uint8_t action_template_index,
void *user_data,
struct rte_flow_error *error);
+static struct rte_flow *
+mlx5_flow_async_flow_create_by_index(struct rte_eth_dev *dev,
+ uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ struct rte_flow_template_table *table,
+ uint32_t rule_index,
+ const struct rte_flow_action actions[],
+ uint8_t action_template_index,
+ void *user_data,
+ struct rte_flow_error *error);
static int
mlx5_flow_async_flow_destroy(struct rte_eth_dev *dev,
uint32_t queue,
@@ -1107,6 +1117,7 @@ static const struct rte_flow_ops mlx5_flow_ops = {
.template_table_create = mlx5_flow_table_create,
.template_table_destroy = mlx5_flow_table_destroy,
.async_create = mlx5_flow_async_flow_create,
+ .async_create_by_index = mlx5_flow_async_flow_create_by_index,
.async_destroy = mlx5_flow_async_flow_destroy,
.pull = mlx5_flow_pull,
.push = mlx5_flow_push,
@@ -8853,6 +8864,56 @@ mlx5_flow_async_flow_create(struct rte_eth_dev *dev,
user_data, error);
}
+/**
+ * Enqueue flow creation by index.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] queue_id
+ * The queue to create the flow.
+ * @param[in] attr
+ * Pointer to the flow operation attributes.
+ * @param[in] rule_index
+ * The rule index in the table at which to insert the flow.
+ * @param[in] actions
+ * Action with flow spec value.
+ * @param[in] action_template_index
+ * The action pattern flow follows from the table.
+ * @param[in] user_data
+ * Pointer to the user_data.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * Flow pointer on success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow *
+mlx5_flow_async_flow_create_by_index(struct rte_eth_dev *dev,
+ uint32_t queue_id,
+ const struct rte_flow_op_attr *attr,
+ struct rte_flow_template_table *table,
+ uint32_t rule_index,
+ const struct rte_flow_action actions[],
+ uint8_t action_template_index,
+ void *user_data,
+ struct rte_flow_error *error)
+{
+ const struct mlx5_flow_driver_ops *fops;
+ struct rte_flow_attr fattr = {0};
+
+ if (flow_get_drv_type(dev, &fattr) != MLX5_FLOW_TYPE_HW) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "flow_q create with incorrect steering mode");
+ return NULL;
+ }
+ fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+ return fops->async_flow_create_by_index(dev, queue_id, attr, table,
+ rule_index, actions, action_template_index,
+ user_data, error);
+}
+
/**
* Enqueue flow destruction.
*
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index e376dcae93..c2f9ffd760 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1149,6 +1149,7 @@ struct rte_flow_hw {
uint32_t age_idx;
cnt_id_t cnt_id;
uint32_t mtr_id;
+ uint32_t rule_idx;
uint8_t rule[0]; /* HWS layer data struct. */
} __rte_packed;
@@ -1810,6 +1811,16 @@ typedef struct rte_flow *(*mlx5_flow_async_flow_create_t)
uint8_t action_template_index,
void *user_data,
struct rte_flow_error *error);
+typedef struct rte_flow *(*mlx5_flow_async_flow_create_by_index_t)
+ (struct rte_eth_dev *dev,
+ uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ struct rte_flow_template_table *table,
+ uint32_t rule_index,
+ const struct rte_flow_action actions[],
+ uint8_t action_template_index,
+ void *user_data,
+ struct rte_flow_error *error);
typedef int (*mlx5_flow_async_flow_destroy_t)
(struct rte_eth_dev *dev,
uint32_t queue,
@@ -1912,6 +1923,7 @@ struct mlx5_flow_driver_ops {
mlx5_flow_table_create_t template_table_create;
mlx5_flow_table_destroy_t template_table_destroy;
mlx5_flow_async_flow_create_t async_flow_create;
+ mlx5_flow_async_flow_create_by_index_t async_flow_create_by_index;
mlx5_flow_async_flow_destroy_t async_flow_destroy;
mlx5_flow_pull_t pull;
mlx5_flow_push_t push;
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 8002c88e4a..b209b448c6 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -2586,6 +2586,118 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,
return NULL;
}
+/**
+ * Enqueue HW steering flow creation by index.
+ *
+ * The flow will be applied to the HW only if the postpone bit is not set or
+ * the extra push function is called.
+ * The flow creation status should be checked from dequeue result.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] queue
+ * The queue to create the flow.
+ * @param[in] attr
+ * Pointer to the flow operation attributes.
+ * @param[in] rule_index
+ * The rule index in the table at which to insert the flow.
+ * @param[in] actions
+ * Action with flow spec value.
+ * @param[in] action_template_index
+ * The action pattern flow follows from the table.
+ * @param[in] user_data
+ * Pointer to the user_data.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * Flow pointer on success, NULL otherwise and rte_errno is set.
+ */
+static struct rte_flow *
+flow_hw_async_flow_create_by_index(struct rte_eth_dev *dev,
+ uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ struct rte_flow_template_table *table,
+ uint32_t rule_index,
+ const struct rte_flow_action actions[],
+ uint8_t action_template_index,
+ void *user_data,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5dr_rule_attr rule_attr = {
+ .queue_id = queue,
+ .user_data = user_data,
+ .burst = attr->postpone,
+ };
+ struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
+ struct rte_flow_hw *flow;
+ struct mlx5_hw_q_job *job;
+ uint32_t flow_idx;
+ int ret;
+
+ if (unlikely(rule_index >= table->cfg.attr.nb_flows)) {
+ rte_errno = EINVAL;
+ goto error;
+ }
+ if (unlikely(!priv->hw_q[queue].job_idx)) {
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ flow = mlx5_ipool_zmalloc(table->flow, &flow_idx);
+ if (!flow)
+ goto error;
+ /*
+ * Set the table here in order to know the destination table
+ * when free the flow afterwards.
+ */
+ flow->table = table;
+ flow->idx = flow_idx;
+ job = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];
+ /*
+ * Set the job type here in order to know if the flow memory
+ * should be freed or not when get the result from dequeue.
+ */
+ job->type = MLX5_HW_Q_JOB_TYPE_CREATE;
+ job->flow = flow;
+ job->user_data = user_data;
+ rule_attr.user_data = job;
+ /*
+ * Set the rule index.
+ */
+ MLX5_ASSERT(flow_idx > 0);
+ rule_attr.rule_idx = rule_index;
+ flow->rule_idx = rule_index;
+ /*
+ * Construct the flow actions based on the input actions.
+ * The implicitly appended action is always fixed, like metadata
+ * copy action from FDB to NIC Rx.
+ * No need to copy and construct a new "actions" list based on the
+ * user's input, in order to save the cost.
+ */
+ if (flow_hw_actions_construct(dev, job,
+ &table->ats[action_template_index],
+ action_template_index, actions,
+ rule_acts, queue, error)) {
+ rte_errno = EINVAL;
+ goto free;
+ }
+ ret = mlx5dr_rule_create(table->matcher,
+ 0, NULL, action_template_index, rule_acts,
+ &rule_attr, (struct mlx5dr_rule *)flow->rule);
+ if (likely(!ret))
+ return (struct rte_flow *)flow;
+free:
+ /* Flow created fail, return the descriptor and flow memory. */
+ mlx5_ipool_free(table->flow, flow_idx);
+ priv->hw_q[queue].job_idx++;
+error:
+ rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "fail to create rte flow");
+ return NULL;
+}
+
/**
* Enqueue HW steering flow destruction.
*
@@ -2636,6 +2748,7 @@ flow_hw_async_flow_destroy(struct rte_eth_dev *dev,
job->user_data = user_data;
job->flow = fh;
rule_attr.user_data = job;
+ rule_attr.rule_idx = fh->rule_idx;
ret = mlx5dr_rule_destroy((struct mlx5dr_rule *)fh->rule, &rule_attr);
if (likely(!ret))
return 0;
@@ -8345,6 +8458,7 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
.template_table_create = flow_hw_template_table_create,
.template_table_destroy = flow_hw_table_destroy,
.async_flow_create = flow_hw_async_flow_create,
+ .async_flow_create_by_index = flow_hw_async_flow_create_by_index,
.async_flow_destroy = flow_hw_async_flow_destroy,
.pull = flow_hw_pull,
.push = flow_hw_push,
--
2.18.2
^ permalink raw reply [flat|nested] 10+ messages in thread
* [PATCH 3/4] net/mlx5: add hash result metadata to modify field
2023-01-26 23:40 [PATCH 0/4] net/mlx5: add template table insertion and matching types Alexander Kozyrev
2023-01-26 23:40 ` [PATCH 1/4] net/mlx5: add table insertion type and hash function Alexander Kozyrev
2023-01-26 23:40 ` [PATCH 2/4] net/mlx5: add flow rule insertion by index Alexander Kozyrev
@ 2023-01-26 23:40 ` Alexander Kozyrev
2023-03-06 15:18 ` Slava Ovsiienko
2023-01-26 23:40 ` [PATCH 4/4] net/mlx5: define index register for linear tables Alexander Kozyrev
2023-03-07 11:13 ` [PATCH 0/4] net/mlx5: add template table insertion and matching types Raslan Darawsheh
4 siblings, 1 reply; 10+ messages in thread
From: Alexander Kozyrev @ 2023-01-26 23:40 UTC (permalink / raw)
To: dev; +Cc: thomas, orika, rasland, matan, viacheslavo
Allow copy of the hash result via the modify_field Flow API.
Setting this value is not allowed; it is a read-only field.
Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
---
drivers/net/mlx5/mlx5_flow_dv.c | 12 ++++++++++++
drivers/net/mlx5/mlx5_flow_hw.c | 5 +++--
2 files changed, 15 insertions(+), 2 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 7ca909999b..84fc725738 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -1390,6 +1390,8 @@ mlx5_flow_item_field_width(struct rte_eth_dev *dev,
case RTE_FLOW_FIELD_IPV6_ECN:
case RTE_FLOW_FIELD_METER_COLOR:
return 2;
+ case RTE_FLOW_FIELD_HASH_RESULT:
+ return 32;
default:
MLX5_ASSERT(false);
}
@@ -1883,6 +1885,16 @@ mlx5_flow_field_id_to_modify_info
info[idx].offset = data->offset;
}
break;
+ case RTE_FLOW_FIELD_HASH_RESULT:
+ MLX5_ASSERT(data->offset + width <= 32);
+ off_be = 32 - (data->offset + width);
+ info[idx] = (struct field_modify_info){4, 0,
+ MLX5_MODI_HASH_RESULT};
+ if (mask)
+ mask[idx] = flow_modify_info_mask_32(width, off_be);
+ else
+ info[idx].offset = off_be;
+ break;
case RTE_FLOW_FIELD_POINTER:
case RTE_FLOW_FIELD_VALUE:
default:
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index b209b448c6..6f391d990d 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -3530,10 +3530,11 @@ flow_hw_validate_action_modify_field(const struct rte_flow_action *action,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"destination field mask and template are not equal");
if (action_conf->dst.field == RTE_FLOW_FIELD_POINTER ||
- action_conf->dst.field == RTE_FLOW_FIELD_VALUE)
+ action_conf->dst.field == RTE_FLOW_FIELD_VALUE ||
+ action_conf->dst.field == RTE_FLOW_FIELD_HASH_RESULT)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
- "immediate value and pointer cannot be used as destination");
+ "immediate value, pointer and hash result cannot be used as destination");
if (mask_conf->dst.level != UINT32_MAX)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
--
2.18.2
^ permalink raw reply [flat|nested] 10+ messages in thread
* [PATCH 4/4] net/mlx5: define index register for linear tables
2023-01-26 23:40 [PATCH 0/4] net/mlx5: add template table insertion and matching types Alexander Kozyrev
` (2 preceding siblings ...)
2023-01-26 23:40 ` [PATCH 3/4] net/mlx5: add hash result metadata to modify field Alexander Kozyrev
@ 2023-01-26 23:40 ` Alexander Kozyrev
2023-03-06 15:18 ` Slava Ovsiienko
2023-03-07 11:13 ` [PATCH 0/4] net/mlx5: add template table insertion and matching types Raslan Darawsheh
4 siblings, 1 reply; 10+ messages in thread
From: Alexander Kozyrev @ 2023-01-26 23:40 UTC (permalink / raw)
To: dev; +Cc: thomas, orika, rasland, matan, viacheslavo
Set MLX5_LINEAR_HASH_TAG_INDEX as a special id for the TAG item:
it holds the index in a linear table for a packet to land in.
This rule index in the table uses upper 16-bits of REG_C_3,
handle this TAG item in the modify_field API for setting the index.
Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
---
drivers/net/mlx5/mlx5_flow.h | 3 +++
drivers/net/mlx5/mlx5_flow_dv.c | 6 ++++--
drivers/net/mlx5/mlx5_flow_hw.c | 12 ++++++++++--
drivers/net/mlx5/rte_pmd_mlx5.h | 5 +++++
4 files changed, 22 insertions(+), 4 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index c2f9ffd760..cd1938ecfd 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -17,6 +17,7 @@
#include <mlx5_prm.h>
#include "mlx5.h"
+#include "rte_pmd_mlx5.h"
#include "hws/mlx5dr.h"
/* E-Switch Manager port, used for rte_flow_item_port_id. */
@@ -1588,6 +1589,8 @@ flow_hw_get_reg_id(enum rte_flow_item_type type, uint32_t id)
case RTE_FLOW_ITEM_TYPE_METER_COLOR:
return mlx5_flow_hw_aso_tag;
case RTE_FLOW_ITEM_TYPE_TAG:
+ if (id == MLX5_LINEAR_HASH_TAG_INDEX)
+ return REG_C_3;
MLX5_ASSERT(id < MLX5_FLOW_HW_TAGS_MAX);
return mlx5_flow_hw_avl_tags[id];
default:
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 84fc725738..c6c0eae077 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -1761,6 +1761,8 @@ mlx5_flow_field_id_to_modify_info
MLX5_ASSERT(data->offset + width <= 32);
int reg;
+ off_be = (data->level == MLX5_LINEAR_HASH_TAG_INDEX) ?
+ 16 - (data->offset + width) + 16 : data->offset;
if (priv->sh->config.dv_flow_en == 2)
reg = flow_hw_get_reg_id(RTE_FLOW_ITEM_TYPE_TAG,
data->level);
@@ -1775,9 +1777,9 @@ mlx5_flow_field_id_to_modify_info
reg_to_field[reg]};
if (mask)
mask[idx] = flow_modify_info_mask_32
- (width, data->offset);
+ (width, off_be);
else
- info[idx].offset = data->offset;
+ info[idx].offset = off_be;
}
break;
case RTE_FLOW_FIELD_MARK:
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 6f391d990d..36a7f2a3bd 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -1017,7 +1017,11 @@ flow_hw_modify_field_compile(struct rte_eth_dev *dev,
conf->dst.field == RTE_FLOW_FIELD_METER_COLOR ||
conf->dst.field == (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG) {
value = *(const unaligned_uint32_t *)item.spec;
- value = rte_cpu_to_be_32(value);
+ if (conf->dst.field == RTE_FLOW_FIELD_TAG &&
+ conf->dst.level == MLX5_LINEAR_HASH_TAG_INDEX)
+ value = rte_cpu_to_be_32(value << 16);
+ else
+ value = rte_cpu_to_be_32(value);
item.spec = &value;
} else if (conf->dst.field == RTE_FLOW_FIELD_GTP_PSC_QFI) {
/*
@@ -2046,7 +2050,11 @@ flow_hw_modify_field_construct(struct mlx5_hw_q_job *job,
mhdr_action->dst.field == RTE_FLOW_FIELD_METER_COLOR ||
mhdr_action->dst.field == (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG) {
value_p = (unaligned_uint32_t *)values;
- *value_p = rte_cpu_to_be_32(*value_p);
+ if (mhdr_action->dst.field == RTE_FLOW_FIELD_TAG &&
+ mhdr_action->dst.level == MLX5_LINEAR_HASH_TAG_INDEX)
+ *value_p = rte_cpu_to_be_32(*value_p << 16);
+ else
+ *value_p = rte_cpu_to_be_32(*value_p);
} else if (mhdr_action->dst.field == RTE_FLOW_FIELD_GTP_PSC_QFI) {
uint32_t tmp;
diff --git a/drivers/net/mlx5/rte_pmd_mlx5.h b/drivers/net/mlx5/rte_pmd_mlx5.h
index b71a291256..5365cd8442 100644
--- a/drivers/net/mlx5/rte_pmd_mlx5.h
+++ b/drivers/net/mlx5/rte_pmd_mlx5.h
@@ -68,6 +68,11 @@ int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains);
*/
#define MLX5_EXTERNAL_RX_QUEUE_ID_MIN (UINT16_MAX - 1000 + 1)
+/**
+ * Tag level to set the linear hash index.
+ */
+#define MLX5_LINEAR_HASH_TAG_INDEX 255
+
/**
* Update mapping between rte_flow queue index (16 bits) and HW queue index (32
* bits) for RxQs which is created outside the PMD.
--
2.18.2
^ permalink raw reply [flat|nested] 10+ messages in thread