From: Alex Vesker <valex@nvidia.com>
To: <valex@nvidia.com>, <viacheslavo@nvidia.com>,
<thomas@monjalon.net>, <suanmingm@nvidia.com>,
Matan Azrad <matan@nvidia.com>
Cc: <dev@dpdk.org>, <orika@nvidia.com>, Erez Shitrit <erezsh@nvidia.com>
Subject: [v6 15/18] net/mlx5/hws: Add HWS rule object
Date: Thu, 20 Oct 2022 18:57:45 +0300
Message-ID: <20221020155749.16643-16-valex@nvidia.com>
In-Reply-To: <20221020155749.16643-1-valex@nvidia.com>
HWS rule objects reside under the matcher. Each rule holds the
configuration of the packet fields to match on and the set of actions
to execute on packets that carry the requested fields. Rules can be
created asynchronously and in parallel over multiple queues, targeting
different matchers. Each rule is written directly to the HW.
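For reference, a minimal usage sketch of the asynchronous rule API added by
this patch is shown below. It is not part of the patch: the matcher, items and
rule_actions setup, the attribute values and the application cookie are
illustrative assumptions only, and a real caller must also poll the send queue
for the completion carrying user_data.

    struct mlx5dr_rule_attr rule_attr = {
        .queue_id = 0,            /* send queue the rule WQEs are posted on */
        .burst = 0,               /* ring the HW doorbell immediately */
        .user_data = &app_cookie, /* mandatory, returned with the completion */
    };
    /* Rule handle memory is owned by the caller */
    struct mlx5dr_rule *rule = malloc(mlx5dr_rule_get_handle_size());

    /* Queue rule insertion using match template 0 and action template 0 */
    ret = mlx5dr_rule_create(matcher, 0, items, 0, rule_actions,
                             &rule_attr, rule);

    /* ...poll the queue until the rule completion is received... */

    /* Queue rule deletion using the same handle and attributes */
    ret = mlx5dr_rule_destroy(rule, &rule_attr);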
Signed-off-by: Erez Shitrit <erezsh@nvidia.com>
Signed-off-by: Alex Vesker <valex@nvidia.com>
---
drivers/net/mlx5/hws/mlx5dr_rule.c | 528 +++++++++++++++++++++++++++++
drivers/net/mlx5/hws/mlx5dr_rule.h | 50 +++
2 files changed, 578 insertions(+)
create mode 100644 drivers/net/mlx5/hws/mlx5dr_rule.c
create mode 100644 drivers/net/mlx5/hws/mlx5dr_rule.h
diff --git a/drivers/net/mlx5/hws/mlx5dr_rule.c b/drivers/net/mlx5/hws/mlx5dr_rule.c
new file mode 100644
index 0000000000..b27318e6d4
--- /dev/null
+++ b/drivers/net/mlx5/hws/mlx5dr_rule.c
@@ -0,0 +1,528 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates
+ */
+
+#include "mlx5dr_internal.h"
+
+static void mlx5dr_rule_skip(struct mlx5dr_matcher *matcher,
+ const struct rte_flow_item *items,
+ bool *skip_rx, bool *skip_tx)
+{
+ struct mlx5dr_match_template *mt = matcher->mt[0];
+ const struct flow_hw_port_info *vport;
+ const struct rte_flow_item_ethdev *v;
+
+ /* Flow_src is the 1st priority */
+ if (matcher->attr.optimize_flow_src) {
+ *skip_tx = matcher->attr.optimize_flow_src == MLX5DR_MATCHER_FLOW_SRC_WIRE;
+ *skip_rx = matcher->attr.optimize_flow_src == MLX5DR_MATCHER_FLOW_SRC_VPORT;
+ return;
+ }
+
+ /* By default FDB rules are added to both RX and TX */
+ *skip_rx = false;
+ *skip_tx = false;
+
+ if (mt->item_flags & MLX5_FLOW_ITEM_REPRESENTED_PORT) {
+ v = items[mt->vport_item_id].spec;
+ vport = flow_hw_conv_port_id(v->port_id);
+ if (unlikely(!vport)) {
+ DR_LOG(ERR, "Failed to map port ID %d, ignoring", v->port_id);
+ return;
+ }
+
+ if (!vport->is_wire)
+ /* Match vport ID is not WIRE -> Skip RX */
+ *skip_rx = true;
+ else
+ /* Match vport ID is WIRE -> Skip TX */
+ *skip_tx = true;
+ }
+}
+
+static void mlx5dr_rule_init_dep_wqe(struct mlx5dr_send_ring_dep_wqe *dep_wqe,
+ struct mlx5dr_rule *rule,
+ const struct rte_flow_item *items,
+ void *user_data)
+{
+ struct mlx5dr_matcher *matcher = rule->matcher;
+ struct mlx5dr_table *tbl = matcher->tbl;
+ bool skip_rx, skip_tx;
+
+ dep_wqe->rule = rule;
+ dep_wqe->user_data = user_data;
+
+ switch (tbl->type) {
+ case MLX5DR_TABLE_TYPE_NIC_RX:
+ case MLX5DR_TABLE_TYPE_NIC_TX:
+ dep_wqe->rtc_0 = matcher->match_ste.rtc_0->id;
+ dep_wqe->retry_rtc_0 = matcher->col_matcher ?
+ matcher->col_matcher->match_ste.rtc_0->id : 0;
+ dep_wqe->rtc_1 = 0;
+ dep_wqe->retry_rtc_1 = 0;
+ break;
+
+ case MLX5DR_TABLE_TYPE_FDB:
+ mlx5dr_rule_skip(matcher, items, &skip_rx, &skip_tx);
+
+ if (!skip_rx) {
+ dep_wqe->rtc_0 = matcher->match_ste.rtc_0->id;
+ dep_wqe->retry_rtc_0 = matcher->col_matcher ?
+ matcher->col_matcher->match_ste.rtc_0->id : 0;
+ } else {
+ dep_wqe->rtc_0 = 0;
+ dep_wqe->retry_rtc_0 = 0;
+ }
+
+ if (!skip_tx) {
+ dep_wqe->rtc_1 = matcher->match_ste.rtc_1->id;
+ dep_wqe->retry_rtc_1 = matcher->col_matcher ?
+ matcher->col_matcher->match_ste.rtc_1->id : 0;
+ } else {
+ dep_wqe->rtc_1 = 0;
+ dep_wqe->retry_rtc_1 = 0;
+ }
+
+ break;
+
+ default:
+ assert(false);
+ break;
+ }
+}
+
+static void mlx5dr_rule_gen_comp(struct mlx5dr_send_engine *queue,
+ struct mlx5dr_rule *rule,
+ bool err,
+ void *user_data,
+ enum mlx5dr_rule_status rule_status_on_succ)
+{
+ enum rte_flow_op_status comp_status;
+
+ if (!err) {
+ comp_status = RTE_FLOW_OP_SUCCESS;
+ rule->status = rule_status_on_succ;
+ } else {
+ comp_status = RTE_FLOW_OP_ERROR;
+ rule->status = MLX5DR_RULE_STATUS_FAILED;
+ }
+
+ mlx5dr_send_engine_inc_rule(queue);
+ mlx5dr_send_engine_gen_comp(queue, user_data, comp_status);
+}
+
+static int mlx5dr_rule_alloc_action_ste(struct mlx5dr_rule *rule,
+ struct mlx5dr_rule_attr *attr)
+{
+ struct mlx5dr_matcher *matcher = rule->matcher;
+ int ret;
+
+ /* Use rule_idx for locking optimization, otherwise allocate from pool */
+ if (matcher->attr.optimize_using_rule_idx) {
+ rule->action_ste_idx = attr->rule_idx * matcher->action_ste.max_stes;
+ } else {
+ struct mlx5dr_pool_chunk ste = {0};
+
+ ste.order = rte_log2_u32(matcher->action_ste.max_stes);
+ ret = mlx5dr_pool_chunk_alloc(matcher->action_ste.pool, &ste);
+ if (ret) {
+ DR_LOG(ERR, "Failed to allocate STE for rule actions");
+ return ret;
+ }
+ rule->action_ste_idx = ste.offset;
+ }
+ return 0;
+}
+
+void mlx5dr_rule_free_action_ste_idx(struct mlx5dr_rule *rule)
+{
+ struct mlx5dr_matcher *matcher = rule->matcher;
+
+ if (rule->action_ste_idx > -1 && !matcher->attr.optimize_using_rule_idx) {
+ struct mlx5dr_pool_chunk ste = {0};
+
+ /* This release is safe only when the rule match part was deleted */
+ ste.order = rte_log2_u32(matcher->action_ste.max_stes);
+ ste.offset = rule->action_ste_idx;
+ mlx5dr_pool_chunk_free(matcher->action_ste.pool, &ste);
+ }
+}
+
+static void mlx5dr_rule_create_init(struct mlx5dr_rule *rule,
+ struct mlx5dr_send_ste_attr *ste_attr,
+ struct mlx5dr_actions_apply_data *apply)
+{
+ struct mlx5dr_matcher *matcher = rule->matcher;
+ struct mlx5dr_table *tbl = matcher->tbl;
+ struct mlx5dr_context *ctx = tbl->ctx;
+
+ /* Init rule before reuse */
+ rule->rtc_0 = 0;
+ rule->rtc_1 = 0;
+ rule->pending_wqes = 0;
+ rule->action_ste_idx = -1;
+ rule->status = MLX5DR_RULE_STATUS_CREATING;
+
+ /* Init default send STE attributes */
+ ste_attr->gta_opcode = MLX5DR_WQE_GTA_OP_ACTIVATE;
+ ste_attr->send_attr.opmod = MLX5DR_WQE_GTA_OPMOD_STE;
+ ste_attr->send_attr.opcode = MLX5DR_WQE_OPCODE_TBL_ACCESS;
+ ste_attr->send_attr.len = MLX5DR_WQE_SZ_GTA_CTRL + MLX5DR_WQE_SZ_GTA_DATA;
+
+ /* Init default action apply */
+ apply->tbl_type = tbl->type;
+ apply->common_res = &ctx->common_res[tbl->type];
+ apply->jump_to_action_stc = matcher->action_ste.stc.offset;
+ apply->require_dep = 0;
+}
+
+static int mlx5dr_rule_create_hws(struct mlx5dr_rule *rule,
+ struct mlx5dr_rule_attr *attr,
+ uint8_t mt_idx,
+ const struct rte_flow_item items[],
+ uint8_t at_idx,
+ struct mlx5dr_rule_action rule_actions[])
+{
+ struct mlx5dr_action_template *at = rule->matcher->at[at_idx];
+ struct mlx5dr_match_template *mt = rule->matcher->mt[mt_idx];
+ bool is_jumbo = mlx5dr_definer_is_jumbo(mt->definer);
+ struct mlx5dr_matcher *matcher = rule->matcher;
+ struct mlx5dr_context *ctx = matcher->tbl->ctx;
+ struct mlx5dr_send_ste_attr ste_attr = {0};
+ struct mlx5dr_send_ring_dep_wqe *dep_wqe;
+ struct mlx5dr_actions_wqe_setter *setter;
+ struct mlx5dr_actions_apply_data apply;
+ struct mlx5dr_send_engine *queue;
+ uint8_t total_stes, action_stes;
+ int i, ret;
+
+ queue = &ctx->send_queue[attr->queue_id];
+ if (unlikely(mlx5dr_send_engine_err(queue))) {
+ rte_errno = EIO;
+ return rte_errno;
+ }
+
+ mlx5dr_rule_create_init(rule, &ste_attr, &apply);
+
+ /* Allocate dependent match WQE since rule might have dependent writes.
+ * The queued dependent WQE can be later aborted or kept as a dependency.
+ * dep_wqe buffers (ctrl, data) are also reused for all STE writes.
+ */
+ dep_wqe = mlx5dr_send_add_new_dep_wqe(queue);
+ mlx5dr_rule_init_dep_wqe(dep_wqe, rule, items, attr->user_data);
+
+ ste_attr.wqe_ctrl = &dep_wqe->wqe_ctrl;
+ ste_attr.wqe_data = &dep_wqe->wqe_data;
+ apply.wqe_ctrl = &dep_wqe->wqe_ctrl;
+ apply.wqe_data = (uint32_t *)&dep_wqe->wqe_data;
+ apply.rule_action = rule_actions;
+ apply.queue = queue;
+
+ setter = &at->setters[at->num_of_action_stes];
+ total_stes = at->num_of_action_stes + (is_jumbo && !at->only_term);
+ action_stes = total_stes - 1;
+
+ if (action_stes) {
+ /* Allocate action STEs for complex rules */
+ ret = mlx5dr_rule_alloc_action_ste(rule, attr);
+ if (ret) {
+ DR_LOG(ERR, "Failed to allocate action memory %d", ret);
+ mlx5dr_send_abort_new_dep_wqe(queue);
+ return ret;
+ }
+ /* Skip RX/TX based on the dep_wqe init */
+ ste_attr.rtc_0 = dep_wqe->rtc_0 ? matcher->action_ste.rtc_0->id : 0;
+ ste_attr.rtc_1 = dep_wqe->rtc_1 ? matcher->action_ste.rtc_1->id : 0;
+ /* Action STEs are written to a specific index last to first */
+ ste_attr.direct_index = rule->action_ste_idx + action_stes;
+ apply.next_direct_idx = ste_attr.direct_index;
+ } else {
+ apply.next_direct_idx = 0;
+ }
+
+ for (i = total_stes; i-- > 0;) {
+ mlx5dr_action_apply_setter(&apply, setter--, !i && is_jumbo);
+
+ if (i == 0) {
+ /* Handle last match STE */
+ mlx5dr_definer_create_tag(items, mt->fc, mt->fc_sz,
+ (uint8_t *)dep_wqe->wqe_data.action);
+
+ /* Rule has dependent WQEs, match dep_wqe is queued */
+ if (action_stes || apply.require_dep)
+ break;
+
+ /* Rule has no dependencies, abort dep_wqe and send WQE now */
+ mlx5dr_send_abort_new_dep_wqe(queue);
+ ste_attr.wqe_tag_is_jumbo = is_jumbo;
+ ste_attr.send_attr.notify_hw = !attr->burst;
+ ste_attr.send_attr.user_data = dep_wqe->user_data;
+ ste_attr.send_attr.rule = dep_wqe->rule;
+ ste_attr.direct_index = 0;
+ ste_attr.rtc_0 = dep_wqe->rtc_0;
+ ste_attr.rtc_1 = dep_wqe->rtc_1;
+ ste_attr.used_id_rtc_0 = &rule->rtc_0;
+ ste_attr.used_id_rtc_1 = &rule->rtc_1;
+ ste_attr.retry_rtc_0 = dep_wqe->retry_rtc_0;
+ ste_attr.retry_rtc_1 = dep_wqe->retry_rtc_1;
+ } else {
+ apply.next_direct_idx = --ste_attr.direct_index;
+ }
+
+ mlx5dr_send_ste(queue, &ste_attr);
+ }
+
+ /* Back up the TAG on the rule for deletion */
+ if (is_jumbo)
+ memcpy(rule->tag.jumbo, dep_wqe->wqe_data.action, MLX5DR_JUMBO_TAG_SZ);
+ else
+ memcpy(rule->tag.match, dep_wqe->wqe_data.tag, MLX5DR_MATCH_TAG_SZ);
+
+ mlx5dr_send_engine_inc_rule(queue);
+
+ /* Send dependent WQEs */
+ if (!attr->burst)
+ mlx5dr_send_all_dep_wqe(queue);
+
+ return 0;
+}
+
+static void mlx5dr_rule_destroy_failed_hws(struct mlx5dr_rule *rule,
+ struct mlx5dr_rule_attr *attr)
+{
+ struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
+ struct mlx5dr_send_engine *queue;
+
+ queue = &ctx->send_queue[attr->queue_id];
+
+ mlx5dr_rule_gen_comp(queue, rule, false,
+ attr->user_data, MLX5DR_RULE_STATUS_DELETED);
+
+ /* The rule failed, now we can safely release the action STEs */
+ mlx5dr_rule_free_action_ste_idx(rule);
+
+ /* If a rule that was not indicated as burst (i.e. the HW must be
+ * triggered immediately) failed insertion, nothing was written to the WQ,
+ * so flush the pending dependent WQEs and ring the HW with that work.
+ */
+ if (attr->burst)
+ return;
+
+ mlx5dr_send_all_dep_wqe(queue);
+ mlx5dr_send_engine_flush_queue(queue);
+}
+
+static int mlx5dr_rule_destroy_hws(struct mlx5dr_rule *rule,
+ struct mlx5dr_rule_attr *attr)
+{
+ struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
+ struct mlx5dr_matcher *matcher = rule->matcher;
+ struct mlx5dr_wqe_gta_ctrl_seg wqe_ctrl = {0};
+ struct mlx5dr_send_ste_attr ste_attr = {0};
+ struct mlx5dr_send_engine *queue;
+
+ queue = &ctx->send_queue[attr->queue_id];
+
+ /* Rule is not completed yet */
+ if (rule->status == MLX5DR_RULE_STATUS_CREATING) {
+ rte_errno = EBUSY;
+ return rte_errno;
+ }
+
+ /* Rule failed and doesn't require cleanup */
+ if (rule->status == MLX5DR_RULE_STATUS_FAILED) {
+ mlx5dr_rule_destroy_failed_hws(rule, attr);
+ return 0;
+ }
+
+ if (unlikely(mlx5dr_send_engine_err(queue))) {
+ mlx5dr_rule_destroy_failed_hws(rule, attr);
+ return 0;
+ }
+
+ mlx5dr_send_engine_inc_rule(queue);
+
+ /* Send dependent WQE */
+ if (!attr->burst)
+ mlx5dr_send_all_dep_wqe(queue);
+
+ rule->status = MLX5DR_RULE_STATUS_DELETING;
+
+ ste_attr.send_attr.opmod = MLX5DR_WQE_GTA_OPMOD_STE;
+ ste_attr.send_attr.opcode = MLX5DR_WQE_OPCODE_TBL_ACCESS;
+ ste_attr.send_attr.len = MLX5DR_WQE_SZ_GTA_CTRL + MLX5DR_WQE_SZ_GTA_DATA;
+
+ ste_attr.send_attr.rule = rule;
+ ste_attr.send_attr.notify_hw = !attr->burst;
+ ste_attr.send_attr.user_data = attr->user_data;
+
+ ste_attr.rtc_0 = rule->rtc_0;
+ ste_attr.rtc_1 = rule->rtc_1;
+ ste_attr.used_id_rtc_0 = &rule->rtc_0;
+ ste_attr.used_id_rtc_1 = &rule->rtc_1;
+ ste_attr.wqe_ctrl = &wqe_ctrl;
+ ste_attr.wqe_tag = &rule->tag;
+ ste_attr.wqe_tag_is_jumbo = mlx5dr_definer_is_jumbo(matcher->mt[0]->definer);
+ ste_attr.gta_opcode = MLX5DR_WQE_GTA_OP_DEACTIVATE;
+
+ mlx5dr_send_ste(queue, &ste_attr);
+
+ return 0;
+}
+
+static int mlx5dr_rule_create_root(struct mlx5dr_rule *rule,
+ struct mlx5dr_rule_attr *rule_attr,
+ const struct rte_flow_item items[],
+ uint8_t at_idx,
+ struct mlx5dr_rule_action rule_actions[])
+{
+ struct mlx5dv_flow_matcher *dv_matcher = rule->matcher->dv_matcher;
+ uint8_t num_actions = rule->matcher->at[at_idx]->num_actions;
+ struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
+ struct mlx5dv_flow_match_parameters *value;
+ struct mlx5_flow_attr flow_attr = {0};
+ struct mlx5dv_flow_action_attr *attr;
+ struct rte_flow_error error;
+ uint8_t match_criteria;
+ int ret;
+
+ attr = simple_calloc(num_actions, sizeof(*attr));
+ if (!attr) {
+ rte_errno = ENOMEM;
+ return rte_errno;
+ }
+
+ value = simple_calloc(1, MLX5_ST_SZ_BYTES(fte_match_param) +
+ offsetof(struct mlx5dv_flow_match_parameters, match_buf));
+ if (!value) {
+ rte_errno = ENOMEM;
+ goto free_attr;
+ }
+
+ flow_attr.tbl_type = rule->matcher->tbl->type;
+
+ ret = flow_dv_translate_items_hws(items, &flow_attr, value->match_buf,
+ MLX5_SET_MATCHER_HS_V, NULL,
+ &match_criteria,
+ &error);
+ if (ret) {
+ DR_LOG(ERR, "Failed to convert items to PRM [%s]", error.message);
+ goto free_value;
+ }
+
+ /* Convert actions to verb action attr */
+ ret = mlx5dr_action_root_build_attr(rule_actions, num_actions, attr);
+ if (ret)
+ goto free_value;
+
+ /* Create verb flow */
+ value->match_sz = MLX5_ST_SZ_BYTES(fte_match_param);
+ rule->flow = mlx5_glue->dv_create_flow_root(dv_matcher,
+ value,
+ num_actions,
+ attr);
+
+ mlx5dr_rule_gen_comp(&ctx->send_queue[rule_attr->queue_id], rule, !rule->flow,
+ rule_attr->user_data, MLX5DR_RULE_STATUS_CREATED);
+
+ simple_free(value);
+ simple_free(attr);
+
+ return 0;
+
+free_value:
+ simple_free(value);
+free_attr:
+ simple_free(attr);
+
+ return -rte_errno;
+}
+
+static int mlx5dr_rule_destroy_root(struct mlx5dr_rule *rule,
+ struct mlx5dr_rule_attr *attr)
+{
+ struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
+ int err = 0;
+
+ if (rule->flow)
+ err = ibv_destroy_flow(rule->flow);
+
+ mlx5dr_rule_gen_comp(&ctx->send_queue[attr->queue_id], rule, err,
+ attr->user_data, MLX5DR_RULE_STATUS_DELETED);
+
+ return 0;
+}
+
+int mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
+ uint8_t mt_idx,
+ const struct rte_flow_item items[],
+ uint8_t at_idx,
+ struct mlx5dr_rule_action rule_actions[],
+ struct mlx5dr_rule_attr *attr,
+ struct mlx5dr_rule *rule_handle)
+{
+ struct mlx5dr_context *ctx;
+ int ret;
+
+ rule_handle->matcher = matcher;
+ ctx = matcher->tbl->ctx;
+
+ if (unlikely(!attr->user_data)) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+
+ /* Check if there is room in queue */
+ if (unlikely(mlx5dr_send_engine_full(&ctx->send_queue[attr->queue_id]))) {
+ rte_errno = EBUSY;
+ return -rte_errno;
+ }
+
+ assert(matcher->num_of_mt >= mt_idx);
+ assert(matcher->num_of_at >= at_idx);
+
+ if (unlikely(mlx5dr_table_is_root(matcher->tbl)))
+ ret = mlx5dr_rule_create_root(rule_handle,
+ attr,
+ items,
+ at_idx,
+ rule_actions);
+ else
+ ret = mlx5dr_rule_create_hws(rule_handle,
+ attr,
+ mt_idx,
+ items,
+ at_idx,
+ rule_actions);
+ return -ret;
+}
+
+int mlx5dr_rule_destroy(struct mlx5dr_rule *rule,
+ struct mlx5dr_rule_attr *attr)
+{
+ struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
+ int ret;
+
+ if (unlikely(!attr->user_data)) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+
+ /* Check if there is room in queue */
+ if (unlikely(mlx5dr_send_engine_full(&ctx->send_queue[attr->queue_id]))) {
+ rte_errno = EBUSY;
+ return -rte_errno;
+ }
+
+ if (unlikely(mlx5dr_table_is_root(rule->matcher->tbl)))
+ ret = mlx5dr_rule_destroy_root(rule, attr);
+ else
+ ret = mlx5dr_rule_destroy_hws(rule, attr);
+
+ return -ret;
+}
+
+size_t mlx5dr_rule_get_handle_size(void)
+{
+ return sizeof(struct mlx5dr_rule);
+}
diff --git a/drivers/net/mlx5/hws/mlx5dr_rule.h b/drivers/net/mlx5/hws/mlx5dr_rule.h
new file mode 100644
index 0000000000..96c85674f2
--- /dev/null
+++ b/drivers/net/mlx5/hws/mlx5dr_rule.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates
+ */
+
+#ifndef MLX5DR_RULE_H_
+#define MLX5DR_RULE_H_
+
+enum {
+ MLX5DR_STE_CTRL_SZ = 20,
+ MLX5DR_ACTIONS_SZ = 12,
+ MLX5DR_MATCH_TAG_SZ = 32,
+ MLX5DR_JUMBO_TAG_SZ = 44,
+};
+
+enum mlx5dr_rule_status {
+ MLX5DR_RULE_STATUS_UNKNOWN,
+ MLX5DR_RULE_STATUS_CREATING,
+ MLX5DR_RULE_STATUS_CREATED,
+ MLX5DR_RULE_STATUS_DELETING,
+ MLX5DR_RULE_STATUS_DELETED,
+ MLX5DR_RULE_STATUS_FAILING,
+ MLX5DR_RULE_STATUS_FAILED,
+};
+
+struct mlx5dr_rule_match_tag {
+ union {
+ uint8_t jumbo[MLX5DR_JUMBO_TAG_SZ];
+ struct {
+ uint8_t reserved[MLX5DR_ACTIONS_SZ];
+ uint8_t match[MLX5DR_MATCH_TAG_SZ];
+ };
+ };
+};
+
+struct mlx5dr_rule {
+ struct mlx5dr_matcher *matcher;
+ union {
+ struct mlx5dr_rule_match_tag tag;
+ struct ibv_flow *flow;
+ };
+ uint32_t rtc_0; /* The RTC into which the STE was inserted */
+ uint32_t rtc_1; /* The RTC into which the STE was inserted */
+ int action_ste_idx; /* Action STE pool ID */
+ uint8_t status; /* enum mlx5dr_rule_status */
+ uint8_t pending_wqes;
+};
+
+void mlx5dr_rule_free_action_ste_idx(struct mlx5dr_rule *rule);
+
+#endif /* MLX5DR_RULE_H_ */
--
2.18.1