From: Alex Vesker <valex@nvidia.com>
To: <valex@nvidia.com>, <viacheslavo@nvidia.com>,
<thomas@monjalon.net>, "Matan Azrad" <matan@nvidia.com>
Cc: <dev@dpdk.org>, <orika@nvidia.com>
Subject: [v2 13/16] net/mlx5/hws: add FW WQE rule creation logic
Date: Wed, 1 Feb 2023 09:28:12 +0200 [thread overview]
Message-ID: <20230201072815.1329101-14-valex@nvidia.com> (raw)
In-Reply-To: <20230201072815.1329101-1-valex@nvidia.com>
FW WQE and HW WQE are handled in a similar way, but to avoid
jeopardizing performance, rule creation is done through the new,
dedicated FW rule creation function. The deletion function is
shared between both flows.
Signed-off-by: Alex Vesker <valex@nvidia.com>
---
drivers/net/mlx5/hws/mlx5dr_rule.c | 180 +++++++++++++++++++++++++++--
drivers/net/mlx5/hws/mlx5dr_rule.h | 2 +
drivers/net/mlx5/hws/mlx5dr_send.h | 9 +-
3 files changed, 180 insertions(+), 11 deletions(-)
diff --git a/drivers/net/mlx5/hws/mlx5dr_rule.c b/drivers/net/mlx5/hws/mlx5dr_rule.c
index f5a0c46315..9d5e5b11a5 100644
--- a/drivers/net/mlx5/hws/mlx5dr_rule.c
+++ b/drivers/net/mlx5/hws/mlx5dr_rule.c
@@ -112,6 +112,62 @@ static void mlx5dr_rule_gen_comp(struct mlx5dr_send_engine *queue,
mlx5dr_send_engine_gen_comp(queue, user_data, comp_status);
}
+static void
+mlx5dr_rule_save_delete_info(struct mlx5dr_rule *rule,
+ struct mlx5dr_send_ste_attr *ste_attr)
+{
+ if (unlikely(mlx5dr_matcher_req_fw_wqe(rule->matcher))) {
+ uint8_t *src_tag;
+
+ /* Save match definer id and tag for delete */
+ rule->tag_ptr = simple_calloc(2, sizeof(*rule->tag_ptr));
+ assert(rule->tag_ptr);
+
+ src_tag = (uint8_t *)ste_attr->wqe_data->tag;
+ memcpy(rule->tag_ptr[0].match, src_tag, MLX5DR_MATCH_TAG_SZ);
+ rule->tag_ptr[1].reserved[0] = ste_attr->send_attr.match_definer_id;
+
+ /* Save range definer id and tag for delete */
+ if (ste_attr->range_wqe_data) {
+ src_tag = (uint8_t *)ste_attr->range_wqe_data->tag;
+ memcpy(rule->tag_ptr[1].match, src_tag, MLX5DR_MATCH_TAG_SZ);
+ rule->tag_ptr[1].reserved[1] = ste_attr->send_attr.range_definer_id;
+ }
+ return;
+ }
+
+ if (ste_attr->wqe_tag_is_jumbo)
+ memcpy(rule->tag.jumbo, ste_attr->wqe_data->jumbo, MLX5DR_JUMBO_TAG_SZ);
+ else
+ memcpy(rule->tag.match, ste_attr->wqe_data->tag, MLX5DR_MATCH_TAG_SZ);
+}
+
+static void
+mlx5dr_rule_clear_delete_info(struct mlx5dr_rule *rule)
+{
+ if (unlikely(mlx5dr_matcher_req_fw_wqe(rule->matcher)))
+ simple_free(rule->tag_ptr);
+}
+
+static void
+mlx5dr_rule_load_delete_info(struct mlx5dr_rule *rule,
+ struct mlx5dr_send_ste_attr *ste_attr)
+{
+ if (unlikely(mlx5dr_matcher_req_fw_wqe(rule->matcher))) {
+ /* Load match definer id and tag for delete */
+ ste_attr->wqe_tag = &rule->tag_ptr[0];
+ ste_attr->send_attr.match_definer_id = rule->tag_ptr[1].reserved[0];
+
+ /* Load range definer id and tag for delete */
+ if (rule->matcher->flags & MLX5DR_MATCHER_FLAGS_RANGE_DEFINER) {
+ ste_attr->range_wqe_tag = &rule->tag_ptr[1];
+ ste_attr->send_attr.range_definer_id = rule->tag_ptr[1].reserved[1];
+ }
+ } else {
+ ste_attr->wqe_tag = &rule->tag;
+ }
+}
+
static int mlx5dr_rule_alloc_action_ste(struct mlx5dr_rule *rule,
struct mlx5dr_rule_attr *attr)
{
@@ -180,6 +236,97 @@ static void mlx5dr_rule_create_init(struct mlx5dr_rule *rule,
apply->require_dep = 0;
}
+static int mlx5dr_rule_create_hws_fw_wqe(struct mlx5dr_rule *rule,
+ struct mlx5dr_rule_attr *attr,
+ uint8_t mt_idx,
+ const struct rte_flow_item items[],
+ uint8_t at_idx,
+ struct mlx5dr_rule_action rule_actions[])
+{
+ struct mlx5dr_action_template *at = &rule->matcher->at[at_idx];
+ struct mlx5dr_match_template *mt = &rule->matcher->mt[mt_idx];
+ struct mlx5dr_send_ring_dep_wqe range_wqe = {{0}};
+ struct mlx5dr_send_ring_dep_wqe match_wqe = {{0}};
+ bool is_range = mlx5dr_matcher_mt_is_range(mt);
+ bool is_jumbo = mlx5dr_matcher_mt_is_jumbo(mt);
+ struct mlx5dr_matcher *matcher = rule->matcher;
+ struct mlx5dr_context *ctx = matcher->tbl->ctx;
+ struct mlx5dr_send_ste_attr ste_attr = {0};
+ struct mlx5dr_actions_apply_data apply;
+ struct mlx5dr_send_engine *queue;
+
+ queue = &ctx->send_queue[attr->queue_id];
+ if (unlikely(mlx5dr_send_engine_err(queue))) {
+ rte_errno = EIO;
+ return rte_errno;
+ }
+
+ mlx5dr_rule_create_init(rule, &ste_attr, &apply);
+ mlx5dr_rule_init_dep_wqe(&match_wqe, rule, items, mt, attr->user_data);
+ mlx5dr_rule_init_dep_wqe(&range_wqe, rule, items, mt, attr->user_data);
+
+ ste_attr.direct_index = 0;
+ ste_attr.rtc_0 = match_wqe.rtc_0;
+ ste_attr.rtc_1 = match_wqe.rtc_1;
+ ste_attr.used_id_rtc_0 = &rule->rtc_0;
+ ste_attr.used_id_rtc_1 = &rule->rtc_1;
+ ste_attr.retry_rtc_0 = match_wqe.retry_rtc_0;
+ ste_attr.retry_rtc_1 = match_wqe.retry_rtc_1;
+ ste_attr.send_attr.rule = match_wqe.rule;
+ ste_attr.send_attr.user_data = match_wqe.user_data;
+
+ ste_attr.send_attr.fence = 1;
+ ste_attr.send_attr.notify_hw = 1;
+ ste_attr.wqe_tag_is_jumbo = is_jumbo;
+
+ /* Prepare match STE TAG */
+ ste_attr.wqe_ctrl = &match_wqe.wqe_ctrl;
+ ste_attr.wqe_data = &match_wqe.wqe_data;
+ ste_attr.send_attr.match_definer_id = mlx5dr_definer_get_id(mt->definer);
+
+ mlx5dr_definer_create_tag(items,
+ mt->fc,
+ mt->fc_sz,
+ (uint8_t *)match_wqe.wqe_data.action);
+
+ /* Prepare range STE TAG */
+ if (is_range) {
+ ste_attr.range_wqe_data = &range_wqe.wqe_data;
+ ste_attr.send_attr.len += MLX5DR_WQE_SZ_GTA_DATA;
+ ste_attr.send_attr.range_definer_id = mlx5dr_definer_get_id(mt->range_definer);
+
+ mlx5dr_definer_create_tag_range(items,
+ mt->fcr,
+ mt->fcr_sz,
+ (uint8_t *)range_wqe.wqe_data.action);
+ }
+
+ /* Apply the actions on the last STE */
+ apply.queue = queue;
+ apply.next_direct_idx = 0;
+ apply.rule_action = rule_actions;
+ apply.wqe_ctrl = &match_wqe.wqe_ctrl;
+ apply.wqe_data = (uint32_t *)(is_range ?
+ &range_wqe.wqe_data :
+ &match_wqe.wqe_data);
+
+ /* Skip setters[0] used for jumbo STE since not support with FW WQE */
+ mlx5dr_action_apply_setter(&apply, &at->setters[1], 0);
+
+ /* Send WQEs to FW */
+ mlx5dr_send_stes_fw(queue, &ste_attr);
+
+ /* Backup TAG on the rule for deletion */
+ mlx5dr_rule_save_delete_info(rule, &ste_attr);
+ mlx5dr_send_engine_inc_rule(queue);
+
+ /* Send dependent WQEs */
+ if (!attr->burst)
+ mlx5dr_send_all_dep_wqe(queue);
+
+ return 0;
+}
+
static int mlx5dr_rule_create_hws(struct mlx5dr_rule *rule,
struct mlx5dr_rule_attr *attr,
uint8_t mt_idx,
@@ -189,7 +336,7 @@ static int mlx5dr_rule_create_hws(struct mlx5dr_rule *rule,
{
struct mlx5dr_action_template *at = &rule->matcher->at[at_idx];
struct mlx5dr_match_template *mt = &rule->matcher->mt[mt_idx];
- bool is_jumbo = mlx5dr_definer_is_jumbo(mt->definer);
+ bool is_jumbo = mlx5dr_matcher_mt_is_jumbo(mt);
struct mlx5dr_matcher *matcher = rule->matcher;
struct mlx5dr_context *ctx = matcher->tbl->ctx;
struct mlx5dr_send_ste_attr ste_attr = {0};
@@ -200,6 +347,11 @@ static int mlx5dr_rule_create_hws(struct mlx5dr_rule *rule,
uint8_t total_stes, action_stes;
int i, ret;
+ /* Insert rule using FW WQE if cannot use GTA WQE */
+ if (unlikely(mlx5dr_matcher_req_fw_wqe(matcher)))
+ return mlx5dr_rule_create_hws_fw_wqe(rule, attr, mt_idx, items,
+ at_idx, rule_actions);
+
queue = &ctx->send_queue[attr->queue_id];
if (unlikely(mlx5dr_send_engine_err(queue))) {
rte_errno = EIO;
@@ -283,11 +435,7 @@ static int mlx5dr_rule_create_hws(struct mlx5dr_rule *rule,
}
/* Backup TAG on the rule for deletion */
- if (is_jumbo)
- memcpy(rule->tag.jumbo, dep_wqe->wqe_data.action, MLX5DR_JUMBO_TAG_SZ);
- else
- memcpy(rule->tag.match, dep_wqe->wqe_data.tag, MLX5DR_MATCH_TAG_SZ);
-
+ mlx5dr_rule_save_delete_info(rule, &ste_attr);
mlx5dr_send_engine_inc_rule(queue);
/* Send dependent WQEs */
@@ -311,6 +459,9 @@ static void mlx5dr_rule_destroy_failed_hws(struct mlx5dr_rule *rule,
/* Rule failed now we can safely release action STEs */
mlx5dr_rule_free_action_ste_idx(rule);
+ /* Clear complex tag */
+ mlx5dr_rule_clear_delete_info(rule);
+
/* If a rule that was indicated as burst (need to trigger HW) has failed
* insertion we won't ring the HW as nothing is being written to the WQ.
* In such case update the last WQE and ring the HW with that work
@@ -327,6 +478,9 @@ static int mlx5dr_rule_destroy_hws(struct mlx5dr_rule *rule,
{
struct mlx5dr_context *ctx = rule->matcher->tbl->ctx;
struct mlx5dr_matcher *matcher = rule->matcher;
+ bool fw_wqe = mlx5dr_matcher_req_fw_wqe(matcher);
+ bool is_range = mlx5dr_matcher_mt_is_range(matcher->mt);
+ bool is_jumbo = mlx5dr_matcher_mt_is_jumbo(matcher->mt);
struct mlx5dr_wqe_gta_ctrl_seg wqe_ctrl = {0};
struct mlx5dr_send_ste_attr ste_attr = {0};
struct mlx5dr_send_engine *queue;
@@ -361,6 +515,8 @@ static int mlx5dr_rule_destroy_hws(struct mlx5dr_rule *rule,
ste_attr.send_attr.opmod = MLX5DR_WQE_GTA_OPMOD_STE;
ste_attr.send_attr.opcode = MLX5DR_WQE_OPCODE_TBL_ACCESS;
ste_attr.send_attr.len = MLX5DR_WQE_SZ_GTA_CTRL + MLX5DR_WQE_SZ_GTA_DATA;
+ if (unlikely(is_range))
+ ste_attr.send_attr.len += MLX5DR_WQE_SZ_GTA_DATA;
ste_attr.send_attr.rule = rule;
ste_attr.send_attr.notify_hw = !attr->burst;
@@ -371,13 +527,19 @@ static int mlx5dr_rule_destroy_hws(struct mlx5dr_rule *rule,
ste_attr.used_id_rtc_0 = &rule->rtc_0;
ste_attr.used_id_rtc_1 = &rule->rtc_1;
ste_attr.wqe_ctrl = &wqe_ctrl;
- ste_attr.wqe_tag = &rule->tag;
- ste_attr.wqe_tag_is_jumbo = mlx5dr_definer_is_jumbo(matcher->mt->definer);
+ ste_attr.wqe_tag_is_jumbo = is_jumbo;
ste_attr.gta_opcode = MLX5DR_WQE_GTA_OP_DEACTIVATE;
if (unlikely(mlx5dr_matcher_is_insert_by_idx(matcher)))
ste_attr.direct_index = attr->rule_idx;
- mlx5dr_send_ste(queue, &ste_attr);
+ mlx5dr_rule_load_delete_info(rule, &ste_attr);
+
+ if (unlikely(fw_wqe)) {
+ mlx5dr_send_stes_fw(queue, &ste_attr);
+ mlx5dr_rule_clear_delete_info(rule);
+ } else {
+ mlx5dr_send_ste(queue, &ste_attr);
+ }
return 0;
}
diff --git a/drivers/net/mlx5/hws/mlx5dr_rule.h b/drivers/net/mlx5/hws/mlx5dr_rule.h
index f2fe418159..886cf77992 100644
--- a/drivers/net/mlx5/hws/mlx5dr_rule.h
+++ b/drivers/net/mlx5/hws/mlx5dr_rule.h
@@ -36,6 +36,8 @@ struct mlx5dr_rule {
struct mlx5dr_matcher *matcher;
union {
struct mlx5dr_rule_match_tag tag;
+ /* Pointer to tag to store more than one tag */
+ struct mlx5dr_rule_match_tag *tag_ptr;
struct ibv_flow *flow;
};
uint32_t rtc_0; /* The RTC into which the STE was inserted */
diff --git a/drivers/net/mlx5/hws/mlx5dr_send.h b/drivers/net/mlx5/hws/mlx5dr_send.h
index 47bb66b3c7..d0977ec851 100644
--- a/drivers/net/mlx5/hws/mlx5dr_send.h
+++ b/drivers/net/mlx5/hws/mlx5dr_send.h
@@ -54,8 +54,13 @@ struct mlx5dr_wqe_gta_data_seg_ste {
__be32 rsvd0_ctr_id;
__be32 rsvd1_definer;
__be32 rsvd2[3];
- __be32 action[3];
- __be32 tag[8];
+ union {
+ struct {
+ __be32 action[3];
+ __be32 tag[8];
+ };
+ __be32 jumbo[11];
+ };
};
struct mlx5dr_wqe_gta_data_seg_arg {
--
2.18.1
next prev parent reply other threads:[~2023-02-01 7:30 UTC|newest]
Thread overview: 36+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-01-31 9:33 [v1 00/16] net/mlx5/hws: support range and partial hash matching Alex Vesker
2023-01-31 9:33 ` [v1 01/16] net/mlx5/hws: support synchronous drain Alex Vesker
2023-01-31 9:33 ` [v1 02/16] net/mlx5/hws: matcher remove AT and MT limitation Alex Vesker
2023-01-31 9:33 ` [v1 03/16] net/mlx5/hws: support GTA WQE write using FW command Alex Vesker
2023-01-31 9:33 ` [v1 04/16] net/mlx5/hws: add capability query for gen wqe command Alex Vesker
2023-01-31 9:33 ` [v1 05/16] net/mlx5/hws: align RTC create command with PRM format Alex Vesker
2023-01-31 9:33 ` [v1 06/16] net/mlx5/hws: add send FW match STE using gen WQE Alex Vesker
2023-01-31 9:33 ` [v1 07/16] net/mlx5/hws: add send FW range STE WQE Alex Vesker
2023-01-31 9:33 ` [v1 08/16] net/mlx5/hws: move matcher size check to function Alex Vesker
2023-01-31 9:33 ` [v1 09/16] net/mlx5/hws: support range match Alex Vesker
2023-01-31 9:33 ` [v1 10/16] net/mlx5/hws: redesign definer create Alex Vesker
2023-01-31 9:33 ` [v1 11/16] net/mlx5/hws: support partial hash Alex Vesker
2023-01-31 9:33 ` [v1 12/16] net/mlx5/hws: add range definer creation support Alex Vesker
2023-01-31 9:33 ` [v1 13/16] net/mlx5/hws: add FW WQE rule creation logic Alex Vesker
2023-01-31 9:33 ` [v1 14/16] net/mlx5/hws: add debug dump support for range and hash Alex Vesker
2023-01-31 9:33 ` [v1 15/16] net/mlx5/hws: rename pattern cache object Alex Vesker
2023-01-31 9:33 ` [v1 16/16] net/mlx5/hws: cache definer for reuse Alex Vesker
2023-02-01 7:27 ` [v2 00/16] net/mlx5/hws: support range and partial hash matching Alex Vesker
2023-02-01 7:28 ` [v2 01/16] net/mlx5/hws: support synchronous drain Alex Vesker
2023-02-01 7:28 ` [v2 02/16] net/mlx5/hws: matcher remove AT and MT limitation Alex Vesker
2023-02-01 7:28 ` [v2 03/16] net/mlx5/hws: support GTA WQE write using FW command Alex Vesker
2023-02-01 7:28 ` [v2 04/16] net/mlx5/hws: add capability query for gen wqe command Alex Vesker
2023-02-01 7:28 ` [v2 05/16] net/mlx5/hws: align RTC create command with PRM format Alex Vesker
2023-02-01 7:28 ` [v2 06/16] net/mlx5/hws: add send FW match STE using gen WQE Alex Vesker
2023-02-01 7:28 ` [v2 07/16] net/mlx5/hws: add send FW range STE WQE Alex Vesker
2023-02-01 7:28 ` [v2 08/16] net/mlx5/hws: move matcher size check to function Alex Vesker
2023-02-01 7:28 ` [v2 09/16] net/mlx5/hws: support range match Alex Vesker
2023-02-01 7:28 ` [v2 10/16] net/mlx5/hws: redesign definer create Alex Vesker
2023-02-01 7:28 ` [v2 11/16] net/mlx5/hws: support partial hash Alex Vesker
2023-02-01 7:28 ` [v2 12/16] net/mlx5/hws: add range definer creation support Alex Vesker
2023-02-01 7:28 ` Alex Vesker [this message]
2023-02-01 7:28 ` [v2 14/16] net/mlx5/hws: add debug dump support for range and hash Alex Vesker
2023-02-01 7:28 ` [v2 15/16] net/mlx5/hws: rename pattern cache object Alex Vesker
2023-02-01 7:28 ` [v2 16/16] net/mlx5/hws: cache definer for reuse Alex Vesker
2023-02-06 15:07 ` [v2 00/16] net/mlx5/hws: support range and partial hash matching Matan Azrad
2023-02-13 8:27 ` Raslan Darawsheh
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230201072815.1329101-14-valex@nvidia.com \
--to=valex@nvidia.com \
--cc=dev@dpdk.org \
--cc=matan@nvidia.com \
--cc=orika@nvidia.com \
--cc=thomas@monjalon.net \
--cc=viacheslavo@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).