DPDK patches and discussions
From: Alex Vesker <valex@nvidia.com>
To: <valex@nvidia.com>, <viacheslavo@nvidia.com>,
	<thomas@monjalon.net>, "Matan Azrad" <matan@nvidia.com>
Cc: <dev@dpdk.org>, <orika@nvidia.com>
Subject: [v1 06/16] net/mlx5/hws: add send FW match STE using gen WQE
Date: Tue, 31 Jan 2023 11:33:35 +0200	[thread overview]
Message-ID: <20230131093346.1261066-7-valex@nvidia.com> (raw)
In-Reply-To: <20230131093346.1261066-1-valex@nvidia.com>

The send STE WQE function wraps the generate WQE command to support
WQE build and FDB abstraction. Sending through FW differs from
sending through HW: the FW returns the completion immediately, which
requires us to retry on failure and to prepare the completion as
part of the send process.
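
For context, the rule-creation path is expected to choose between the
existing HW doorbell path and this new FW path roughly as in the sketch
below (hypothetical; the actual dispatch is added later in this series,
and the use_fw_wqe flag is an assumed name, not an identifier from this
patch):

	/* Hypothetical dispatch between the HW and FW STE send paths.
	 * 'use_fw_wqe' is an assumed matcher flag, for illustration only.
	 */
	static void send_rule_stes(struct mlx5dr_send_engine *queue,
				   struct mlx5dr_matcher *matcher,
				   struct mlx5dr_send_ste_attr *ste_attr)
	{
		/* The FW path is synchronous and prepares the completion
		 * in SW, while the HW path posts the WQE to the SQ and
		 * rings the doorbell.
		 */
		if (matcher->use_fw_wqe)
			mlx5dr_send_stes_fw(queue, ste_attr);
		else
			mlx5dr_send_ste(queue, ste_attr);
	}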

Signed-off-by: Alex Vesker <valex@nvidia.com>
---
 drivers/net/mlx5/hws/mlx5dr_send.c | 134 +++++++++++++++++++++++++++++
 drivers/net/mlx5/hws/mlx5dr_send.h |   7 +-
 2 files changed, 140 insertions(+), 1 deletion(-)

diff --git a/drivers/net/mlx5/hws/mlx5dr_send.c b/drivers/net/mlx5/hws/mlx5dr_send.c
index a507e5f626..a9958df4f2 100644
--- a/drivers/net/mlx5/hws/mlx5dr_send.c
+++ b/drivers/net/mlx5/hws/mlx5dr_send.c
@@ -235,6 +235,140 @@ void mlx5dr_send_ste(struct mlx5dr_send_engine *queue,
 	send_attr->fence = fence;
 }
 
+static
+int mlx5dr_send_wqe_fw(struct ibv_context *ibv_ctx,
+		       uint32_t pd_num,
+		       struct mlx5dr_send_engine_post_attr *send_attr,
+		       struct mlx5dr_wqe_gta_ctrl_seg *send_wqe_ctrl,
+		       void *send_wqe_match_data,
+		       void *send_wqe_match_tag,
+		       bool is_jumbo,
+		       uint8_t gta_opcode)
+{
+	bool has_match = send_wqe_match_data || send_wqe_match_tag;
+	struct mlx5dr_wqe_gta_data_seg_ste gta_wqe_data0 = {0};
+	struct mlx5dr_wqe_gta_ctrl_seg gta_wqe_ctrl = {0};
+	struct mlx5dr_cmd_generate_wqe_attr attr = {0};
+	struct mlx5dr_wqe_ctrl_seg wqe_ctrl = {0};
+	struct mlx5_cqe64 cqe;
+	uint32_t flags = 0;
+	int ret;
+
+	/* Set WQE control */
+	wqe_ctrl.opmod_idx_opcode =
+		rte_cpu_to_be_32((send_attr->opmod << 24) | send_attr->opcode);
+	wqe_ctrl.qpn_ds =
+		rte_cpu_to_be_32((send_attr->len + sizeof(struct mlx5dr_wqe_ctrl_seg)) / 16);
+	flags |= send_attr->notify_hw ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
+	wqe_ctrl.flags = rte_cpu_to_be_32(flags);
+	wqe_ctrl.imm = rte_cpu_to_be_32(send_attr->id);
+
+	/* Set GTA WQE CTRL */
+	memcpy(gta_wqe_ctrl.stc_ix, send_wqe_ctrl->stc_ix, sizeof(send_wqe_ctrl->stc_ix));
+	gta_wqe_ctrl.op_dirix = htobe32(gta_opcode << 28);
+
+	/* Set GTA match WQE DATA */
+	if (has_match) {
+		if (send_wqe_match_data)
+			memcpy(&gta_wqe_data0, send_wqe_match_data, sizeof(gta_wqe_data0));
+		else
+			mlx5dr_send_wqe_set_tag(&gta_wqe_data0, send_wqe_match_tag, is_jumbo);
+
+		gta_wqe_data0.rsvd1_definer = htobe32(send_attr->match_definer_id << 8);
+		attr.gta_data_0 = (uint8_t *)&gta_wqe_data0;
+	}
+
+	attr.pdn = pd_num;
+	attr.wqe_ctrl = (uint8_t *)&wqe_ctrl;
+	attr.gta_ctrl = (uint8_t *)&gta_wqe_ctrl;
+
+send_wqe:
+	ret = mlx5dr_cmd_generate_wqe(ibv_ctx, &attr, &cqe);
+	if (ret) {
+		DR_LOG(ERR, "Failed to write WQE using command");
+		return ret;
+	}
+
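+	/* FW prepared the completion, so the CQE is available right away:
+	 * a REQ CQE with the byte_cnt MSB cleared indicates the STE was
+	 * written successfully.
+	 */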
+	if ((mlx5dv_get_cqe_opcode(&cqe) == MLX5_CQE_REQ) &&
+	    (rte_be_to_cpu_32(cqe.byte_cnt) >> 31 == 0)) {
+		*send_attr->used_id = send_attr->id;
+		return 0;
+	}
+
+	/* Retry if rule failed */
+	if (send_attr->retry_id) {
+		wqe_ctrl.imm = rte_cpu_to_be_32(send_attr->retry_id);
+		send_attr->id = send_attr->retry_id;
+		send_attr->retry_id = 0;
+		goto send_wqe;
+	}
+
+	return -1;
+}
+
+void mlx5dr_send_stes_fw(struct mlx5dr_send_engine *queue,
+			 struct mlx5dr_send_ste_attr *ste_attr)
+{
+	struct mlx5dr_send_engine_post_attr *send_attr = &ste_attr->send_attr;
+	struct mlx5dr_rule *rule = send_attr->rule;
+	struct ibv_context *ibv_ctx;
+	struct mlx5dr_context *ctx;
+	uint16_t queue_id;
+	uint32_t pdn;
+	int ret;
+
+	ctx = rule->matcher->tbl->ctx;
+	queue_id = queue - ctx->send_queue;
+	ibv_ctx = ctx->ibv_ctx;
+	pdn = ctx->pd_num;
+
+	/* Writing through FW cannot be fenced by HW, therefore drain the queue */
+	if (send_attr->fence)
+		mlx5dr_send_queue_action(ctx,
+					 queue_id,
+					 MLX5DR_SEND_QUEUE_ACTION_DRAIN_SYNC);
+
+	if (ste_attr->rtc_1) {
+		send_attr->id = ste_attr->rtc_1;
+		send_attr->used_id = ste_attr->used_id_rtc_1;
+		send_attr->retry_id = ste_attr->retry_rtc_1;
+		ret = mlx5dr_send_wqe_fw(ibv_ctx, pdn, send_attr,
+					 ste_attr->wqe_ctrl,
+					 ste_attr->wqe_data,
+					 ste_attr->wqe_tag,
+					 ste_attr->wqe_tag_is_jumbo,
+					 ste_attr->gta_opcode);
+		if (ret)
+			goto fail_rule;
+	}
+
+	if (ste_attr->rtc_0) {
+		send_attr->id = ste_attr->rtc_0;
+		send_attr->used_id = ste_attr->used_id_rtc_0;
+		send_attr->retry_id = ste_attr->retry_rtc_0;
+		ret = mlx5dr_send_wqe_fw(ibv_ctx, pdn, send_attr,
+					 ste_attr->wqe_ctrl,
+					 ste_attr->wqe_data,
+					 ste_attr->wqe_tag,
+					 ste_attr->wqe_tag_is_jumbo,
+					 ste_attr->gta_opcode);
+		if (ret)
+			goto fail_rule;
+	}
+
+	/* Increase the status, this only works on the good flow since the
+	 * enum is arranged as: creating -> created -> deleting -> deleted
+	 */
+	rule->status++;
+	mlx5dr_send_engine_gen_comp(queue, send_attr->user_data, RTE_FLOW_OP_SUCCESS);
+	return;
+
+fail_rule:
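+	/* Mark FAILED when the rule is not bound to any RTC, otherwise FAILING */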
+	rule->status = !rule->rtc_0 && !rule->rtc_1 ?
+		MLX5DR_RULE_STATUS_FAILED : MLX5DR_RULE_STATUS_FAILING;
+	mlx5dr_send_engine_gen_comp(queue, send_attr->user_data, RTE_FLOW_OP_ERROR);
+}
+
 static void mlx5dr_send_engine_retry_post_send(struct mlx5dr_send_engine *queue,
 					       struct mlx5dr_send_ring_priv *priv,
 					       uint16_t wqe_cnt)
diff --git a/drivers/net/mlx5/hws/mlx5dr_send.h b/drivers/net/mlx5/hws/mlx5dr_send.h
index fcddcc6366..1e845b1c7a 100644
--- a/drivers/net/mlx5/hws/mlx5dr_send.h
+++ b/drivers/net/mlx5/hws/mlx5dr_send.h
@@ -52,7 +52,8 @@ struct mlx5dr_wqe_gta_ctrl_seg {
 
 struct mlx5dr_wqe_gta_data_seg_ste {
 	__be32 rsvd0_ctr_id;
-	__be32 rsvd1[4];
+	__be32 rsvd1_definer;
+	__be32 rsvd2[3];
 	__be32 action[3];
 	__be32 tag[8];
 };
@@ -159,6 +160,7 @@ struct mlx5dr_send_engine_post_attr {
 	uint8_t opmod;
 	uint8_t notify_hw;
 	uint8_t fence;
+	uint8_t match_definer_id;
 	size_t len;
 	struct mlx5dr_rule *rule;
 	uint32_t id;
@@ -238,6 +240,9 @@ void mlx5dr_send_engine_post_end(struct mlx5dr_send_engine_post_ctrl *ctrl,
 void mlx5dr_send_ste(struct mlx5dr_send_engine *queue,
 		     struct mlx5dr_send_ste_attr *ste_attr);
 
+void mlx5dr_send_stes_fw(struct mlx5dr_send_engine *queue,
+			 struct mlx5dr_send_ste_attr *ste_attr);
+
 void mlx5dr_send_engine_flush_queue(struct mlx5dr_send_engine *queue);
 
 static inline bool mlx5dr_send_engine_empty(struct mlx5dr_send_engine *queue)
-- 
2.18.1

