DPDK patches and discussions
 help / color / mirror / Atom feed
From: Alex Vesker <valex@nvidia.com>
To: <valex@nvidia.com>, <viacheslavo@nvidia.com>,
	<thomas@monjalon.net>, "Matan Azrad" <matan@nvidia.com>
Cc: <dev@dpdk.org>, <orika@nvidia.com>, Erez Shitrit <erezsh@nvidia.com>
Subject: [v2 01/16] net/mlx5/hws: support synchronous drain
Date: Wed, 1 Feb 2023 09:28:00 +0200	[thread overview]
Message-ID: <20230201072815.1329101-2-valex@nvidia.com> (raw)
In-Reply-To: <20230201072815.1329101-1-valex@nvidia.com>

Until now we supported only asynchronous drain, meaning the queue was
triggered to start the drain. Now we add support for synchronous drain,
which ensures all the work was fully processed on the queue.

This is useful when working with a FW command and a HW queue in parallel:
arguments are sent over the HW queue while the match is sent over the FW
command, which requires synchronization between the two.

This also fixes an issue with shared-argument sends that require more than
one WQE.

Signed-off-by: Erez Shitrit <erezsh@nvidia.com>
Signed-off-by: Alex Vesker <valex@nvidia.com>
---
 drivers/net/mlx5/hws/mlx5dr.h         |  6 ++++--
 drivers/net/mlx5/hws/mlx5dr_pat_arg.c | 27 ++++-----------------------
 drivers/net/mlx5/hws/mlx5dr_send.c    | 16 ++++++++++++++--
 drivers/net/mlx5/hws/mlx5dr_send.h    |  5 +++++
 drivers/net/mlx5/mlx5_flow_hw.c       |  2 +-
 5 files changed, 28 insertions(+), 28 deletions(-)

diff --git a/drivers/net/mlx5/hws/mlx5dr.h b/drivers/net/mlx5/hws/mlx5dr.h
index b3b2bf34f2..2b02884dc3 100644
--- a/drivers/net/mlx5/hws/mlx5dr.h
+++ b/drivers/net/mlx5/hws/mlx5dr.h
@@ -86,8 +86,10 @@ enum mlx5dr_match_template_flags {
 };
 
 enum mlx5dr_send_queue_actions {
-	/* Start executing all pending queued rules and write to HW */
-	MLX5DR_SEND_QUEUE_ACTION_DRAIN = 1 << 0,
+	/* Start executing all pending queued rules */
+	MLX5DR_SEND_QUEUE_ACTION_DRAIN_ASYNC = 1 << 0,
+	/* Start executing all pending queued rules wait till completion */
+	MLX5DR_SEND_QUEUE_ACTION_DRAIN_SYNC = 1 << 1,
 };
 
 struct mlx5dr_context_attr {
diff --git a/drivers/net/mlx5/hws/mlx5dr_pat_arg.c b/drivers/net/mlx5/hws/mlx5dr_pat_arg.c
index df451f1ae0..152025d302 100644
--- a/drivers/net/mlx5/hws/mlx5dr_pat_arg.c
+++ b/drivers/net/mlx5/hws/mlx5dr_pat_arg.c
@@ -306,27 +306,6 @@ void mlx5dr_arg_decapl3_write(struct mlx5dr_send_engine *queue,
 	mlx5dr_send_engine_post_end(&ctrl, &send_attr);
 }
 
-static int
-mlx5dr_arg_poll_for_comp(struct mlx5dr_context *ctx, uint16_t queue_id)
-{
-	struct rte_flow_op_result comp[1];
-	int ret;
-
-	while (true) {
-		ret = mlx5dr_send_queue_poll(ctx, queue_id, comp, 1);
-		if (ret) {
-			if (ret < 0) {
-				DR_LOG(ERR, "Failed mlx5dr_send_queue_poll");
-			} else if (comp[0].status == RTE_FLOW_OP_ERROR) {
-				DR_LOG(ERR, "Got comp with error");
-				rte_errno = ENOENT;
-			}
-			break;
-		}
-	}
-	return (ret == 1 ? 0 : ret);
-}
-
 void mlx5dr_arg_write(struct mlx5dr_send_engine *queue,
 		      void *comp_data,
 		      uint32_t arg_idx,
@@ -388,9 +367,11 @@ int mlx5dr_arg_write_inline_arg_data(struct mlx5dr_context *ctx,
 	mlx5dr_send_engine_flush_queue(queue);
 
 	/* Poll for completion */
-	ret = mlx5dr_arg_poll_for_comp(ctx, ctx->queues - 1);
+	ret = mlx5dr_send_queue_action(ctx, ctx->queues - 1,
+				       MLX5DR_SEND_QUEUE_ACTION_DRAIN_SYNC);
+
 	if (ret)
-		DR_LOG(ERR, "Failed to get completions for shared action");
+		DR_LOG(ERR, "Failed to drain arg queue");
 
 	pthread_spin_unlock(&ctx->ctrl_lock);
 
diff --git a/drivers/net/mlx5/hws/mlx5dr_send.c b/drivers/net/mlx5/hws/mlx5dr_send.c
index 5c8bbe6fc6..a507e5f626 100644
--- a/drivers/net/mlx5/hws/mlx5dr_send.c
+++ b/drivers/net/mlx5/hws/mlx5dr_send.c
@@ -830,18 +830,30 @@ int mlx5dr_send_queue_action(struct mlx5dr_context *ctx,
 {
 	struct mlx5dr_send_ring_sq *send_sq;
 	struct mlx5dr_send_engine *queue;
+	bool wait_comp = false;
+	int64_t polled = 0;
 
 	queue = &ctx->send_queue[queue_id];
 	send_sq = &queue->send_ring->send_sq;
 
-	if (actions == MLX5DR_SEND_QUEUE_ACTION_DRAIN) {
+	switch (actions) {
+	case MLX5DR_SEND_QUEUE_ACTION_DRAIN_SYNC:
+		wait_comp = true;
+		/* FALLTHROUGH */
+	case MLX5DR_SEND_QUEUE_ACTION_DRAIN_ASYNC:
 		if (send_sq->head_dep_idx != send_sq->tail_dep_idx)
 			/* Send dependent WQEs to drain the queue */
 			mlx5dr_send_all_dep_wqe(queue);
 		else
 			/* Signal on the last posted WQE */
 			mlx5dr_send_engine_flush_queue(queue);
-	} else {
+
+		/* Poll queue until empty */
+		while (wait_comp && !mlx5dr_send_engine_empty(queue))
+			mlx5dr_send_engine_poll_cqs(queue, NULL, &polled, 0);
+
+		break;
+	default:
 		rte_errno = -EINVAL;
 		return rte_errno;
 	}
diff --git a/drivers/net/mlx5/hws/mlx5dr_send.h b/drivers/net/mlx5/hws/mlx5dr_send.h
index 8d4769495d..fcddcc6366 100644
--- a/drivers/net/mlx5/hws/mlx5dr_send.h
+++ b/drivers/net/mlx5/hws/mlx5dr_send.h
@@ -240,6 +240,11 @@ void mlx5dr_send_ste(struct mlx5dr_send_engine *queue,
 
 void mlx5dr_send_engine_flush_queue(struct mlx5dr_send_engine *queue);
 
+static inline bool mlx5dr_send_engine_empty(struct mlx5dr_send_engine *queue)
+{
+	return (queue->send_ring->send_sq.cur_post == queue->send_ring->send_cq.poll_wqe);
+}
+
 static inline bool mlx5dr_send_engine_full(struct mlx5dr_send_engine *queue)
 {
 	return queue->used_entries >= queue->th_entries;
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 20c71ff7f0..7e87d589cb 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -2851,7 +2851,7 @@ flow_hw_push(struct rte_eth_dev *dev,
 
 	__flow_hw_push_action(dev, queue);
 	ret = mlx5dr_send_queue_action(priv->dr_ctx, queue,
-				       MLX5DR_SEND_QUEUE_ACTION_DRAIN);
+				       MLX5DR_SEND_QUEUE_ACTION_DRAIN_ASYNC);
 	if (ret) {
 		rte_flow_error_set(error, rte_errno,
 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-- 
2.18.1


  reply	other threads:[~2023-02-01  7:29 UTC|newest]

Thread overview: 36+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-01-31  9:33 [v1 00/16] net/mlx5/hws: support range and partial hash matching Alex Vesker
2023-01-31  9:33 ` [v1 01/16] net/mlx5/hws: support synchronous drain Alex Vesker
2023-01-31  9:33 ` [v1 02/16] net/mlx5/hws: matcher remove AT and MT limitation Alex Vesker
2023-01-31  9:33 ` [v1 03/16] net/mlx5/hws: support GTA WQE write using FW command Alex Vesker
2023-01-31  9:33 ` [v1 04/16] net/mlx5/hws: add capability query for gen wqe command Alex Vesker
2023-01-31  9:33 ` [v1 05/16] net/mlx5/hws: align RTC create command with PRM format Alex Vesker
2023-01-31  9:33 ` [v1 06/16] net/mlx5/hws: add send FW match STE using gen WQE Alex Vesker
2023-01-31  9:33 ` [v1 07/16] net/mlx5/hws: add send FW range STE WQE Alex Vesker
2023-01-31  9:33 ` [v1 08/16] net/mlx5/hws: move matcher size check to function Alex Vesker
2023-01-31  9:33 ` [v1 09/16] net/mlx5/hws: support range match Alex Vesker
2023-01-31  9:33 ` [v1 10/16] net/mlx5/hws: redesign definer create Alex Vesker
2023-01-31  9:33 ` [v1 11/16] net/mlx5/hws: support partial hash Alex Vesker
2023-01-31  9:33 ` [v1 12/16] net/mlx5/hws: add range definer creation support Alex Vesker
2023-01-31  9:33 ` [v1 13/16] net/mlx5/hws: add FW WQE rule creation logic Alex Vesker
2023-01-31  9:33 ` [v1 14/16] net/mlx5/hws: add debug dump support for range and hash Alex Vesker
2023-01-31  9:33 ` [v1 15/16] net/mlx5/hws: rename pattern cache object Alex Vesker
2023-01-31  9:33 ` [v1 16/16] net/mlx5/hws: cache definer for reuse Alex Vesker
2023-02-01  7:27 ` [v2 00/16] net/mlx5/hws: support range and partial hash matching Alex Vesker
2023-02-01  7:28   ` Alex Vesker [this message]
2023-02-01  7:28   ` [v2 02/16] net/mlx5/hws: matcher remove AT and MT limitation Alex Vesker
2023-02-01  7:28   ` [v2 03/16] net/mlx5/hws: support GTA WQE write using FW command Alex Vesker
2023-02-01  7:28   ` [v2 04/16] net/mlx5/hws: add capability query for gen wqe command Alex Vesker
2023-02-01  7:28   ` [v2 05/16] net/mlx5/hws: align RTC create command with PRM format Alex Vesker
2023-02-01  7:28   ` [v2 06/16] net/mlx5/hws: add send FW match STE using gen WQE Alex Vesker
2023-02-01  7:28   ` [v2 07/16] net/mlx5/hws: add send FW range STE WQE Alex Vesker
2023-02-01  7:28   ` [v2 08/16] net/mlx5/hws: move matcher size check to function Alex Vesker
2023-02-01  7:28   ` [v2 09/16] net/mlx5/hws: support range match Alex Vesker
2023-02-01  7:28   ` [v2 10/16] net/mlx5/hws: redesign definer create Alex Vesker
2023-02-01  7:28   ` [v2 11/16] net/mlx5/hws: support partial hash Alex Vesker
2023-02-01  7:28   ` [v2 12/16] net/mlx5/hws: add range definer creation support Alex Vesker
2023-02-01  7:28   ` [v2 13/16] net/mlx5/hws: add FW WQE rule creation logic Alex Vesker
2023-02-01  7:28   ` [v2 14/16] net/mlx5/hws: add debug dump support for range and hash Alex Vesker
2023-02-01  7:28   ` [v2 15/16] net/mlx5/hws: rename pattern cache object Alex Vesker
2023-02-01  7:28   ` [v2 16/16] net/mlx5/hws: cache definer for reuse Alex Vesker
2023-02-06 15:07   ` [v2 00/16] net/mlx5/hws: support range and partial hash matching Matan Azrad
2023-02-13  8:27   ` Raslan Darawsheh

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20230201072815.1329101-2-valex@nvidia.com \
    --to=valex@nvidia.com \
    --cc=dev@dpdk.org \
    --cc=erezsh@nvidia.com \
    --cc=matan@nvidia.com \
    --cc=orika@nvidia.com \
    --cc=thomas@monjalon.net \
    --cc=viacheslavo@nvidia.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).