From: Gregory Etelson <getelson@nvidia.com>
To: <dev@dpdk.org>
Cc: <getelson@nvidia.com>, <mkashani@nvidia.com>,
	<rasland@nvidia.com>, "Ori Kam" <orika@nvidia.com>,
	Matan Azrad <matan@nvidia.com>,
	Viacheslav Ovsiienko <viacheslavo@nvidia.com>,
	Suanming Mou <suanmingm@nvidia.com>
Subject: [PATCH 2/2] net/mlx5: fix indirect list actions completions processing
Date: Thu, 16 Nov 2023 10:08:33 +0200
Message-ID: <20231116080833.336377-3-getelson@nvidia.com>
In-Reply-To: <20231116080833.336377-1-getelson@nvidia.com>

The MLX5 PMD separates async HWS job completions into 2 categories:
- HWS flow rule completions;
- HWS indirect action completions.

When processing the latter, the current PMD could not differentiate
between completions of legacy indirect actions and completions of
indirect list actions.

The patch marks the async job object with the indirect action type and
processes job completions according to that type.
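
In essence (condensed from the diff below; all names as in the patch),
the completion path becomes a dispatch on the new tag:

	/* On submission, tag the job with its action category. */
	job->indirect_type = MLX5_HW_INDIRECT_TYPE_LEGACY; /* or ..._TYPE_LIST */

	/* On completion pull, dispatch on the tag. */
	if (job->indirect_type == MLX5_HW_INDIRECT_TYPE_LEGACY)
		flow_hw_pull_legacy_indirect_comp(dev, job, queue);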

The current PMD supports 2 indirect action list types - MIRROR and
REFORMAT. Neither list type posts a WQE to create the action.
Therefore, the patch does not process `MLX5_HW_INDIRECT_TYPE_LIST`
jobs.

The new `indirect_type` member does not increase the size of
`struct mlx5_hw_q_job`.
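
That holds because, on 64-bit targets, the pointer-holding union that
follows is 8-byte aligned, so the 4-byte `indirect_type` slots into the
padding hole after `type`. A minimal standalone sketch of the layout
(simplified; the real struct has more union members):

	#include <stdint.h>
	#include <assert.h>

	struct job_layout {
		uint32_t type;          /* offset 0 */
		uint32_t indirect_type; /* offset 4: fills former padding */
		union {
			void *flow;     /* offset 8: 8-byte aligned */
			const void *action;
		};
	};

	/* Same size with or without indirect_type: 16 bytes on LP64. */
	static_assert(sizeof(struct job_layout) == 16, "no size growth");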

Fixes: 3564e928c759 ("net/mlx5: support HWS flow mirror action")
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Ori Kam <orika@nvidia.com>

---
 drivers/net/mlx5/mlx5.h         |   6 ++
 drivers/net/mlx5/mlx5_flow_hw.c | 107 ++++++++++++++++++--------------
 2 files changed, 68 insertions(+), 45 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index f0d63a0ba5..76bf7d0f4f 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -382,11 +382,17 @@ enum mlx5_hw_job_type {
 	MLX5_HW_Q_JOB_TYPE_UPDATE_QUERY, /* Flow update and query job type. */
 };
 
+enum mlx5_hw_indirect_type {
+	MLX5_HW_INDIRECT_TYPE_LEGACY,
+	MLX5_HW_INDIRECT_TYPE_LIST
+};
+
 #define MLX5_HW_MAX_ITEMS (16)
 
 /* HW steering flow management job descriptor. */
 struct mlx5_hw_q_job {
 	uint32_t type; /* Job type. */
+	uint32_t indirect_type;
 	union {
 		struct rte_flow_hw *flow; /* Flow attached to the job. */
 		const void *action; /* Indirect action attached to the job. */
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index fb2e6bf67b..da873ae2e2 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -3740,6 +3740,56 @@ flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue,
 	}
 }
 
+static __rte_always_inline void
+flow_hw_pull_legacy_indirect_comp(struct rte_eth_dev *dev, struct mlx5_hw_q_job *job,
+				  uint32_t queue)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_aso_ct_action *aso_ct;
+	struct mlx5_aso_mtr *aso_mtr;
+	uint32_t type, idx;
+
+	if (MLX5_INDIRECT_ACTION_TYPE_GET(job->action) ==
+	    MLX5_INDIRECT_ACTION_TYPE_QUOTA) {
+		mlx5_quota_async_completion(dev, queue, job);
+	} else if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) {
+		type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
+		if (type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK) {
+			idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
+			mlx5_ipool_free(priv->hws_mpool->idx_pool, idx);
+		}
+	} else if (job->type == MLX5_HW_Q_JOB_TYPE_CREATE) {
+		type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
+		if (type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK) {
+			idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
+			aso_mtr = mlx5_ipool_get(priv->hws_mpool->idx_pool, idx);
+			aso_mtr->state = ASO_METER_READY;
+		} else if (type == MLX5_INDIRECT_ACTION_TYPE_CT) {
+			idx = MLX5_ACTION_CTX_CT_GET_IDX
+			((uint32_t)(uintptr_t)job->action);
+			aso_ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);
+			aso_ct->state = ASO_CONNTRACK_READY;
+		}
+	} else if (job->type == MLX5_HW_Q_JOB_TYPE_QUERY) {
+		type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
+		if (type == MLX5_INDIRECT_ACTION_TYPE_CT) {
+			idx = MLX5_ACTION_CTX_CT_GET_IDX
+			((uint32_t)(uintptr_t)job->action);
+			aso_ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);
+			mlx5_aso_ct_obj_analyze(job->query.user,
+						job->query.hw);
+			aso_ct->state = ASO_CONNTRACK_READY;
+		}
+	} else {
+		/*
+		 * rte_flow_op_result::user data can point to
+		 * struct mlx5_aso_mtr object as well
+		 */
+		if (queue != CTRL_QUEUE_ID(priv))
+			MLX5_ASSERT(false);
+	}
+}
+
 static inline int
 __flow_hw_pull_indir_action_comp(struct rte_eth_dev *dev,
 				 uint32_t queue,
@@ -3749,11 +3799,7 @@ __flow_hw_pull_indir_action_comp(struct rte_eth_dev *dev,
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct rte_ring *r = priv->hw_q[queue].indir_cq;
-	struct mlx5_hw_q_job *job = NULL;
 	void *user_data = NULL;
-	uint32_t type, idx;
-	struct mlx5_aso_mtr *aso_mtr;
-	struct mlx5_aso_ct_action *aso_ct;
 	int ret_comp, i;
 
 	ret_comp = (int)rte_ring_count(r);
@@ -3775,49 +3821,18 @@ __flow_hw_pull_indir_action_comp(struct rte_eth_dev *dev,
 						     &res[ret_comp],
 						     n_res - ret_comp);
 	for (i = 0; i <  ret_comp; i++) {
-		job = (struct mlx5_hw_q_job *)res[i].user_data;
+		struct mlx5_hw_q_job *job = (struct mlx5_hw_q_job *)res[i].user_data;
+
 		/* Restore user data. */
 		res[i].user_data = job->user_data;
-		if (MLX5_INDIRECT_ACTION_TYPE_GET(job->action) ==
-		    MLX5_INDIRECT_ACTION_TYPE_QUOTA) {
-			mlx5_quota_async_completion(dev, queue, job);
-		} else if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) {
-			type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
-			if (type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK) {
-				idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
-				mlx5_ipool_free(priv->hws_mpool->idx_pool, idx);
-			}
-		} else if (job->type == MLX5_HW_Q_JOB_TYPE_CREATE) {
-			type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
-			if (type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK) {
-				idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
-				aso_mtr = mlx5_ipool_get(priv->hws_mpool->idx_pool, idx);
-				aso_mtr->state = ASO_METER_READY;
-			} else if (type == MLX5_INDIRECT_ACTION_TYPE_CT) {
-				idx = MLX5_ACTION_CTX_CT_GET_IDX
-					((uint32_t)(uintptr_t)job->action);
-				aso_ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);
-				aso_ct->state = ASO_CONNTRACK_READY;
-			}
-		} else if (job->type == MLX5_HW_Q_JOB_TYPE_QUERY) {
-			type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
-			if (type == MLX5_INDIRECT_ACTION_TYPE_CT) {
-				idx = MLX5_ACTION_CTX_CT_GET_IDX
-					((uint32_t)(uintptr_t)job->action);
-				aso_ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);
-				mlx5_aso_ct_obj_analyze(job->query.user,
-							job->query.hw);
-				aso_ct->state = ASO_CONNTRACK_READY;
-			}
-		} else {
-			/*
-			 * rte_flow_op_result::user data can point to
-			 * struct mlx5_aso_mtr object as well
-			 */
-			if (queue == CTRL_QUEUE_ID(priv))
-				continue;
-			MLX5_ASSERT(false);
-		}
+		if (job->indirect_type == MLX5_HW_INDIRECT_TYPE_LEGACY)
+			flow_hw_pull_legacy_indirect_comp(dev, job, queue);
+		/*
+		 * Current PMD supports 2 indirect action list types - MIRROR and REFORMAT.
+		 * These indirect list types do not post WQE to create action.
+		 * Future indirect list types that do post WQE will add
+		 * completion handlers here.
+		 */
 		flow_hw_job_put(priv, job, queue);
 	}
 	return ret_comp;
@@ -10109,6 +10124,7 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
 	}
 	if (job) {
 		job->action = handle;
+		job->indirect_type = MLX5_HW_INDIRECT_TYPE_LEGACY;
 		flow_hw_action_finalize(dev, queue, job, push, aso,
 					handle != NULL);
 	}
@@ -11341,6 +11357,7 @@ flow_hw_async_action_list_handle_create(struct rte_eth_dev *dev, uint32_t queue,
 	}
 	if (job) {
 		job->action = handle;
+		job->indirect_type = MLX5_HW_INDIRECT_TYPE_LIST;
 		flow_hw_action_finalize(dev, queue, job, push, false,
 					handle != NULL);
 	}
-- 
2.39.2

