From: Gregory Etelson <getelson@nvidia.com>
To: <dev@dpdk.org>
Cc: <getelson@nvidia.com>, <matan@nvidia.com>, <rasland@nvidia.com>,
	Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Subject: [PATCH v2 2/5] net/mlx5: remove code duplication
Date: Wed, 8 Mar 2023 19:01:28 +0200	[thread overview]
Message-ID: <20230308170131.3195-3-getelson@nvidia.com> (raw)
In-Reply-To: <20230308170131.3195-1-getelson@nvidia.com>

Replace code duplicated among the async indirect action create, update,
destroy and query handlers with dedicated helper functions:
flow_hw_action_push(), flow_hw_job_get(), flow_hw_job_put(),
flow_hw_action_job_init() and flow_hw_action_finalize().

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
---
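Reviewer note (this text sits between the "---" scissors and the diff,
so git am ignores it): below is a minimal sketch of the common skeleton
every async indirect-action handler follows after this patch. All names
come from the diff that follows; the per-action switch body is elided,
and MLX5_HW_Q_JOB_TYPE_UPDATE is used only as an example job type.

	struct mlx5_hw_q_job *job = NULL;
	bool push = flow_hw_action_push(attr); /* push unless attr->postpone */
	bool aso = false;
	int ret = 0;

	if (attr) {
		job = flow_hw_action_job_init(priv, queue, handle, user_data,
					      NULL, MLX5_HW_Q_JOB_TYPE_UPDATE,
					      error);
		if (!job) /* queue full, rte_errno set by the helper */
			return -rte_errno;
	}
	/* ... per-action-type processing sets ret and may set aso ... */
	if (job)
		/* Success: push and/or enqueue the job; failure: recycle it. */
		flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
	return ret;

flow_hw_action_finalize() enqueues a completed non-ASO job to indir_cq
when the action was pushed and to indir_iq when it was postponed; jobs
for ASO-backed actions complete through the ASO machinery instead, so
they are not enqueued here.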
 drivers/net/mlx5/mlx5.h         |   6 +-
 drivers/net/mlx5/mlx5_flow_hw.c | 182 ++++++++++++++++----------------
 2 files changed, 95 insertions(+), 93 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index aa956ec1b7..a4ed61e257 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -344,11 +344,11 @@ struct mlx5_lb_ctx {
 };
 
 /* HW steering queue job descriptor type. */
-enum {
+enum mlx5_hw_job_type {
 	MLX5_HW_Q_JOB_TYPE_CREATE, /* Flow create job type. */
 	MLX5_HW_Q_JOB_TYPE_DESTROY, /* Flow destroy job type. */
-	MLX5_HW_Q_JOB_TYPE_UPDATE,
-	MLX5_HW_Q_JOB_TYPE_QUERY,
+	MLX5_HW_Q_JOB_TYPE_UPDATE, /* Flow update job type. */
+	MLX5_HW_Q_JOB_TYPE_QUERY, /* Flow query job type. */
 };
 
 #define MLX5_HW_MAX_ITEMS (16)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index cd951019de..8a5e8941fd 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -7626,6 +7626,67 @@ flow_hw_action_handle_validate(struct rte_eth_dev *dev, uint32_t queue,
 	return 0;
 }
 
+static __rte_always_inline bool
+flow_hw_action_push(const struct rte_flow_op_attr *attr)
+{
+	return attr ? !attr->postpone : true;
+}
+
+static __rte_always_inline struct mlx5_hw_q_job *
+flow_hw_job_get(struct mlx5_priv *priv, uint32_t queue)
+{
+	return priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];
+}
+
+static __rte_always_inline void
+flow_hw_job_put(struct mlx5_priv *priv, uint32_t queue)
+{
+	priv->hw_q[queue].job_idx++;
+}
+
+static __rte_always_inline struct mlx5_hw_q_job *
+flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,
+			const struct rte_flow_action_handle *handle,
+			void *user_data, void *query_data,
+			enum mlx5_hw_job_type type,
+			struct rte_flow_error *error)
+{
+	struct mlx5_hw_q_job *job;
+
+	MLX5_ASSERT(queue != MLX5_HW_INV_QUEUE);
+	if (unlikely(!priv->hw_q[queue].job_idx)) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
+				   "Flow queue full.");
+		return NULL;
+	}
+	job = flow_hw_job_get(priv, queue);
+	job->type = type;
+	job->action = handle;
+	job->user_data = user_data;
+	job->query.user = query_data;
+	return job;
+}
+
+static __rte_always_inline void
+flow_hw_action_finalize(struct rte_eth_dev *dev, uint32_t queue,
+			struct mlx5_hw_q_job *job,
+			bool push, bool aso, bool status)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	if (likely(status)) {
+		if (push)
+			__flow_hw_push_action(dev, queue);
+		if (!aso)
+			rte_ring_enqueue(push ?
+					 priv->hw_q[queue].indir_cq :
+					 priv->hw_q[queue].indir_iq,
+					 job);
+	} else {
+		flow_hw_job_put(priv, queue);
+	}
+}
+
 /**
  * Create shared action.
  *
@@ -7663,21 +7724,15 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
 	cnt_id_t cnt_id;
 	uint32_t mtr_id;
 	uint32_t age_idx;
-	bool push = true;
+	bool push = flow_hw_action_push(attr);
 	bool aso = false;
 
 	if (attr) {
-		MLX5_ASSERT(queue != MLX5_HW_INV_QUEUE);
-		if (unlikely(!priv->hw_q[queue].job_idx)) {
-			rte_flow_error_set(error, ENOMEM,
-				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-				"Flow queue full.");
+		job = flow_hw_action_job_init(priv, queue, NULL, user_data,
+					      NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
+					      error);
+		if (!job)
 			return NULL;
-		}
-		job = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];
-		job->type = MLX5_HW_Q_JOB_TYPE_CREATE;
-		job->user_data = user_data;
-		push = !attr->postpone;
 	}
 	switch (action->type) {
 	case RTE_FLOW_ACTION_TYPE_AGE:
@@ -7740,17 +7795,9 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
 		break;
 	}
 	if (job) {
-		if (!handle) {
-			priv->hw_q[queue].job_idx++;
-			return NULL;
-		}
 		job->action = handle;
-		if (push)
-			__flow_hw_push_action(dev, queue);
-		if (aso)
-			return handle;
-		rte_ring_enqueue(push ? priv->hw_q[queue].indir_cq :
-				 priv->hw_q[queue].indir_iq, job);
+		flow_hw_action_finalize(dev, queue, job, push, aso,
+					handle != NULL);
 	}
 	return handle;
 }
@@ -7798,19 +7845,15 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
 	uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
 	int ret = 0;
-	bool push = true;
+	bool push = flow_hw_action_push(attr);
 	bool aso = false;
 
 	if (attr) {
-		MLX5_ASSERT(queue != MLX5_HW_INV_QUEUE);
-		if (unlikely(!priv->hw_q[queue].job_idx))
-			return rte_flow_error_set(error, ENOMEM,
-				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-				"Action update failed due to queue full.");
-		job = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];
-		job->type = MLX5_HW_Q_JOB_TYPE_UPDATE;
-		job->user_data = user_data;
-		push = !attr->postpone;
+		job = flow_hw_action_job_init(priv, queue, handle, user_data,
+					      NULL, MLX5_HW_Q_JOB_TYPE_UPDATE,
+					      error);
+		if (!job)
+			return -rte_errno;
 	}
 	switch (type) {
 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
@@ -7873,19 +7916,8 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
 					  "action type not supported");
 		break;
 	}
-	if (job) {
-		if (ret) {
-			priv->hw_q[queue].job_idx++;
-			return ret;
-		}
-		job->action = handle;
-		if (push)
-			__flow_hw_push_action(dev, queue);
-		if (aso)
-			return 0;
-		rte_ring_enqueue(push ? priv->hw_q[queue].indir_cq :
-				 priv->hw_q[queue].indir_iq, job);
-	}
+	if (job)
+		flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
 	return ret;
 }
 
@@ -7924,20 +7956,16 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
 	struct mlx5_hw_q_job *job = NULL;
 	struct mlx5_aso_mtr *aso_mtr;
 	struct mlx5_flow_meter_info *fm;
-	bool push = true;
+	bool push = flow_hw_action_push(attr);
 	bool aso = false;
 	int ret = 0;
 
 	if (attr) {
-		MLX5_ASSERT(queue != MLX5_HW_INV_QUEUE);
-		if (unlikely(!priv->hw_q[queue].job_idx))
-			return rte_flow_error_set(error, ENOMEM,
-				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-				"Action destroy failed due to queue full.");
-		job = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];
-		job->type = MLX5_HW_Q_JOB_TYPE_DESTROY;
-		job->user_data = user_data;
-		push = !attr->postpone;
+		job = flow_hw_action_job_init(priv, queue, handle, user_data,
+					      NULL, MLX5_HW_Q_JOB_TYPE_DESTROY,
+					      error);
+		if (!job)
+			return -rte_errno;
 	}
 	switch (type) {
 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
@@ -8000,19 +8028,8 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
 					  "action type not supported");
 		break;
 	}
-	if (job) {
-		if (ret) {
-			priv->hw_q[queue].job_idx++;
-			return ret;
-		}
-		job->action = handle;
-		if (push)
-			__flow_hw_push_action(dev, queue);
-		if (aso)
-			return ret;
-		rte_ring_enqueue(push ? priv->hw_q[queue].indir_cq :
-				 priv->hw_q[queue].indir_iq, job);
-	}
+	if (job)
+		flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
 	return ret;
 }
 
@@ -8251,19 +8268,15 @@ flow_hw_action_handle_query(struct rte_eth_dev *dev, uint32_t queue,
 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
 	uint32_t age_idx = act_idx & MLX5_HWS_AGE_IDX_MASK;
 	int ret;
-	bool push = true;
+	bool push = flow_hw_action_push(attr);
 	bool aso = false;
 
 	if (attr) {
-		MLX5_ASSERT(queue != MLX5_HW_INV_QUEUE);
-		if (unlikely(!priv->hw_q[queue].job_idx))
-			return rte_flow_error_set(error, ENOMEM,
-				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-				"Action destroy failed due to queue full.");
-		job = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];
-		job->type = MLX5_HW_Q_JOB_TYPE_QUERY;
-		job->user_data = user_data;
-		push = !attr->postpone;
+		job = flow_hw_action_job_init(priv, queue, handle, user_data,
+					      data, MLX5_HW_Q_JOB_TYPE_QUERY,
+					      error);
+		if (!job)
+			return -rte_errno;
 	}
 	switch (type) {
 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
@@ -8286,19 +8299,8 @@ flow_hw_action_handle_query(struct rte_eth_dev *dev, uint32_t queue,
 					  "action type not supported");
 		break;
 	}
-	if (job) {
-		if (ret) {
-			priv->hw_q[queue].job_idx++;
-			return ret;
-		}
-		job->action = handle;
-		if (push)
-			__flow_hw_push_action(dev, queue);
-		if (aso)
-			return ret;
-		rte_ring_enqueue(push ? priv->hw_q[queue].indir_cq :
-				 priv->hw_q[queue].indir_iq, job);
-	}
+	if (job)
+		flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
 	return 0;
 }
 
-- 
2.34.1

