From: Gregory Etelson <getelson@nvidia.com>
To: <dev@dpdk.org>
Cc: <getelson@nvidia.com>, <matan@nvidia.com>, <rasland@nvidia.com>,
	Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Subject: [PATCH v2 4/5] net/mlx5: add indirect QUOTA create/query/modify
Date: Wed, 8 Mar 2023 19:01:30 +0200
Message-ID: <20230308170131.3195-5-getelson@nvidia.com>
In-Reply-To: <20230308170131.3195-1-getelson@nvidia.com>

Implement the HWS (hardware steering) handlers for indirect QUOTA
action creation, modification and query.
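
The sketch below shows how an application could exercise the new entry
points. It is a minimal, illustrative example only: it assumes the
generic quota API from the matching ethdev series
(rte_flow_action_quota, rte_flow_update_quota, rte_flow_query_quota),
a port already set up by rte_flow_configure() with a non-zero
nb_quotas port attribute, and a hypothetical port_id; results of the
async calls become valid only after their completions are fetched
with rte_flow_pull().

    #include <rte_flow.h>

    uint16_t port_id = 0;               /* assumed, for illustration */
    struct rte_flow_op_attr op_attr = { .postpone = 0 };
    struct rte_flow_error error;

    /* Create an indirect QUOTA action on flow queue 0. */
    struct rte_flow_action_quota quota_conf = {
        .mode = RTE_FLOW_QUOTA_MODE_L3, /* meter bytes from L3 header */
        .quota = 1 << 20,               /* initial budget */
    };
    struct rte_flow_action quota_action = {
        .type = RTE_FLOW_ACTION_TYPE_QUOTA,
        .conf = &quota_conf,
    };
    struct rte_flow_indir_action_conf indir_conf = { .ingress = 1 };
    struct rte_flow_action_handle *handle;

    handle = rte_flow_async_action_handle_create(port_id, 0, &op_attr,
                                                 &indir_conf,
                                                 &quota_action,
                                                 NULL, &error);

    /* Read the remaining quota and top up the budget in one call.
     * The PMD accepts quota query-and-update only in QUERY_FIRST mode.
     */
    struct rte_flow_update_quota update = {
        .op = RTE_FLOW_UPDATE_QUOTA_ADD,
        .quota = 1 << 20,
    };
    struct rte_flow_query_quota query;

    rte_flow_async_action_handle_query_update(port_id, 0, &op_attr,
                                              handle, &update, &query,
                                              RTE_FLOW_QU_QUERY_FIRST,
                                              NULL, &error);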

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
---
 drivers/net/mlx5/meson.build     |   1 +
 drivers/net/mlx5/mlx5.h          |  72 +++++++
 drivers/net/mlx5/mlx5_flow.c     |  62 ++++++
 drivers/net/mlx5/mlx5_flow.h     |  20 +-
 drivers/net/mlx5/mlx5_flow_aso.c |   8 +-
 drivers/net/mlx5/mlx5_flow_hw.c  | 343 ++++++++++++++++++++++++-------
 6 files changed, 425 insertions(+), 81 deletions(-)

diff --git a/drivers/net/mlx5/meson.build b/drivers/net/mlx5/meson.build
index abd507bd88..323c381d2b 100644
--- a/drivers/net/mlx5/meson.build
+++ b/drivers/net/mlx5/meson.build
@@ -23,6 +23,7 @@ sources = files(
         'mlx5_flow_dv.c',
         'mlx5_flow_aso.c',
         'mlx5_flow_flex.c',
+        'mlx5_flow_quota.c',
         'mlx5_mac.c',
         'mlx5_rss.c',
         'mlx5_rx.c',
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index a4ed61e257..6e6f2f53eb 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -46,6 +46,14 @@
 
 #define MLX5_HW_INV_QUEUE UINT32_MAX
 
+/*
+ * The default ipool threshold value indicates which per_core_cache
+ * value to set.
+ */
+#define MLX5_HW_IPOOL_SIZE_THRESHOLD (1 << 19)
+/* The default min local cache size. */
+#define MLX5_HW_IPOOL_CACHE_MIN (1 << 9)
+
 /*
  * Number of modification commands.
  * The maximal actions amount in FW is some constant, and it is 16 in the
@@ -349,6 +357,7 @@ enum mlx5_hw_job_type {
 	MLX5_HW_Q_JOB_TYPE_DESTROY, /* Flow destroy job type. */
 	MLX5_HW_Q_JOB_TYPE_UPDATE, /* Flow update job type. */
 	MLX5_HW_Q_JOB_TYPE_QUERY, /* Flow query job type. */
+	MLX5_HW_Q_JOB_TYPE_UPDATE_QUERY, /* Flow update and query job type. */
 };
 
 #define MLX5_HW_MAX_ITEMS (16)
@@ -601,6 +610,7 @@ struct mlx5_aso_sq_elem {
 			char *query_data;
 		};
 		void *user_data;
+		struct mlx5_quota *quota_obj;
 	};
 };
 
@@ -1658,6 +1668,33 @@ struct mlx5_hw_ctrl_flow {
 
 struct mlx5_flow_hw_ctrl_rx;
 
+enum mlx5_quota_state {
+	MLX5_QUOTA_STATE_FREE,	/* Quota not in use. */
+	MLX5_QUOTA_STATE_READY, /* Quota is ready. */
+	MLX5_QUOTA_STATE_WAIT	/* Quota waits for WR completion. */
+};
+
+struct mlx5_quota {
+	uint8_t state; /* Object state. */
+	uint8_t mode;  /* Metering mode. */
+	/**
+	 * Keep track of application update types.
+	 * PMD does not allow 2 consecutive ADD updates.
+	 */
+	enum rte_flow_update_quota_op last_update;
+};
+
+/* Bulk management structure for flow quota. */
+struct mlx5_quota_ctx {
+	uint32_t nb_quotas; /* Total number of quota objects. */
+	struct mlx5dr_action *dr_action; /* HWS action. */
+	struct mlx5_devx_obj *devx_obj; /* DEVX ranged object. */
+	struct mlx5_pmd_mr mr; /* MR for READ from MTR ASO. */
+	struct mlx5_aso_mtr_dseg **read_buf; /* Buffers for READ. */
+	struct mlx5_aso_sq *sq; /* SQs for sync/async ACCESS_ASO WRs. */
+	struct mlx5_indexed_pool *quota_ipool; /* Manage quota objects. */
+};
+
 struct mlx5_priv {
 	struct rte_eth_dev_data *dev_data;  /* Pointer to device data. */
 	struct mlx5_dev_ctx_shared *sh; /* Shared device context. */
@@ -1747,6 +1784,7 @@ struct mlx5_priv {
 	struct mlx5_flow_meter_policy *mtr_policy_arr; /* Policy array. */
 	struct mlx5_l3t_tbl *mtr_idx_tbl; /* Meter index lookup table. */
 	struct mlx5_mtr_bulk mtr_bulk; /* Meter index mapping for HWS */
+	struct mlx5_quota_ctx quota_ctx; /* Quota index mapping for HWS */
 	uint8_t skip_default_rss_reta; /* Skip configuration of default reta. */
 	uint8_t fdb_def_rule; /* Whether fdb jump to table 1 is configured. */
 	struct mlx5_mp_id mp_id; /* ID of a multi-process process */
@@ -2242,6 +2280,15 @@ int mlx5_aso_ct_queue_init(struct mlx5_dev_ctx_shared *sh,
 			   uint32_t nb_queues);
 int mlx5_aso_ct_queue_uninit(struct mlx5_dev_ctx_shared *sh,
 			     struct mlx5_aso_ct_pools_mng *ct_mng);
+int
+mlx5_aso_sq_create(struct mlx5_common_device *cdev, struct mlx5_aso_sq *sq,
+		   void *uar, uint16_t log_desc_n);
+void
+mlx5_aso_destroy_sq(struct mlx5_aso_sq *sq);
+void
+mlx5_aso_mtr_init_sq(struct mlx5_aso_sq *sq);
+void
+mlx5_aso_cqe_err_handle(struct mlx5_aso_sq *sq);
 
 /* mlx5_flow_flex.c */
 
@@ -2273,6 +2320,31 @@ struct mlx5_list_entry *mlx5_flex_parser_clone_cb(void *list_ctx,
 void mlx5_flex_parser_clone_free_cb(void *tool_ctx,
 				    struct mlx5_list_entry *entry);
 
+int
+mlx5_flow_quota_destroy(struct rte_eth_dev *dev);
+int
+mlx5_flow_quota_init(struct rte_eth_dev *dev, uint32_t nb_quotas);
+struct rte_flow_action_handle *
+mlx5_quota_alloc(struct rte_eth_dev *dev, uint32_t queue,
+		 const struct rte_flow_action_quota *conf,
+		 struct mlx5_hw_q_job *job, bool push,
+		 struct rte_flow_error *error);
+void
+mlx5_quota_async_completion(struct rte_eth_dev *dev, uint32_t queue,
+			    struct mlx5_hw_q_job *job);
+int
+mlx5_quota_query_update(struct rte_eth_dev *dev, uint32_t queue,
+			struct rte_flow_action_handle *handle,
+			const struct rte_flow_action *update,
+			struct rte_flow_query_quota *query,
+			struct mlx5_hw_q_job *async_job, bool push,
+			struct rte_flow_error *error);
+int mlx5_quota_query(struct rte_eth_dev *dev, uint32_t queue,
+		     const struct rte_flow_action_handle *handle,
+		     struct rte_flow_query_quota *query,
+		     struct mlx5_hw_q_job *async_job, bool push,
+		     struct rte_flow_error *error);
+
 int mlx5_alloc_srh_flex_parser(struct rte_eth_dev *dev);
 
 void mlx5_free_srh_flex_parser(struct rte_eth_dev *dev);
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index a6a426caf7..682f942dc4 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1075,6 +1075,20 @@ mlx5_flow_async_action_handle_query(struct rte_eth_dev *dev, uint32_t queue,
 				 void *data,
 				 void *user_data,
 				 struct rte_flow_error *error);
+static int
+mlx5_action_handle_query_update(struct rte_eth_dev *dev,
+				struct rte_flow_action_handle *handle,
+				const void *update, void *query,
+				enum rte_flow_query_update_mode qu_mode,
+				struct rte_flow_error *error);
+static int
+mlx5_flow_async_action_handle_query_update
+	(struct rte_eth_dev *dev, uint32_t queue_id,
+	 const struct rte_flow_op_attr *op_attr,
+	 struct rte_flow_action_handle *action_handle,
+	 const void *update, void *query,
+	 enum rte_flow_query_update_mode qu_mode,
+	 void *user_data, struct rte_flow_error *error);
 
 static const struct rte_flow_ops mlx5_flow_ops = {
 	.validate = mlx5_flow_validate,
@@ -1090,6 +1104,7 @@ static const struct rte_flow_ops mlx5_flow_ops = {
 	.action_handle_destroy = mlx5_action_handle_destroy,
 	.action_handle_update = mlx5_action_handle_update,
 	.action_handle_query = mlx5_action_handle_query,
+	.action_handle_query_update = mlx5_action_handle_query_update,
 	.tunnel_decap_set = mlx5_flow_tunnel_decap_set,
 	.tunnel_match = mlx5_flow_tunnel_match,
 	.tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
@@ -1112,6 +1127,8 @@ static const struct rte_flow_ops mlx5_flow_ops = {
 	.push = mlx5_flow_push,
 	.async_action_handle_create = mlx5_flow_async_action_handle_create,
 	.async_action_handle_update = mlx5_flow_async_action_handle_update,
+	.async_action_handle_query_update =
+		mlx5_flow_async_action_handle_query_update,
 	.async_action_handle_query = mlx5_flow_async_action_handle_query,
 	.async_action_handle_destroy = mlx5_flow_async_action_handle_destroy,
 };
@@ -9092,6 +9109,27 @@ mlx5_flow_async_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
 					 update, user_data, error);
 }
 
+static int
+mlx5_flow_async_action_handle_query_update
+	(struct rte_eth_dev *dev, uint32_t queue_id,
+	 const struct rte_flow_op_attr *op_attr,
+	 struct rte_flow_action_handle *action_handle,
+	 const void *update, void *query,
+	 enum rte_flow_query_update_mode qu_mode,
+	 void *user_data, struct rte_flow_error *error)
+{
+	const struct mlx5_flow_driver_ops *fops =
+		flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
+
+	if (!fops || !fops->async_action_query_update)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "async query_update not supported");
+	return fops->async_action_query_update
+			   (dev, queue_id, op_attr, action_handle,
+			    update, query, qu_mode, user_data, error);
+}
+
 /**
  * Query shared action.
  *
@@ -10230,6 +10268,30 @@ mlx5_action_handle_query(struct rte_eth_dev *dev,
 	return flow_drv_action_query(dev, handle, data, fops, error);
 }
 
+static int
+mlx5_action_handle_query_update(struct rte_eth_dev *dev,
+				struct rte_flow_action_handle *handle,
+				const void *update, void *query,
+				enum rte_flow_query_update_mode qu_mode,
+				struct rte_flow_error *error)
+{
+	struct rte_flow_attr attr = { .transfer = 0 };
+	enum mlx5_flow_drv_type drv_type = flow_get_drv_type(dev, &attr);
+	const struct mlx5_flow_driver_ops *fops;
+
+	if (drv_type == MLX5_FLOW_TYPE_MIN || drv_type == MLX5_FLOW_TYPE_MAX)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION,
+					  NULL, "invalid driver type");
+	fops = flow_get_drv_ops(drv_type);
+	if (!fops || !fops->action_query_update)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION,
+					  NULL, "no query_update handler");
+	return fops->action_query_update(dev, handle, update,
+					 query, qu_mode, error);
+}
+
 /**
  * Destroy all indirect actions (shared RSS).
  *
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 4bef2296b8..3ba178bd6c 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -70,6 +70,7 @@ enum {
 	MLX5_INDIRECT_ACTION_TYPE_COUNT,
 	MLX5_INDIRECT_ACTION_TYPE_CT,
 	MLX5_INDIRECT_ACTION_TYPE_METER_MARK,
+	MLX5_INDIRECT_ACTION_TYPE_QUOTA,
 };
 
 /* Now, the maximal ports will be supported is 16, action number is 32M. */
@@ -218,6 +219,8 @@ enum mlx5_feature_name {
 
 /* Meter color item */
 #define MLX5_FLOW_ITEM_METER_COLOR (UINT64_C(1) << 44)
+#define MLX5_FLOW_ITEM_QUOTA (UINT64_C(1) << 45)
+
 
 /* IPv6 routing extension item */
 #define MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT (UINT64_C(1) << 45)
@@ -307,6 +310,7 @@ enum mlx5_feature_name {
 #define MLX5_FLOW_ACTION_SEND_TO_KERNEL (1ull << 42)
 #define MLX5_FLOW_ACTION_INDIRECT_COUNT (1ull << 43)
 #define MLX5_FLOW_ACTION_INDIRECT_AGE (1ull << 44)
+#define MLX5_FLOW_ACTION_QUOTA (1ull << 46)
 
 #define MLX5_FLOW_DROP_INCLUSIVE_ACTIONS \
 	(MLX5_FLOW_ACTION_COUNT | MLX5_FLOW_ACTION_SAMPLE | MLX5_FLOW_ACTION_AGE)
@@ -1703,6 +1707,12 @@ typedef int (*mlx5_flow_action_query_t)
 			 const struct rte_flow_action_handle *action,
 			 void *data,
 			 struct rte_flow_error *error);
+typedef int (*mlx5_flow_action_query_update_t)
+			(struct rte_eth_dev *dev,
+			 struct rte_flow_action_handle *handle,
+			 const void *update, void *data,
+			 enum rte_flow_query_update_mode qu_mode,
+			 struct rte_flow_error *error);
 typedef int (*mlx5_flow_sync_domain_t)
 			(struct rte_eth_dev *dev,
 			 uint32_t domains,
@@ -1849,7 +1859,13 @@ typedef int (*mlx5_flow_async_action_handle_update_t)
 			 const void *update,
 			 void *user_data,
 			 struct rte_flow_error *error);
-
+typedef int (*mlx5_flow_async_action_handle_query_update_t)
+			(struct rte_eth_dev *dev, uint32_t queue_id,
+			 const struct rte_flow_op_attr *op_attr,
+			 struct rte_flow_action_handle *action_handle,
+			 const void *update, void *data,
+			 enum rte_flow_query_update_mode qu_mode,
+			 void *user_data, struct rte_flow_error *error);
 typedef int (*mlx5_flow_async_action_handle_query_t)
 			(struct rte_eth_dev *dev,
 			 uint32_t queue,
@@ -1900,6 +1916,7 @@ struct mlx5_flow_driver_ops {
 	mlx5_flow_action_destroy_t action_destroy;
 	mlx5_flow_action_update_t action_update;
 	mlx5_flow_action_query_t action_query;
+	mlx5_flow_action_query_update_t action_query_update;
 	mlx5_flow_sync_domain_t sync_domain;
 	mlx5_flow_discover_priorities_t discover_priorities;
 	mlx5_flow_item_create_t item_create;
@@ -1921,6 +1938,7 @@ struct mlx5_flow_driver_ops {
 	mlx5_flow_push_t push;
 	mlx5_flow_async_action_handle_create_t async_action_create;
 	mlx5_flow_async_action_handle_update_t async_action_update;
+	mlx5_flow_async_action_handle_query_update_t async_action_query_update;
 	mlx5_flow_async_action_handle_query_t async_action_query;
 	mlx5_flow_async_action_handle_destroy_t async_action_destroy;
 };
diff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c
index 0eb91c570f..3c08da0614 100644
--- a/drivers/net/mlx5/mlx5_flow_aso.c
+++ b/drivers/net/mlx5/mlx5_flow_aso.c
@@ -74,7 +74,7 @@ mlx5_aso_reg_mr(struct mlx5_common_device *cdev, size_t length,
  * @param[in] sq
  *   ASO SQ to destroy.
  */
-static void
+void
 mlx5_aso_destroy_sq(struct mlx5_aso_sq *sq)
 {
 	mlx5_devx_sq_destroy(&sq->sq_obj);
@@ -148,7 +148,7 @@ mlx5_aso_age_init_sq(struct mlx5_aso_sq *sq)
  * @param[in] sq
  *   ASO SQ to initialize.
  */
-static void
+void
 mlx5_aso_mtr_init_sq(struct mlx5_aso_sq *sq)
 {
 	volatile struct mlx5_aso_wqe *restrict wqe;
@@ -219,7 +219,7 @@ mlx5_aso_ct_init_sq(struct mlx5_aso_sq *sq)
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static int
+int
 mlx5_aso_sq_create(struct mlx5_common_device *cdev, struct mlx5_aso_sq *sq,
 		   void *uar, uint16_t log_desc_n)
 {
@@ -504,7 +504,7 @@ mlx5_aso_dump_err_objs(volatile uint32_t *cqe, volatile uint32_t *wqe)
  * @param[in] sq
  *   ASO SQ to use.
  */
-static void
+void
 mlx5_aso_cqe_err_handle(struct mlx5_aso_sq *sq)
 {
 	struct mlx5_aso_cq *cq = &sq->cq;
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 8a5e8941fd..0343d0a891 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -70,6 +70,9 @@ flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev,
 			       struct mlx5_action_construct_data *act_data,
 			       const struct mlx5_hw_actions *hw_acts,
 			       const struct rte_flow_action *action);
+static void
+flow_hw_construct_quota(struct mlx5_priv *priv,
+			struct mlx5dr_rule_action *rule_act, uint32_t qid);
 
 static __rte_always_inline uint32_t flow_hw_tx_tag_regc_mask(struct rte_eth_dev *dev);
 static __rte_always_inline uint32_t flow_hw_tx_tag_regc_value(struct rte_eth_dev *dev);
@@ -797,6 +800,9 @@ flow_hw_shared_action_translate(struct rte_eth_dev *dev,
 			action_src, action_dst, idx))
 			return -1;
 		break;
+	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
+		flow_hw_construct_quota(priv, &acts->rule_acts[action_dst], idx);
+		break;
 	default:
 		DRV_LOG(WARNING, "Unsupported shared action type:%d", type);
 		break;
@@ -1840,6 +1846,16 @@ flow_hw_shared_action_get(struct rte_eth_dev *dev,
 	return -1;
 }
 
+static void
+flow_hw_construct_quota(struct mlx5_priv *priv,
+			struct mlx5dr_rule_action *rule_act, uint32_t qid)
+{
+	rule_act->action = priv->quota_ctx.dr_action;
+	rule_act->aso_meter.offset = qid - 1;
+	rule_act->aso_meter.init_color =
+		MLX5DR_ACTION_ASO_METER_COLOR_GREEN;
+}
+
 /**
  * Construct shared indirect action.
  *
@@ -1963,6 +1979,9 @@ flow_hw_shared_action_construct(struct rte_eth_dev *dev, uint32_t queue,
 			(enum mlx5dr_action_aso_meter_color)
 			rte_col_2_mlx5_col(aso_mtr->init_color);
 		break;
+	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
+		flow_hw_construct_quota(priv, rule_act, idx);
+		break;
 	default:
 		DRV_LOG(WARNING, "Unsupported shared action type:%d", type);
 		break;
@@ -2269,6 +2288,11 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
 			rule_acts[act_data->action_dst].action =
 					priv->hw_vport[port_action->port_id];
 			break;
+		case RTE_FLOW_ACTION_TYPE_QUOTA:
+			flow_hw_construct_quota(priv,
+						rule_acts + act_data->action_dst,
+						act_data->shared_meter.id);
+			break;
 		case RTE_FLOW_ACTION_TYPE_METER:
 			meter = action->conf;
 			mtr_id = meter->mtr_id;
@@ -2710,11 +2734,18 @@ __flow_hw_pull_indir_action_comp(struct rte_eth_dev *dev,
 	if (ret_comp < n_res && priv->hws_ctpool)
 		ret_comp += mlx5_aso_pull_completion(&priv->ct_mng->aso_sqs[queue],
 				&res[ret_comp], n_res - ret_comp);
+	if (ret_comp < n_res && priv->quota_ctx.sq)
+		ret_comp += mlx5_aso_pull_completion(&priv->quota_ctx.sq[queue],
+						     &res[ret_comp],
+						     n_res - ret_comp);
 	for (i = 0; i <  ret_comp; i++) {
 		job = (struct mlx5_hw_q_job *)res[i].user_data;
 		/* Restore user data. */
 		res[i].user_data = job->user_data;
-		if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) {
+		if (MLX5_INDIRECT_ACTION_TYPE_GET(job->action) ==
+		    MLX5_INDIRECT_ACTION_TYPE_QUOTA) {
+			mlx5_quota_async_completion(dev, queue, job);
+		} else if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) {
 			type = MLX5_INDIRECT_ACTION_TYPE_GET(job->action);
 			if (type == MLX5_INDIRECT_ACTION_TYPE_METER_MARK) {
 				idx = MLX5_INDIRECT_ACTION_IDX_GET(job->action);
@@ -3695,6 +3726,10 @@ flow_hw_validate_action_indirect(struct rte_eth_dev *dev,
 			return ret;
 		*action_flags |= MLX5_FLOW_ACTION_INDIRECT_AGE;
 		break;
+	case RTE_FLOW_ACTION_TYPE_QUOTA:
+		/* TODO: add proper quota verification */
+		*action_flags |= MLX5_FLOW_ACTION_QUOTA;
+		break;
 	default:
 		DRV_LOG(WARNING, "Unsupported shared action type: %d", type);
 		return rte_flow_error_set(error, ENOTSUP,
@@ -3732,19 +3767,17 @@ flow_hw_validate_action_raw_encap(struct rte_eth_dev *dev __rte_unused,
 }
 
 static inline uint16_t
-flow_hw_template_expand_modify_field(const struct rte_flow_action actions[],
-				     const struct rte_flow_action masks[],
-				     const struct rte_flow_action *mf_action,
-				     const struct rte_flow_action *mf_mask,
-				     struct rte_flow_action *new_actions,
-				     struct rte_flow_action *new_masks,
-				     uint64_t flags, uint32_t act_num)
+flow_hw_template_expand_modify_field(struct rte_flow_action actions[],
+				     struct rte_flow_action masks[],
+				     const struct rte_flow_action *mf_actions,
+				     const struct rte_flow_action *mf_masks,
+				     uint64_t flags, uint32_t act_num,
+				     uint32_t mf_num)
 {
 	uint32_t i, tail;
 
 	MLX5_ASSERT(actions && masks);
-	MLX5_ASSERT(new_actions && new_masks);
-	MLX5_ASSERT(mf_action && mf_mask);
+	MLX5_ASSERT(mf_num > 0);
 	if (flags & MLX5_FLOW_ACTION_MODIFY_FIELD) {
 		/*
 		 * Application action template already has Modify Field.
@@ -3795,12 +3828,10 @@ flow_hw_template_expand_modify_field(const struct rte_flow_action actions[],
 	i = 0;
 insert:
 	tail = act_num - i; /* num action to move */
-	memcpy(new_actions, actions, sizeof(actions[0]) * i);
-	new_actions[i] = *mf_action;
-	memcpy(new_actions + i + 1, actions + i, sizeof(actions[0]) * tail);
-	memcpy(new_masks, masks, sizeof(masks[0]) * i);
-	new_masks[i] = *mf_mask;
-	memcpy(new_masks + i + 1, masks + i, sizeof(masks[0]) * tail);
+	memmove(actions + i + mf_num, actions + i, sizeof(actions[0]) * tail);
+	memcpy(actions + i, mf_actions, sizeof(actions[0]) * mf_num);
+	memmove(masks + i + mf_num, masks + i, sizeof(masks[0]) * tail);
+	memcpy(masks + i, mf_masks, sizeof(masks[0]) * mf_num);
 	return i;
 }
 
@@ -4110,6 +4141,7 @@ flow_hw_dr_actions_template_handle_shared(const struct rte_flow_action *mask,
 		action_types[*curr_off] = MLX5DR_ACTION_TYP_ASO_CT;
 		*curr_off = *curr_off + 1;
 		break;
+	case RTE_FLOW_ACTION_TYPE_QUOTA:
 	case RTE_FLOW_ACTION_TYPE_METER_MARK:
 		at->actions_off[action_src] = *curr_off;
 		action_types[*curr_off] = MLX5DR_ACTION_TYP_ASO_METER;
@@ -4339,6 +4371,96 @@ flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev,
 					      &modify_action);
 }
 
+static __rte_always_inline void
+flow_hw_actions_template_replace_container(const
+					   struct rte_flow_action *actions,
+					   const
+					   struct rte_flow_action *masks,
+					   struct rte_flow_action *new_actions,
+					   struct rte_flow_action *new_masks,
+					   struct rte_flow_action **ra,
+					   struct rte_flow_action **rm,
+					   uint32_t act_num)
+{
+	memcpy(new_actions, actions, sizeof(actions[0]) * act_num);
+	memcpy(new_masks, masks, sizeof(masks[0]) * act_num);
+	*ra = (void *)(uintptr_t)new_actions;
+	*rm = (void *)(uintptr_t)new_masks;
+}
+
+#define RX_META_COPY_ACTION ((const struct rte_flow_action) {    \
+	.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,               \
+	.conf = &(struct rte_flow_action_modify_field){          \
+		.operation = RTE_FLOW_MODIFY_SET,                \
+		.dst = {                                         \
+			.field = (enum rte_flow_field_id)        \
+				MLX5_RTE_FLOW_FIELD_META_REG,    \
+			.level = REG_B,                          \
+		},                                               \
+		.src = {                                         \
+			.field = (enum rte_flow_field_id)        \
+				MLX5_RTE_FLOW_FIELD_META_REG,    \
+			.level = REG_C_1,                        \
+		},                                               \
+		.width = 32,                                     \
+	}                                                        \
+})
+
+#define RX_META_COPY_MASK ((const struct rte_flow_action) {      \
+	.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,               \
+	.conf = &(struct rte_flow_action_modify_field){          \
+		.operation = RTE_FLOW_MODIFY_SET,                \
+		.dst = {                                         \
+			.field = (enum rte_flow_field_id)        \
+				MLX5_RTE_FLOW_FIELD_META_REG,    \
+			.level = UINT32_MAX,                     \
+			.offset = UINT32_MAX,                    \
+		},                                               \
+		.src = {                                         \
+			.field = (enum rte_flow_field_id)        \
+				MLX5_RTE_FLOW_FIELD_META_REG,    \
+			.level = UINT32_MAX,                     \
+			.offset = UINT32_MAX,                    \
+		},                                               \
+		.width = UINT32_MAX,                             \
+	}                                                        \
+})
+
+#define QUOTA_COLOR_INC_ACTION ((const struct rte_flow_action) {      \
+	.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,                    \
+	.conf = &(struct rte_flow_action_modify_field) {              \
+		.operation = RTE_FLOW_MODIFY_ADD,                     \
+		.dst = {                                              \
+			.field = RTE_FLOW_FIELD_METER_COLOR,          \
+			.level = 0, .offset = 0                       \
+		},                                                    \
+		.src = {                                              \
+			.field = RTE_FLOW_FIELD_VALUE,                \
+			.level = 1,                                   \
+			.offset = 0,                                  \
+		},                                                    \
+		.width = 2                                            \
+	}                                                             \
+})
+
+#define QUOTA_COLOR_INC_MASK ((const struct rte_flow_action) {        \
+	.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,                    \
+	.conf = &(struct rte_flow_action_modify_field) {              \
+		.operation = RTE_FLOW_MODIFY_ADD,                     \
+		.dst = {                                              \
+			.field = RTE_FLOW_FIELD_METER_COLOR,          \
+			.level = UINT32_MAX,                          \
+			.offset = UINT32_MAX,                         \
+		},                                                    \
+		.src = {                                              \
+			.field = RTE_FLOW_FIELD_VALUE,                \
+			.level = 3,                                   \
+			.offset = 0                                   \
+		},                                                    \
+		.width = UINT32_MAX                                   \
+	}                                                             \
+})
+
 /**
  * Create flow action template.
  *
@@ -4377,40 +4499,9 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
 	int set_vlan_vid_ix = -1;
 	struct rte_flow_action_modify_field set_vlan_vid_spec = {0, };
 	struct rte_flow_action_modify_field set_vlan_vid_mask = {0, };
-	const struct rte_flow_action_modify_field rx_mreg = {
-		.operation = RTE_FLOW_MODIFY_SET,
-		.dst = {
-			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
-			.level = REG_B,
-		},
-		.src = {
-			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
-			.level = REG_C_1,
-		},
-		.width = 32,
-	};
-	const struct rte_flow_action_modify_field rx_mreg_mask = {
-		.operation = RTE_FLOW_MODIFY_SET,
-		.dst = {
-			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
-			.level = UINT32_MAX,
-			.offset = UINT32_MAX,
-		},
-		.src = {
-			.field = (enum rte_flow_field_id)MLX5_RTE_FLOW_FIELD_META_REG,
-			.level = UINT32_MAX,
-			.offset = UINT32_MAX,
-		},
-		.width = UINT32_MAX,
-	};
-	const struct rte_flow_action rx_cpy = {
-		.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
-		.conf = &rx_mreg,
-	};
-	const struct rte_flow_action rx_cpy_mask = {
-		.type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD,
-		.conf = &rx_mreg_mask,
-	};
+	struct rte_flow_action mf_actions[MLX5_HW_MAX_ACTS];
+	struct rte_flow_action mf_masks[MLX5_HW_MAX_ACTS];
+	uint32_t expand_mf_num = 0;
 
 	if (mlx5_flow_hw_actions_validate(dev, attr, actions, masks,
 					  &action_flags, error))
@@ -4440,44 +4531,57 @@ flow_hw_actions_template_create(struct rte_eth_dev *dev,
 				   RTE_FLOW_ERROR_TYPE_ACTION, NULL, "Too many actions");
 		return NULL;
 	}
+	if (set_vlan_vid_ix != -1) {
+		/* If temporary action buffer was not used, copy template actions to it */
+		if (ra == actions)
+			flow_hw_actions_template_replace_container(actions,
+								   masks,
+								   tmp_action,
+								   tmp_mask,
+								   &ra, &rm,
+								   act_num);
+		flow_hw_set_vlan_vid(dev, ra, rm,
+				     &set_vlan_vid_spec, &set_vlan_vid_mask,
+				     set_vlan_vid_ix);
+		action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
+	}
+	if (action_flags & MLX5_FLOW_ACTION_QUOTA) {
+		mf_actions[expand_mf_num] = QUOTA_COLOR_INC_ACTION;
+		mf_masks[expand_mf_num] = QUOTA_COLOR_INC_MASK;
+		expand_mf_num++;
+	}
 	if (priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS &&
 	    priv->sh->config.dv_esw_en &&
 	    (action_flags & (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS))) {
 		/* Insert META copy */
-		if (act_num + 1 > MLX5_HW_MAX_ACTS) {
+		mf_actions[expand_mf_num] = RX_META_COPY_ACTION;
+		mf_masks[expand_mf_num] = RX_META_COPY_MASK;
+		expand_mf_num++;
+	}
+	if (expand_mf_num) {
+		if (act_num + expand_mf_num > MLX5_HW_MAX_ACTS) {
 			rte_flow_error_set(error, E2BIG,
 					   RTE_FLOW_ERROR_TYPE_ACTION,
 					   NULL, "cannot expand: too many actions");
 			return NULL;
 		}
+		if (ra == actions)
+			flow_hw_actions_template_replace_container(actions,
+								   masks,
+								   tmp_action,
+								   tmp_mask,
+								   &ra, &rm,
+								   act_num);
 		/* Application should make sure only one Q/RSS exist in one rule. */
-		pos = flow_hw_template_expand_modify_field(actions, masks,
-							   &rx_cpy,
-							   &rx_cpy_mask,
-							   tmp_action, tmp_mask,
+		pos = flow_hw_template_expand_modify_field(ra, rm,
+							   mf_actions,
+							   mf_masks,
 							   action_flags,
-							   act_num);
-		ra = tmp_action;
-		rm = tmp_mask;
-		act_num++;
+							   act_num,
+							   expand_mf_num);
+		act_num += expand_mf_num;
 		action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
 	}
-	if (set_vlan_vid_ix != -1) {
-		/* If temporary action buffer was not used, copy template actions to it */
-		if (ra == actions && rm == masks) {
-			for (i = 0; i < act_num; ++i) {
-				tmp_action[i] = actions[i];
-				tmp_mask[i] = masks[i];
-				if (actions[i].type == RTE_FLOW_ACTION_TYPE_END)
-					break;
-			}
-			ra = tmp_action;
-			rm = tmp_mask;
-		}
-		flow_hw_set_vlan_vid(dev, ra, rm,
-				     &set_vlan_vid_spec, &set_vlan_vid_mask,
-				     set_vlan_vid_ix);
-	}
 	act_len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, ra, error);
 	if (act_len <= 0)
 		return NULL;
@@ -4740,6 +4844,7 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,
 		case RTE_FLOW_ITEM_TYPE_ICMP:
 		case RTE_FLOW_ITEM_TYPE_ICMP6:
 		case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST:
+		case RTE_FLOW_ITEM_TYPE_QUOTA:
 		case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY:
 		case RTE_FLOW_ITEM_TYPE_CONNTRACK:
 		case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
@@ -7017,6 +7122,12 @@ flow_hw_configure(struct rte_eth_dev *dev,
 				   "Failed to set up Rx control flow templates");
 		goto err;
 	}
+	/* Initialize quotas */
+	if (port_attr->nb_quotas) {
+		ret = mlx5_flow_quota_init(dev, port_attr->nb_quotas);
+		if (ret)
+			goto err;
+	}
 	/* Initialize meter library*/
 	if (port_attr->nb_meters || (host_priv && host_priv->hws_mpool))
 		if (mlx5_flow_meter_init(dev, port_attr->nb_meters, 1, 1, nb_q_updated))
@@ -7116,6 +7227,7 @@ flow_hw_configure(struct rte_eth_dev *dev,
 		mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool);
 		priv->hws_cpool = NULL;
 	}
+	mlx5_flow_quota_destroy(dev);
 	flow_hw_free_vport_actions(priv);
 	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
 		if (priv->hw_drop[i])
@@ -7213,6 +7325,7 @@ flow_hw_resource_release(struct rte_eth_dev *dev)
 		flow_hw_ct_mng_destroy(dev, priv->ct_mng);
 		priv->ct_mng = NULL;
 	}
+	mlx5_flow_quota_destroy(dev);
 	for (i = 0; i < priv->nb_queue; i++) {
 		rte_ring_free(priv->hw_q[i].indir_iq);
 		rte_ring_free(priv->hw_q[i].indir_cq);
@@ -7618,6 +7731,8 @@ flow_hw_action_handle_validate(struct rte_eth_dev *dev, uint32_t queue,
 		return flow_hw_validate_action_meter_mark(dev, action, error);
 	case RTE_FLOW_ACTION_TYPE_RSS:
 		return flow_dv_action_validate(dev, conf, action, error);
+	case RTE_FLOW_ACTION_TYPE_QUOTA:
+		return 0;
 	default:
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
@@ -7789,6 +7904,11 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
 	case RTE_FLOW_ACTION_TYPE_RSS:
 		handle = flow_dv_action_create(dev, conf, action, error);
 		break;
+	case RTE_FLOW_ACTION_TYPE_QUOTA:
+		aso = true;
+		handle = mlx5_quota_alloc(dev, queue, action->conf,
+					  job, push, error);
+		break;
 	default:
 		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
 				   NULL, "action type not supported");
@@ -7909,6 +8029,11 @@ flow_hw_action_handle_update(struct rte_eth_dev *dev, uint32_t queue,
 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
 		ret = flow_dv_action_update(dev, handle, update, error);
 		break;
+	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
+		aso = true;
+		ret = mlx5_quota_query_update(dev, queue, handle, update, NULL,
+					      job, push, error);
+		break;
 	default:
 		ret = -ENOTSUP;
 		rte_flow_error_set(error, ENOTSUP,
@@ -8021,6 +8146,8 @@ flow_hw_action_handle_destroy(struct rte_eth_dev *dev, uint32_t queue,
 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
 		ret = flow_dv_action_destroy(dev, handle, error);
 		break;
+	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
+		break;
 	default:
 		ret = -ENOTSUP;
 		rte_flow_error_set(error, ENOTSUP,
@@ -8292,6 +8419,11 @@ flow_hw_action_handle_query(struct rte_eth_dev *dev, uint32_t queue,
 		ret = flow_hw_conntrack_query(dev, queue, act_idx, data,
 					      job, push, error);
 		break;
+	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
+		aso = true;
+		ret = mlx5_quota_query(dev, queue, handle, data,
+				       job, push, error);
+		break;
 	default:
 		ret = -ENOTSUP;
 		rte_flow_error_set(error, ENOTSUP,
@@ -8301,7 +8433,51 @@ flow_hw_action_handle_query(struct rte_eth_dev *dev, uint32_t queue,
 	}
 	if (job)
 		flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
-	return 0;
+	return ret;
+}
+
+static int
+flow_hw_async_action_handle_query_update
+			(struct rte_eth_dev *dev, uint32_t queue,
+			 const struct rte_flow_op_attr *attr,
+			 struct rte_flow_action_handle *handle,
+			 const void *update, void *query,
+			 enum rte_flow_query_update_mode qu_mode,
+			 void *user_data, struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	bool push = flow_hw_action_push(attr);
+	bool aso = false;
+	struct mlx5_hw_q_job *job = NULL;
+	int ret = 0;
+
+	if (attr) {
+		job = flow_hw_action_job_init(priv, queue, handle, user_data,
+					      query,
+					      MLX5_HW_Q_JOB_TYPE_UPDATE_QUERY,
+					      error);
+		if (!job)
+			return -rte_errno;
+	}
+	switch (MLX5_INDIRECT_ACTION_TYPE_GET(handle)) {
+	case MLX5_INDIRECT_ACTION_TYPE_QUOTA:
+		if (qu_mode != RTE_FLOW_QU_QUERY_FIRST) {
+			ret = rte_flow_error_set
+				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+				 NULL, "quota action must query before update");
+			break;
+		}
+		aso = true;
+		ret = mlx5_quota_query_update(dev, queue, handle,
+					      update, query, job, push, error);
+		break;
+	default:
+		ret = rte_flow_error_set(error, ENOTSUP,
+					 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL, "update and query not supported");
+	}
+	if (job)
+		flow_hw_action_finalize(dev, queue, job, push, aso, ret == 0);
+	return ret;
 }
 
 static int
@@ -8313,6 +8489,19 @@ flow_hw_action_query(struct rte_eth_dev *dev,
 			handle, data, NULL, error);
 }
 
+static int
+flow_hw_action_query_update(struct rte_eth_dev *dev,
+			    struct rte_flow_action_handle *handle,
+			    const void *update, void *query,
+			    enum rte_flow_query_update_mode qu_mode,
+			    struct rte_flow_error *error)
+{
+	return flow_hw_async_action_handle_query_update(dev, MLX5_HW_INV_QUEUE,
+							NULL, handle, update,
+							query, qu_mode, NULL,
+							error);
+}
+
 /**
  * Get aged-out flows of a given port on the given HWS flow queue.
  *
@@ -8425,12 +8614,14 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
 	.async_action_create = flow_hw_action_handle_create,
 	.async_action_destroy = flow_hw_action_handle_destroy,
 	.async_action_update = flow_hw_action_handle_update,
+	.async_action_query_update = flow_hw_async_action_handle_query_update,
 	.async_action_query = flow_hw_action_handle_query,
 	.action_validate = flow_hw_action_validate,
 	.action_create = flow_hw_action_create,
 	.action_destroy = flow_hw_action_destroy,
 	.action_update = flow_hw_action_update,
 	.action_query = flow_hw_action_query,
+	.action_query_update = flow_hw_action_query_update,
 	.query = flow_hw_query,
 	.get_aged_flows = flow_hw_get_aged_flows,
 	.get_q_aged_flows = flow_hw_get_q_aged_flows,
-- 
2.34.1



Thread overview: 20+ messages
2023-01-18 12:55 [PATCH 0/5] " Gregory Etelson
2023-01-18 12:55 ` [PATCH 1/5] net/mlx5: update query fields in async job structure Gregory Etelson
2023-01-18 12:55 ` [PATCH 2/5] net/mlx5: remove code duplication Gregory Etelson
2023-01-18 12:55 ` [PATCH 3/5] common/mlx5: update MTR ASO definitions Gregory Etelson
2023-01-18 12:55 ` [PATCH 4/5] net/mlx5: add indirect QUOTA create/query/modify Gregory Etelson
2023-01-18 12:55 ` [PATCH 5/5] mlx5dr: Definer, translate RTE quota item Gregory Etelson
2023-03-08  2:58 ` [PATCH 0/5] net/mlx5: add indirect QUOTA create/query/modify Suanming Mou
2023-03-08 17:01 ` [PATCH v2 " Gregory Etelson
2023-03-08 17:01   ` [PATCH v2 1/5] net/mlx5: update query fields in async job structure Gregory Etelson
2023-03-08 17:01   ` [PATCH v2 2/5] net/mlx5: remove code duplication Gregory Etelson
2023-03-08 17:01   ` [PATCH v2 3/5] common/mlx5: update MTR ASO definitions Gregory Etelson
2023-03-08 17:01   ` Gregory Etelson [this message]
2023-03-08 17:01   ` [PATCH v2 5/5] mlx5dr: Definer, translate RTE quota item Gregory Etelson
2023-05-07  7:39 ` [PATCH v3 0/5] net/mlx5: support indirect quota flow action Gregory Etelson
2023-05-07  7:39   ` [PATCH v3 1/5] net/mlx5: update query fields in async job structure Gregory Etelson
2023-05-07  7:39   ` [PATCH v3 2/5] net/mlx5: remove code duplication Gregory Etelson
2023-05-07  7:39   ` [PATCH v3 3/5] common/mlx5: update MTR ASO definitions Gregory Etelson
2023-05-07  7:39   ` [PATCH v3 4/5] net/mlx5: add indirect QUOTA create/query/modify Gregory Etelson
2023-05-07  7:39   ` [PATCH v3 5/5] mlx5dr: Definer, translate RTE quota item Gregory Etelson
2023-05-25 14:18   ` [PATCH v3 0/5] net/mlx5: support indirect quota flow action Raslan Darawsheh
