DPDK patches and discussions
From: Gregory Etelson <getelson@nvidia.com>
To: <dev@dpdk.org>
Cc: getelson@nvidia.com,   <mkashani@nvidia.com>,
	rasland@nvidia.com, thomas@monjalon.net,
	"Dariusz Sosnowski" <dsosnowski@nvidia.com>,
	"Viacheslav Ovsiienko" <viacheslavo@nvidia.com>,
	"Ori Kam" <orika@nvidia.com>,
	"Suanming Mou" <suanmingm@nvidia.com>,
	"Matan Azrad" <matan@nvidia.com>
Subject: [PATCH 5/5] net/mlx5: add support for flow table resizing
Date: Fri, 2 Feb 2024 13:56:11 +0200	[thread overview]
Message-ID: <20240202115611.288892-6-getelson@nvidia.com> (raw)
In-Reply-To: <20240202115611.288892-1-getelson@nvidia.com>

Support the template table resize API in the PMD.
The patch allows increasing the capacity of an existing template table
without disrupting traffic: a resizable table keeps two matchers, new
rules are created in the current (larger) matcher, flows created before
the resize can be moved to it with the flow update API, and the retired
matcher is released once the resize is completed.
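
A minimal usage sketch of the intended application flow is given below.
It assumes the public rte_flow resize API proposed in the companion
ethdev series (rte_flow_template_table_resize(),
rte_flow_async_update_resized(),
rte_flow_template_table_resize_complete()); those names and semantics
come from that series, not from this patch, and may differ.

/* Grow a resizable template table and migrate pre-resize flows. */
static int
grow_table(uint16_t port, uint32_t queue,
	   struct rte_flow_template_table *tbl,
	   struct rte_flow **flows, uint32_t nb_flows,
	   uint32_t new_capacity)
{
	struct rte_flow_op_attr op = { .postpone = 0 };
	struct rte_flow_error err;
	uint32_t i;
	int ret;

	/* 1. Allocate the larger matcher; new rules now land in it. */
	ret = rte_flow_template_table_resize(port, tbl, new_capacity, &err);
	if (ret)
		return ret;
	/* 2. Move flows created before the resize to the new matcher. */
	for (i = 0; i < nb_flows; i++) {
		ret = rte_flow_async_update_resized(port, queue, &op,
						    flows[i], NULL, &err);
		if (ret)
			return ret;
	}
	rte_flow_push(port, queue, &err);
	/* ... poll rte_flow_pull() until all move completions arrive ... */
	/* 3. Release the retired matcher once no rule references it. */
	return rte_flow_template_table_resize_complete(port, tbl, &err);
}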

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
---
 drivers/net/mlx5/mlx5.h         |   5 +
 drivers/net/mlx5/mlx5_flow.c    |  51 ++++
 drivers/net/mlx5/mlx5_flow.h    |  84 ++++--
 drivers/net/mlx5/mlx5_flow_hw.c | 512 +++++++++++++++++++++++++++-----
 drivers/net/mlx5/mlx5_host.c    | 211 +++++++++++++
 5 files changed, 758 insertions(+), 105 deletions(-)
 create mode 100644 drivers/net/mlx5/mlx5_host.c

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index f2e2e04429..ff0ca7fa42 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -380,6 +380,9 @@ enum mlx5_hw_job_type {
 	MLX5_HW_Q_JOB_TYPE_UPDATE, /* Flow update job type. */
 	MLX5_HW_Q_JOB_TYPE_QUERY, /* Flow query job type. */
 	MLX5_HW_Q_JOB_TYPE_UPDATE_QUERY, /* Flow update and query job type. */
+	MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_CREATE, /* Non-optimized flow create job type. */
+	MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_DESTROY, /* Non-optimized flow destroy job type. */
+	MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_MOVE, /* Move flow after table resize. */
 };
 
 enum mlx5_hw_indirect_type {
@@ -422,6 +425,8 @@ struct mlx5_hw_q {
 	struct mlx5_hw_q_job **job; /* LIFO header. */
 	struct rte_ring *indir_cq; /* Indirect action SW completion queue. */
 	struct rte_ring *indir_iq; /* Indirect action SW in progress queue. */
+	struct rte_ring *flow_transfer_pending;
+	struct rte_ring *flow_transfer_completed;
 } __rte_cache_aligned;
 
 
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 85e8c77c81..521119e138 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1198,6 +1198,20 @@ mlx5_flow_calc_table_hash(struct rte_eth_dev *dev,
 			  uint8_t pattern_template_index,
 			  uint32_t *hash, struct rte_flow_error *error);
 
+static int
+mlx5_template_table_resize(struct rte_eth_dev *dev,
+			   struct rte_flow_template_table *table,
+			   uint32_t nb_rules, struct rte_flow_error *error);
+static int
+mlx5_flow_async_update_resized(struct rte_eth_dev *dev, uint32_t queue,
+			       const struct rte_flow_op_attr *attr,
+			       struct rte_flow *rule, void *user_data,
+			       struct rte_flow_error *error);
+static int
+mlx5_table_resize_complete(struct rte_eth_dev *dev,
+			   struct rte_flow_template_table *table,
+			   struct rte_flow_error *error);
+
 static const struct rte_flow_ops mlx5_flow_ops = {
 	.validate = mlx5_flow_validate,
 	.create = mlx5_flow_create,
@@ -1253,6 +1267,9 @@ static const struct rte_flow_ops mlx5_flow_ops = {
 	.async_action_list_handle_query_update =
 		mlx5_flow_async_action_list_handle_query_update,
 	.flow_calc_table_hash = mlx5_flow_calc_table_hash,
+	.flow_template_table_resize = mlx5_template_table_resize,
+	.flow_update_resized = mlx5_flow_async_update_resized,
+	.flow_template_table_resize_complete = mlx5_table_resize_complete,
 };
 
 /* Tunnel information. */
@@ -11115,6 +11132,40 @@ mlx5_flow_calc_table_hash(struct rte_eth_dev *dev,
 					  hash, error);
 }
 
+static int
+mlx5_template_table_resize(struct rte_eth_dev *dev,
+			   struct rte_flow_template_table *table,
+			   uint32_t nb_rules, struct rte_flow_error *error)
+{
+	const struct mlx5_flow_driver_ops *fops;
+
+	MLX5_DRV_FOPS_OR_ERR(dev, fops, table_resize, ENOTSUP);
+	return fops->table_resize(dev, table, nb_rules, error);
+}
+
+static int
+mlx5_table_resize_complete(struct rte_eth_dev *dev,
+			   struct rte_flow_template_table *table,
+			   struct rte_flow_error *error)
+{
+	const struct mlx5_flow_driver_ops *fops;
+
+	MLX5_DRV_FOPS_OR_ERR(dev, fops, table_resize_complete, ENOTSUP);
+	return fops->table_resize_complete(dev, table, error);
+}
+
+static int
+mlx5_flow_async_update_resized(struct rte_eth_dev *dev, uint32_t queue,
+			       const struct rte_flow_op_attr *op_attr,
+			       struct rte_flow *rule, void *user_data,
+			       struct rte_flow_error *error)
+{
+	const struct mlx5_flow_driver_ops *fops;
+
+	MLX5_DRV_FOPS_OR_ERR(dev, fops, flow_update_resized, ENOTSUP);
+	return fops->flow_update_resized(dev, queue, op_attr, rule, user_data, error);
+}
+
 /**
  * Destroy all indirect actions (shared RSS).
  *
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 497d4b0f0c..c7d84af659 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1210,6 +1210,7 @@ struct rte_flow {
 	uint32_t tunnel:1;
 	uint32_t meter:24; /**< Holds flow meter id. */
 	uint32_t indirect_type:2; /**< Indirect action type. */
+	uint32_t matcher_selector:1; /**< Matcher index in resizable table. */
 	uint32_t rix_mreg_copy;
 	/**< Index to metadata register copy table resource. */
 	uint32_t counter; /**< Holds flow counter. */
@@ -1255,6 +1256,7 @@ struct rte_flow_hw {
 	};
 	struct rte_flow_template_table *table; /* The table the flow was allocated from. */
 	uint8_t mt_idx;
+	uint8_t matcher_selector:1;
 	uint32_t age_idx;
 	cnt_id_t cnt_id;
 	uint32_t mtr_id;
@@ -1469,6 +1471,11 @@ struct mlx5_flow_group {
 #define MLX5_MAX_TABLE_RESIZE_NUM 64
 
 struct mlx5_multi_pattern_segment {
+	/*
+	 * Number of Modify Header Argument Objects allocated for the actions
+	 * in this segment.
+	 * Capacity is always a power of 2.
+	 */
 	uint32_t capacity;
 	uint32_t head_index;
 	struct mlx5dr_action *mhdr_action;
@@ -1507,43 +1514,22 @@ mlx5_is_multi_pattern_active(const struct mlx5_tbl_multi_pattern_ctx *mpctx)
 	return mpctx->segments[0].head_index == 1;
 }
 
-static __rte_always_inline struct mlx5_multi_pattern_segment *
-mlx5_multi_pattern_segment_get_next(struct mlx5_tbl_multi_pattern_ctx *mpctx)
-{
-	int i;
-
-	for (i = 0; i < MLX5_MAX_TABLE_RESIZE_NUM; i++) {
-		if (!mpctx->segments[i].capacity)
-			return &mpctx->segments[i];
-	}
-	return NULL;
-}
-
-static __rte_always_inline struct mlx5_multi_pattern_segment *
-mlx5_multi_pattern_segment_find(struct mlx5_tbl_multi_pattern_ctx *mpctx,
-				uint32_t flow_resource_ix)
-{
-	int i;
-
-	for (i = 0; i < MLX5_MAX_TABLE_RESIZE_NUM; i++) {
-		uint32_t limit = mpctx->segments[i].head_index +
-				 mpctx->segments[i].capacity;
-
-		if (flow_resource_ix < limit)
-			return &mpctx->segments[i];
-	}
-	return NULL;
-}
-
 struct mlx5_flow_template_table_cfg {
 	struct rte_flow_template_table_attr attr; /* Table attributes passed through flow API. */
 	bool external; /* True if created by flow API, false if table is internal to PMD. */
 };
 
+struct mlx5_matcher_info {
+	struct mlx5dr_matcher *matcher; /* Template matcher. */
+	uint32_t refcnt;
+};
+
 struct rte_flow_template_table {
 	LIST_ENTRY(rte_flow_template_table) next;
 	struct mlx5_flow_group *grp; /* The group rte_flow_template_table uses. */
-	struct mlx5dr_matcher *matcher; /* Template matcher. */
+	struct mlx5_matcher_info matcher_info[2];
+	uint32_t matcher_selector;
+	rte_rwlock_t matcher_replace_rwlk; /* RW lock for resizable tables */
 	/* Item templates bind to the table. */
 	struct rte_flow_pattern_template *its[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
 	/* Action templates bind to the table. */
@@ -1556,8 +1542,34 @@ struct rte_flow_template_table {
 	uint8_t nb_action_templates; /* Action template number. */
 	uint32_t refcnt; /* Table reference counter. */
 	struct mlx5_tbl_multi_pattern_ctx mpctx;
+	struct mlx5dr_matcher_attr matcher_attr;
 };
 
+static __rte_always_inline struct mlx5dr_matcher *
+mlx5_table_matcher(const struct rte_flow_template_table *table)
+{
+	return table->matcher_info[table->matcher_selector].matcher;
+}
+
+static __rte_always_inline struct mlx5_multi_pattern_segment *
+mlx5_multi_pattern_segment_find(struct rte_flow_template_table *table,
+				uint32_t flow_resource_ix)
+{
+	int i;
+	struct mlx5_tbl_multi_pattern_ctx *mpctx = &table->mpctx;
+
+	if (likely(!rte_flow_table_resizable(0, &table->cfg.attr)))
+		return &mpctx->segments[0];
+	for (i = 0; i < MLX5_MAX_TABLE_RESIZE_NUM; i++) {
+		uint32_t limit = mpctx->segments[i].head_index +
+				 mpctx->segments[i].capacity;
+
+		if (flow_resource_ix < limit)
+			return &mpctx->segments[i];
+	}
+	return NULL;
+}
+
 #endif
 
 /*
@@ -2177,6 +2189,17 @@ typedef int
 			 const struct rte_flow_item pattern[],
 			 uint8_t pattern_template_index,
 			 uint32_t *hash, struct rte_flow_error *error);
+typedef int (*mlx5_table_resize_t)(struct rte_eth_dev *dev,
+				   struct rte_flow_template_table *table,
+				   uint32_t nb_rules, struct rte_flow_error *error);
+typedef int (*mlx5_flow_update_resized_t)
+			(struct rte_eth_dev *dev, uint32_t queue,
+			 const struct rte_flow_op_attr *attr,
+			 struct rte_flow *rule, void *user_data,
+			 struct rte_flow_error *error);
+typedef int (*table_resize_complete_t)(struct rte_eth_dev *dev,
+				       struct rte_flow_template_table *table,
+				       struct rte_flow_error *error);
 
 struct mlx5_flow_driver_ops {
 	mlx5_flow_validate_t validate;
@@ -2250,6 +2273,9 @@ struct mlx5_flow_driver_ops {
 	mlx5_flow_async_action_list_handle_query_update_t
 		async_action_list_handle_query_update;
 	mlx5_flow_calc_table_hash_t flow_calc_table_hash;
+	mlx5_table_resize_t table_resize;
+	mlx5_flow_update_resized_t flow_update_resized;
+	table_resize_complete_t table_resize_complete;
 };
 
 /* mlx5_flow.c */
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index e5c770c6fc..874ae00028 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -2886,7 +2886,7 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
 	int ret;
 	uint32_t age_idx = 0;
 	struct mlx5_aso_mtr *aso_mtr;
-	struct mlx5_multi_pattern_segment *mp_segment;
+	struct mlx5_multi_pattern_segment *mp_segment = NULL;
 
 	rte_memcpy(rule_acts, hw_acts->rule_acts, sizeof(*rule_acts) * at->dr_actions_num);
 	attr.group = table->grp->group_id;
@@ -2900,17 +2900,20 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
 	} else {
 		attr.ingress = 1;
 	}
-	if (hw_acts->mhdr && hw_acts->mhdr->mhdr_cmds_num > 0) {
+	if (hw_acts->mhdr && hw_acts->mhdr->mhdr_cmds_num > 0 && !hw_acts->mhdr->shared) {
 		uint16_t pos = hw_acts->mhdr->pos;
 
-		if (!hw_acts->mhdr->shared) {
-			rule_acts[pos].modify_header.offset =
-						job->flow->res_idx - 1;
-			rule_acts[pos].modify_header.data =
-						(uint8_t *)job->mhdr_cmd;
-			rte_memcpy(job->mhdr_cmd, hw_acts->mhdr->mhdr_cmds,
-				   sizeof(*job->mhdr_cmd) * hw_acts->mhdr->mhdr_cmds_num);
-		}
+		mp_segment = mlx5_multi_pattern_segment_find(table, job->flow->res_idx);
+		if (!mp_segment || !mp_segment->mhdr_action)
+			return -1;
+		rule_acts[pos].action = mp_segment->mhdr_action;
+		/* offset is relative to DR action */
+		rule_acts[pos].modify_header.offset =
+					job->flow->res_idx - mp_segment->head_index;
+		rule_acts[pos].modify_header.data =
+					(uint8_t *)job->mhdr_cmd;
+		rte_memcpy(job->mhdr_cmd, hw_acts->mhdr->mhdr_cmds,
+			   sizeof(*job->mhdr_cmd) * hw_acts->mhdr->mhdr_cmds_num);
 	}
 	LIST_FOREACH(act_data, &hw_acts->act_list, next) {
 		uint32_t jump_group;
@@ -3017,10 +3020,6 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
 			MLX5_ASSERT(ipv6_push->size == act_data->ipv6_ext.len);
 			break;
 		case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
-			mp_segment = mlx5_multi_pattern_segment_find(&table->mpctx, job->flow->res_idx);
-			if (!mp_segment || !mp_segment->mhdr_action)
-				return -1;
-			rule_acts[hw_acts->mhdr->pos].action = mp_segment->mhdr_action;
 			if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
 				ret = flow_hw_set_vlan_vid_construct(dev, job,
 								     act_data,
@@ -3177,11 +3176,13 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
 
 		if (ix < 0)
 			return -1;
-		mp_segment = mlx5_multi_pattern_segment_find(&table->mpctx, job->flow->res_idx);
+		if (!mp_segment)
+			mp_segment = mlx5_multi_pattern_segment_find(table, job->flow->res_idx);
 		if (!mp_segment || !mp_segment->reformat_action[ix])
 			return -1;
 		ra->action = mp_segment->reformat_action[ix];
-		ra->reformat.offset = job->flow->res_idx - 1;
+		/* reformat offset is relative to selected DR action */
+		ra->reformat.offset = job->flow->res_idx - mp_segment->head_index;
 		ra->reformat.data = buf;
 	}
 	if (hw_acts->push_remove && !hw_acts->push_remove->shared) {
@@ -3353,10 +3354,26 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,
 					    pattern_template_index, job);
 	if (!rule_items)
 		goto error;
-	ret = mlx5dr_rule_create(table->matcher,
-				 pattern_template_index, rule_items,
-				 action_template_index, rule_acts,
-				 &rule_attr, (struct mlx5dr_rule *)flow->rule);
+	if (likely(!rte_flow_table_resizable(dev->data->port_id, &table->cfg.attr))) {
+		ret = mlx5dr_rule_create(table->matcher_info[0].matcher,
+					 pattern_template_index, rule_items,
+					 action_template_index, rule_acts,
+					 &rule_attr,
+					 (struct mlx5dr_rule *)flow->rule);
+	} else {
+		uint32_t selector;
+
+		job->type = MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_CREATE;
+		rte_rwlock_read_lock(&table->matcher_replace_rwlk);
+		selector = table->matcher_selector;
+		ret = mlx5dr_rule_create(table->matcher_info[selector].matcher,
+					 pattern_template_index, rule_items,
+					 action_template_index, rule_acts,
+					 &rule_attr,
+					 (struct mlx5dr_rule *)flow->rule);
+		rte_rwlock_read_unlock(&table->matcher_replace_rwlk);
+		flow->matcher_selector = selector;
+	}
 	if (likely(!ret))
 		return (struct rte_flow *)flow;
 error:
@@ -3473,9 +3490,23 @@ flow_hw_async_flow_create_by_index(struct rte_eth_dev *dev,
 		rte_errno = EINVAL;
 		goto error;
 	}
-	ret = mlx5dr_rule_create(table->matcher,
-				 0, items, action_template_index, rule_acts,
-				 &rule_attr, (struct mlx5dr_rule *)flow->rule);
+	if (likely(!rte_flow_table_resizable(dev->data->port_id, &table->cfg.attr))) {
+		ret = mlx5dr_rule_create(table->matcher_info[0].matcher,
+					 0, items, action_template_index,
+					 rule_acts, &rule_attr,
+					 (struct mlx5dr_rule *)flow->rule);
+	} else {
+		uint32_t selector;
+
+		job->type = MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_CREATE;
+		rte_rwlock_read_lock(&table->matcher_replace_rwlk);
+		selector = table->matcher_selector;
+		ret = mlx5dr_rule_create(table->matcher_info[selector].matcher,
+					 0, items, action_template_index,
+					 rule_acts, &rule_attr,
+					 (struct mlx5dr_rule *)flow->rule);
+		rte_rwlock_read_unlock(&table->matcher_replace_rwlk);
+	}
 	if (likely(!ret))
 		return (struct rte_flow *)flow;
 error:
@@ -3655,7 +3686,8 @@ flow_hw_async_flow_destroy(struct rte_eth_dev *dev,
 		return rte_flow_error_set(error, ENOMEM,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 					  "fail to destroy rte flow: flow queue full");
-	job->type = MLX5_HW_Q_JOB_TYPE_DESTROY;
+	job->type = !rte_flow_table_resizable(dev->data->port_id, &fh->table->cfg.attr) ?
+		    MLX5_HW_Q_JOB_TYPE_DESTROY : MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_DESTROY;
 	job->user_data = user_data;
 	job->flow = fh;
 	rule_attr.user_data = job;
@@ -3767,6 +3799,26 @@ flow_hw_pull_legacy_indirect_comp(struct rte_eth_dev *dev, struct mlx5_hw_q_job
 	}
 }
 
+static __rte_always_inline int
+mlx5_hw_pull_flow_transfer_comp(struct rte_eth_dev *dev,
+				uint32_t queue, struct rte_flow_op_result res[],
+				uint16_t n_res)
+{
+	uint32_t size, i;
+	struct mlx5_hw_q_job *job = NULL;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct rte_ring *ring = priv->hw_q[queue].flow_transfer_completed;
+
+	size = RTE_MIN(rte_ring_count(ring), n_res);
+	for (i = 0; i < size; i++) {
+		res[i].status = RTE_FLOW_OP_SUCCESS;
+		rte_ring_dequeue(ring, (void **)&job);
+		res[i].user_data = job->user_data;
+		flow_hw_job_put(priv, job, queue);
+	}
+	return (int)size;
+}
+
 static inline int
 __flow_hw_pull_indir_action_comp(struct rte_eth_dev *dev,
 				 uint32_t queue,
@@ -3815,6 +3867,76 @@ __flow_hw_pull_indir_action_comp(struct rte_eth_dev *dev,
 	return ret_comp;
 }
 
+static __rte_always_inline void
+hw_cmpl_flow_update_or_destroy(struct rte_eth_dev *dev,
+			       struct mlx5_hw_q_job *job,
+			       uint32_t queue, struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
+	struct rte_flow_hw *flow = job->flow;
+	struct rte_flow_template_table *table = flow->table;
+	/* Release the original resource index in case of update. */
+	uint32_t res_idx = flow->res_idx;
+
+	if (flow->fate_type == MLX5_FLOW_FATE_JUMP)
+		flow_hw_jump_release(dev, flow->jump);
+	else if (flow->fate_type == MLX5_FLOW_FATE_QUEUE)
+		mlx5_hrxq_obj_release(dev, flow->hrxq);
+	if (mlx5_hws_cnt_id_valid(flow->cnt_id))
+		flow_hw_age_count_release(priv, queue,
+					  flow, error);
+	if (flow->mtr_id) {
+		mlx5_ipool_free(pool->idx_pool,	flow->mtr_id);
+		flow->mtr_id = 0;
+	}
+	if (job->type != MLX5_HW_Q_JOB_TYPE_UPDATE) {
+		if (table) {
+			mlx5_ipool_free(table->resource, res_idx);
+			mlx5_ipool_free(table->flow, flow->idx);
+		}
+	} else {
+		rte_memcpy(flow, job->upd_flow,
+			   offsetof(struct rte_flow_hw, rule));
+		mlx5_ipool_free(table->resource, res_idx);
+	}
+}
+
+static __rte_always_inline void
+hw_cmpl_resizable_tbl(struct rte_eth_dev *dev,
+		      struct mlx5_hw_q_job *job,
+		      uint32_t queue, enum rte_flow_op_status status,
+		      struct rte_flow_error *error)
+{
+	struct rte_flow_hw *flow = job->flow;
+	struct rte_flow_template_table *table = flow->table;
+	uint32_t selector = flow->matcher_selector;
+	uint32_t other_selector = (selector + 1) & 1;
+
+	switch (job->type) {
+	case MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_CREATE:
+		__atomic_add_fetch(&table->matcher_info[selector].refcnt,
+				   1, __ATOMIC_RELAXED);
+		break;
+	case MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_DESTROY:
+		__atomic_sub_fetch(&table->matcher_info[selector].refcnt, 1,
+				   __ATOMIC_RELAXED);
+		hw_cmpl_flow_update_or_destroy(dev, job, queue, error);
+		break;
+	case MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_MOVE:
+		if (status == RTE_FLOW_OP_SUCCESS) {
+			__atomic_sub_fetch(&table->matcher_info[selector].refcnt, 1,
+					   __ATOMIC_RELAXED);
+			__atomic_add_fetch(&table->matcher_info[other_selector].refcnt,
+					   1, __ATOMIC_RELAXED);
+			flow->matcher_selector = other_selector;
+		}
+		break;
+	default:
+		break;
+	}
+}
+
 /**
  * Pull the enqueued flows.
  *
@@ -3843,9 +3965,7 @@ flow_hw_pull(struct rte_eth_dev *dev,
 	     struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_aso_mtr_pool *pool = priv->hws_mpool;
 	struct mlx5_hw_q_job *job;
-	uint32_t res_idx;
 	int ret, i;
 
 	/* 1. Pull the flow completion. */
@@ -3856,31 +3976,20 @@ flow_hw_pull(struct rte_eth_dev *dev,
 				"fail to query flow queue");
 	for (i = 0; i <  ret; i++) {
 		job = (struct mlx5_hw_q_job *)res[i].user_data;
-		/* Release the original resource index in case of update. */
-		res_idx = job->flow->res_idx;
 		/* Restore user data. */
 		res[i].user_data = job->user_data;
-		if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY ||
-		    job->type == MLX5_HW_Q_JOB_TYPE_UPDATE) {
-			if (job->flow->fate_type == MLX5_FLOW_FATE_JUMP)
-				flow_hw_jump_release(dev, job->flow->jump);
-			else if (job->flow->fate_type == MLX5_FLOW_FATE_QUEUE)
-				mlx5_hrxq_obj_release(dev, job->flow->hrxq);
-			if (mlx5_hws_cnt_id_valid(job->flow->cnt_id))
-				flow_hw_age_count_release(priv, queue,
-							  job->flow, error);
-			if (job->flow->mtr_id) {
-				mlx5_ipool_free(pool->idx_pool,	job->flow->mtr_id);
-				job->flow->mtr_id = 0;
-			}
-			if (job->type == MLX5_HW_Q_JOB_TYPE_DESTROY) {
-				mlx5_ipool_free(job->flow->table->resource, res_idx);
-				mlx5_ipool_free(job->flow->table->flow, job->flow->idx);
-			} else {
-				rte_memcpy(job->flow, job->upd_flow,
-					offsetof(struct rte_flow_hw, rule));
-				mlx5_ipool_free(job->flow->table->resource, res_idx);
-			}
+		switch (job->type) {
+		case MLX5_HW_Q_JOB_TYPE_DESTROY:
+		case MLX5_HW_Q_JOB_TYPE_UPDATE:
+			hw_cmpl_flow_update_or_destroy(dev, job, queue, error);
+			break;
+		case MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_CREATE:
+		case MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_MOVE:
+		case MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_DESTROY:
+			hw_cmpl_resizable_tbl(dev, job, queue, res[i].status, error);
+			break;
+		default:
+			break;
 		}
 		flow_hw_job_put(priv, job, queue);
 	}
@@ -3888,24 +3997,36 @@ flow_hw_pull(struct rte_eth_dev *dev,
 	if (ret < n_res)
 		ret += __flow_hw_pull_indir_action_comp(dev, queue, &res[ret],
 							n_res - ret);
+	if (ret < n_res)
+		ret += mlx5_hw_pull_flow_transfer_comp(dev, queue, &res[ret],
+						       n_res - ret);
+
 	return ret;
 }
 
+static uint32_t
+mlx5_hw_push_queue(struct rte_ring *pending_q, struct rte_ring *cmpl_q)
+{
+	void *job = NULL;
+	uint32_t i, size = rte_ring_count(pending_q);
+
+	for (i = 0; i < size; i++) {
+		rte_ring_dequeue(pending_q, &job);
+		rte_ring_enqueue(cmpl_q, job);
+	}
+	return size;
+}
+
 static inline uint32_t
 __flow_hw_push_action(struct rte_eth_dev *dev,
 		    uint32_t queue)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct rte_ring *iq = priv->hw_q[queue].indir_iq;
-	struct rte_ring *cq = priv->hw_q[queue].indir_cq;
-	void *job = NULL;
-	uint32_t ret, i;
+	struct mlx5_hw_q *hw_q = &priv->hw_q[queue];
 
-	ret = rte_ring_count(iq);
-	for (i = 0; i < ret; i++) {
-		rte_ring_dequeue(iq, &job);
-		rte_ring_enqueue(cq, job);
-	}
+	mlx5_hw_push_queue(hw_q->indir_iq, hw_q->indir_cq);
+	mlx5_hw_push_queue(hw_q->flow_transfer_pending,
+			   hw_q->flow_transfer_completed);
 	if (!priv->shared_host) {
 		if (priv->hws_ctpool)
 			mlx5_aso_push_wqe(priv->sh,
@@ -4314,6 +4435,7 @@ flow_hw_table_create(struct rte_eth_dev *dev,
 	grp = container_of(ge, struct mlx5_flow_group, entry);
 	tbl->grp = grp;
 	/* Prepare matcher information. */
+	matcher_attr.resizable = !!rte_flow_table_resizable(dev->data->port_id, &table_cfg->attr);
 	matcher_attr.optimize_flow_src = MLX5DR_MATCHER_FLOW_SRC_ANY;
 	matcher_attr.priority = attr->flow_attr.priority;
 	matcher_attr.optimize_using_rule_idx = true;
@@ -4332,7 +4454,7 @@ flow_hw_table_create(struct rte_eth_dev *dev,
 			       RTE_FLOW_TABLE_SPECIALIZE_TRANSFER_VPORT_ORIG;
 
 		if ((attr->specialize & val) == val) {
-			DRV_LOG(INFO, "Invalid hint value %x",
+			DRV_LOG(ERR, "Invalid hint value %x",
 				attr->specialize);
 			rte_errno = EINVAL;
 			goto it_error;
@@ -4374,10 +4496,11 @@ flow_hw_table_create(struct rte_eth_dev *dev,
 		i = nb_item_templates;
 		goto it_error;
 	}
-	tbl->matcher = mlx5dr_matcher_create
+	tbl->matcher_info[0].matcher = mlx5dr_matcher_create
 		(tbl->grp->tbl, mt, nb_item_templates, at, nb_action_templates, &matcher_attr);
-	if (!tbl->matcher)
+	if (!tbl->matcher_info[0].matcher)
 		goto at_error;
+	tbl->matcher_attr = matcher_attr;
 	tbl->type = attr->flow_attr.transfer ? MLX5DR_TABLE_TYPE_FDB :
 		    (attr->flow_attr.egress ? MLX5DR_TABLE_TYPE_NIC_TX :
 		    MLX5DR_TABLE_TYPE_NIC_RX);
@@ -4385,6 +4508,7 @@ flow_hw_table_create(struct rte_eth_dev *dev,
 		LIST_INSERT_HEAD(&priv->flow_hw_tbl, tbl, next);
 	else
 		LIST_INSERT_HEAD(&priv->flow_hw_tbl_ongo, tbl, next);
+	rte_rwlock_init(&tbl->matcher_replace_rwlk);
 	return tbl;
 at_error:
 	for (i = 0; i < nb_action_templates; i++) {
@@ -4556,6 +4680,11 @@ flow_hw_template_table_create(struct rte_eth_dev *dev,
 
 	if (flow_hw_translate_group(dev, &cfg, group, &cfg.attr.flow_attr.group, error))
 		return NULL;
+	if (!cfg.attr.flow_attr.group && rte_flow_table_resizable(dev->data->port_id, attr)) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "table cannot be resized: invalid group");
+		return NULL;
+	}
 	return flow_hw_table_create(dev, &cfg, item_templates, nb_item_templates,
 				    action_templates, nb_action_templates, error);
 }
@@ -4628,7 +4757,10 @@ flow_hw_table_destroy(struct rte_eth_dev *dev,
 				   1, __ATOMIC_RELAXED);
 	}
 	flow_hw_destroy_table_multi_pattern_ctx(table);
-	mlx5dr_matcher_destroy(table->matcher);
+	if (table->matcher_info[0].matcher)
+		mlx5dr_matcher_destroy(table->matcher_info[0].matcher);
+	if (table->matcher_info[1].matcher)
+		mlx5dr_matcher_destroy(table->matcher_info[1].matcher);
 	mlx5_hlist_unregister(priv->sh->groups, &table->grp->entry);
 	mlx5_ipool_destroy(table->resource);
 	mlx5_ipool_destroy(table->flow);
@@ -9178,6 +9310,16 @@ flow_hw_compare_config(const struct mlx5_flow_hw_attr *hw_attr,
 	return true;
 }
 
+static __rte_always_inline struct rte_ring *
+mlx5_hwq_ring_create(uint16_t port_id, uint32_t queue, uint32_t size, const char *str)
+{
+	char mz_name[RTE_MEMZONE_NAMESIZE];
+
+	snprintf(mz_name, sizeof(mz_name), "port_%u_%s_%u", port_id, str, queue);
+	return rte_ring_create(mz_name, size, SOCKET_ID_ANY,
+			       RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
+}
+
 /**
  * Configure port HWS resources.
  *
@@ -9305,7 +9447,6 @@ flow_hw_configure(struct rte_eth_dev *dev,
 		goto err;
 	}
 	for (i = 0; i < nb_q_updated; i++) {
-		char mz_name[RTE_MEMZONE_NAMESIZE];
 		uint8_t *encap = NULL, *push = NULL;
 		struct mlx5_modification_cmd *mhdr_cmd = NULL;
 		struct rte_flow_item *items = NULL;
@@ -9339,22 +9480,22 @@ flow_hw_configure(struct rte_eth_dev *dev,
 			job[j].upd_flow = &upd_flow[j];
 			priv->hw_q[i].job[j] = &job[j];
 		}
-		snprintf(mz_name, sizeof(mz_name), "port_%u_indir_act_cq_%u",
-			 dev->data->port_id, i);
-		priv->hw_q[i].indir_cq = rte_ring_create(mz_name,
-				_queue_attr[i]->size, SOCKET_ID_ANY,
-				RING_F_SP_ENQ | RING_F_SC_DEQ |
-				RING_F_EXACT_SZ);
+		priv->hw_q[i].indir_cq = mlx5_hwq_ring_create
+			(dev->data->port_id, i, _queue_attr[i]->size, "indir_act_cq");
 		if (!priv->hw_q[i].indir_cq)
 			goto err;
-		snprintf(mz_name, sizeof(mz_name), "port_%u_indir_act_iq_%u",
-			 dev->data->port_id, i);
-		priv->hw_q[i].indir_iq = rte_ring_create(mz_name,
-				_queue_attr[i]->size, SOCKET_ID_ANY,
-				RING_F_SP_ENQ | RING_F_SC_DEQ |
-				RING_F_EXACT_SZ);
+		priv->hw_q[i].indir_iq = mlx5_hwq_ring_create
+			(dev->data->port_id, i, _queue_attr[i]->size, "indir_act_iq");
 		if (!priv->hw_q[i].indir_iq)
 			goto err;
+		priv->hw_q[i].flow_transfer_pending = mlx5_hwq_ring_create
+			(dev->data->port_id, i, _queue_attr[i]->size, "pending_transfer");
+		if (!priv->hw_q[i].flow_transfer_pending)
+			goto err;
+		priv->hw_q[i].flow_transfer_completed = mlx5_hwq_ring_create
+			(dev->data->port_id, i, _queue_attr[i]->size, "completed_transfer");
+		if (!priv->hw_q[i].flow_transfer_completed)
+			goto err;
 	}
 	dr_ctx_attr.pd = priv->sh->cdev->pd;
 	dr_ctx_attr.queues = nb_q_updated;
@@ -9570,6 +9711,8 @@ flow_hw_configure(struct rte_eth_dev *dev,
 	for (i = 0; i < nb_q_updated; i++) {
 		rte_ring_free(priv->hw_q[i].indir_iq);
 		rte_ring_free(priv->hw_q[i].indir_cq);
+		rte_ring_free(priv->hw_q[i].flow_transfer_pending);
+		rte_ring_free(priv->hw_q[i].flow_transfer_completed);
 	}
 	mlx5_free(priv->hw_q);
 	priv->hw_q = NULL;
@@ -11494,7 +11637,7 @@ flow_hw_calc_table_hash(struct rte_eth_dev *dev,
 	items = flow_hw_get_rule_items(dev, table, pattern,
 				       pattern_template_index,
 				       &job);
-	res = mlx5dr_rule_hash_calculate(table->matcher, items,
+	res = mlx5dr_rule_hash_calculate(mlx5_table_matcher(table), items,
 					 pattern_template_index,
 					 MLX5DR_RULE_HASH_CALC_MODE_RAW,
 					 hash);
@@ -11506,6 +11649,220 @@ flow_hw_calc_table_hash(struct rte_eth_dev *dev,
 	return 0;
 }
 
+static int
+flow_hw_table_resize_multi_pattern_actions(struct rte_eth_dev *dev,
+					   struct rte_flow_template_table *table,
+					   uint32_t nb_flows,
+					   struct rte_flow_error *error)
+{
+	struct mlx5_multi_pattern_segment *segment = table->mpctx.segments;
+	uint32_t bulk_size;
+	int i, ret;
+
+	/*
+	 * A segment always allocates Modify Header Argument Objects in
+	 * powers of 2.
+	 * On resize, the PMD adds the minimal required number of argument
+	 * objects. For example, if the table size was 10, 16 argument objects
+	 * were allocated; resizing to 15 will not add new objects.
+	 */
+	for (i = 1;
+	     i < MLX5_MAX_TABLE_RESIZE_NUM && segment->capacity;
+	     i++, segment++);
+	if (i == MLX5_MAX_TABLE_RESIZE_NUM)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  table, "too many resizes");
+	if (segment->head_index - 1 >= nb_flows)
+		return 0;
+	bulk_size = rte_align32pow2(nb_flows - segment->head_index + 1);
+	ret = mlx5_tbl_multi_pattern_process(dev, table, segment,
+					     rte_log2_u32(bulk_size),
+					     error);
+	if (ret)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  table, "failed to allocate multi-pattern resources");
+	return i;
+}
+
+static int
+flow_hw_table_resize(struct rte_eth_dev *dev,
+		     struct rte_flow_template_table *table,
+		     uint32_t nb_flows,
+		     struct rte_flow_error *error)
+{
+	struct mlx5dr_action_template *at[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
+	struct mlx5dr_match_template *mt[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
+	struct mlx5dr_matcher_attr matcher_attr = table->matcher_attr;
+	struct mlx5_multi_pattern_segment *segment = NULL;
+	struct mlx5dr_matcher *matcher = NULL;
+	uint32_t i, selector = table->matcher_selector;
+	uint32_t other_selector = (selector + 1) & 1;
+	int ret;
+
+	if (!rte_flow_table_resizable(dev->data->port_id, &table->cfg.attr))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  table, "no resizable attribute");
+	if (table->matcher_info[other_selector].matcher)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  table, "last table resize was not completed");
+	if (nb_flows <= table->cfg.attr.nb_flows)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  table, "shrinking table is not supported");
+	ret = mlx5_ipool_resize(table->flow, nb_flows);
+	if (ret)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  table, "cannot resize flows pool");
+	ret = mlx5_ipool_resize(table->resource, nb_flows);
+	if (ret)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  table, "cannot resize resources pool");
+	if (mlx5_is_multi_pattern_active(&table->mpctx)) {
+		ret = flow_hw_table_resize_multi_pattern_actions(dev, table, nb_flows, error);
+		if (ret < 0)
+			return ret;
+		if (ret > 0)
+			segment = table->mpctx.segments + ret;
+	}
+	for (i = 0; i < table->nb_item_templates; i++)
+		mt[i] = table->its[i]->mt;
+	for (i = 0; i < table->nb_action_templates; i++)
+		at[i] = table->ats[i].action_template->tmpl;
+	nb_flows = rte_align32pow2(nb_flows);
+	matcher_attr.rule.num_log = rte_log2_u32(nb_flows);
+	matcher = mlx5dr_matcher_create(table->grp->tbl, mt,
+					table->nb_item_templates, at,
+					table->nb_action_templates,
+					&matcher_attr);
+	if (!matcher) {
+		ret = rte_flow_error_set(error, rte_errno,
+					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					 table, "failed to create new matcher");
+		goto error;
+	}
+	rte_rwlock_write_lock(&table->matcher_replace_rwlk);
+	ret = mlx5dr_matcher_resize_set_target
+			(table->matcher_info[selector].matcher, matcher);
+	if (ret) {
+		rte_rwlock_write_unlock(&table->matcher_replace_rwlk);
+		ret = rte_flow_error_set(error, rte_errno,
+					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					 table, "failed to initiate matcher swap");
+		goto error;
+	}
+	table->cfg.attr.nb_flows = nb_flows;
+	table->matcher_info[other_selector].matcher = matcher;
+	table->matcher_info[other_selector].refcnt = 0;
+	table->matcher_selector = other_selector;
+	rte_rwlock_write_unlock(&table->matcher_replace_rwlk);
+	return 0;
+error:
+	if (segment)
+		mlx5_destroy_multi_pattern_segment(segment);
+	if (matcher) {
+		ret = mlx5dr_matcher_destroy(matcher);
+		return rte_flow_error_set(error, rte_errno,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  table, "failed to destroy new matcher");
+	}
+	return ret;
+}
+
+static int
+flow_hw_table_resize_complete(struct rte_eth_dev *dev,
+			      struct rte_flow_template_table *table,
+			      struct rte_flow_error *error)
+{
+	int ret;
+	uint32_t selector = table->matcher_selector;
+	uint32_t other_selector = (selector + 1) & 1;
+	struct mlx5_matcher_info *matcher_info = &table->matcher_info[other_selector];
+
+	if (!rte_flow_table_resizable(dev->data->port_id, &table->cfg.attr))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  table, "no resizable attribute");
+	if (!matcher_info->matcher || matcher_info->refcnt)
+		return rte_flow_error_set(error, EBUSY,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  table, "cannot complete table resize");
+	ret = mlx5dr_matcher_destroy(matcher_info->matcher);
+	if (ret)
+		return rte_flow_error_set(error, rte_errno,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  table, "failed to destroy retired matcher");
+	matcher_info->matcher = NULL;
+	return 0;
+}
+
+static int
+flow_hw_update_resized(struct rte_eth_dev *dev, uint32_t queue,
+		       const struct rte_flow_op_attr *attr,
+		       struct rte_flow *flow, void *user_data,
+		       struct rte_flow_error *error)
+{
+	int ret;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hw_q_job *job;
+	struct rte_flow_hw *hw_flow = (struct rte_flow_hw *)flow;
+	struct rte_flow_template_table *table = hw_flow->table;
+	uint32_t table_selector = table->matcher_selector;
+	uint32_t rule_selector = hw_flow->matcher_selector;
+	uint32_t other_selector;
+	struct mlx5dr_matcher *other_matcher;
+	struct mlx5dr_rule_attr rule_attr = {
+		.queue_id = queue,
+		.burst = attr->postpone,
+	};
+
+	/*
+	 * mlx5dr_matcher_resize_rule_move() expects the original table
+	 * matcher - the one that was in use BEFORE the table resize.
+	 * Since this function is called AFTER the table resize,
+	 * `table->matcher_selector` always points to the new matcher and
+	 * `hw_flow->matcher_selector` points to the matcher used to create the flow.
+	 */
+	other_selector = rule_selector == table_selector ?
+			 (rule_selector + 1) & 1 : rule_selector;
+	other_matcher = table->matcher_info[other_selector].matcher;
+	if (!other_matcher)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "no active table resize");
+	job = flow_hw_job_get(priv, queue);
+	if (!job)
+		return rte_flow_error_set(error, ENOMEM,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "queue is full");
+	job->type = MLX5_HW_Q_JOB_TYPE_RSZTBL_FLOW_MOVE;
+	job->user_data = user_data;
+	job->flow = hw_flow;
+	rule_attr.user_data = job;
+	if (rule_selector == table_selector) {
+		struct rte_ring *ring = !attr->postpone ?
+					priv->hw_q[queue].flow_transfer_completed :
+					priv->hw_q[queue].flow_transfer_pending;
+		rte_ring_enqueue(ring, job);
+		return 0;
+	}
+	ret = mlx5dr_matcher_resize_rule_move(other_matcher,
+					      (struct mlx5dr_rule *)hw_flow->rule,
+					      &rule_attr);
+	if (ret) {
+		flow_hw_job_put(priv, job, queue);
+		return rte_flow_error_set(error, rte_errno,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "flow transfer failed");
+	}
+	return 0;
+}
+
 const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
 	.info_get = flow_hw_info_get,
 	.configure = flow_hw_configure,
@@ -11517,11 +11874,14 @@ const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {
 	.actions_template_destroy = flow_hw_actions_template_destroy,
 	.template_table_create = flow_hw_template_table_create,
 	.template_table_destroy = flow_hw_table_destroy,
+	.table_resize = flow_hw_table_resize,
 	.group_set_miss_actions = flow_hw_group_set_miss_actions,
 	.async_flow_create = flow_hw_async_flow_create,
 	.async_flow_create_by_index = flow_hw_async_flow_create_by_index,
 	.async_flow_update = flow_hw_async_flow_update,
 	.async_flow_destroy = flow_hw_async_flow_destroy,
+	.flow_update_resized = flow_hw_update_resized,
+	.table_resize_complete = flow_hw_table_resize_complete,
 	.pull = flow_hw_pull,
 	.push = flow_hw_push,
 	.async_action_create = flow_hw_action_handle_create,
diff --git a/drivers/net/mlx5/mlx5_host.c b/drivers/net/mlx5/mlx5_host.c
new file mode 100644
index 0000000000..4f3356d6e6
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_host.c
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include <rte_flow.h>
+#include <rte_pmd_mlx5.h>
+#include <mlx5_malloc.h>
+
+#include "mlx5_flow.h"
+#include "mlx5.h"
+
+#include "hws/host/mlx5dr_host.h"
+
+struct rte_pmd_mlx5_dr_action_cache {
+	enum rte_flow_action_type type;
+	void *release_data;
+	struct mlx5dr_dev_action *dr_dev_action;
+	LIST_ENTRY(rte_pmd_mlx5_dr_action_cache) next;
+};
+
+struct rte_pmd_mlx5_dev_process {
+	struct mlx5dr_dev_process *dr_dev_process;
+	struct mlx5dr_dev_context *dr_dev_ctx;
+	uint16_t port_id;
+	LIST_HEAD(action_head, rte_pmd_mlx5_dr_action_cache) head;
+};
+
+struct rte_pmd_mlx5_dev_process *
+rte_pmd_mlx5_host_process_open(uint16_t port_id,
+			       struct rte_pmd_mlx5_host_device_info *info)
+{
+	struct rte_pmd_mlx5_dev_process *dev_process;
+	struct mlx5dr_dev_context_attr dr_attr = {0};
+	struct mlx5dr_dev_process *dr_dev_process;
+	const struct mlx5_priv *priv;
+
+	dev_process = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
+				  sizeof(struct rte_pmd_mlx5_dev_process),
+				  MLX5_MALLOC_ALIGNMENT,
+				  SOCKET_ID_ANY);
+	if (!dev_process) {
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+
+	if (info->type == RTE_PMD_MLX5_DEVICE_TYPE_DPA)
+		dr_dev_process = mlx5dr_host_process_open(info->dpa.process, info->dpa.outbox);
+	else
+		dr_dev_process = mlx5dr_host_process_open(NULL, NULL);
+
+	if (!dr_dev_process)
+		goto free_dev_process;
+
+	dev_process->port_id = port_id;
+	dev_process->dr_dev_process = dr_dev_process;
+
+	priv = rte_eth_devices[port_id].data->dev_private;
+	dr_attr.queue_size = info->queue_size;
+	dr_attr.queues = info->queues;
+
+	dev_process->dr_dev_ctx = mlx5dr_host_context_bind(dr_dev_process,
+							    priv->dr_ctx,
+							    &dr_attr);
+	if (!dev_process->dr_dev_ctx)
+		goto close_process;
+
+	return (struct rte_pmd_mlx5_dev_process *)dev_process;
+
+close_process:
+	mlx5dr_host_process_close(dr_dev_process);
+free_dev_process:
+	mlx5_free(dev_process);
+	return NULL;
+}
+
+int
+rte_pmd_mlx5_host_process_close(struct rte_pmd_mlx5_dev_process *dev_process)
+{
+	struct mlx5dr_dev_process *dr_dev_process = dev_process->dr_dev_process;
+
+	mlx5dr_host_context_unbind(dr_dev_process, dev_process->dr_dev_ctx);
+	mlx5dr_host_process_close(dr_dev_process);
+	mlx5_free(dev_process);
+	return 0;
+}
+
+struct rte_pmd_mlx5_dev_ctx *
+rte_pmd_mlx5_host_get_dev_ctx(struct rte_pmd_mlx5_dev_process *dev_process)
+{
+	return (struct rte_pmd_mlx5_dev_ctx *)dev_process->dr_dev_ctx;
+}
+
+struct rte_pmd_mlx5_dev_table *
+rte_pmd_mlx5_host_table_bind(struct rte_pmd_mlx5_dev_process *dev_process,
+			     struct rte_flow_template_table *table)
+{
+	struct mlx5dr_dev_process *dr_dev_process;
+	struct mlx5dr_dev_matcher *dr_dev_matcher;
+	struct mlx5dr_matcher *matcher;
+
+	if (rte_flow_table_resizable(dev_process->port_id, &table->cfg.attr)) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+
+	dr_dev_process = dev_process->dr_dev_process;
+	matcher = table->matcher_info[0].matcher;
+
+	dr_dev_matcher = mlx5dr_host_matcher_bind(dr_dev_process, matcher);
+
+	return (struct rte_pmd_mlx5_dev_table *)dr_dev_matcher;
+}
+
+int
+rte_pmd_mlx5_host_table_unbind(struct rte_pmd_mlx5_dev_process *dev_process,
+			       struct rte_pmd_mlx5_dev_table *dev_table)
+{
+	struct mlx5dr_dev_process *dr_dev_process;
+	struct mlx5dr_dev_matcher *dr_dev_matcher;
+
+	dr_dev_process = dev_process->dr_dev_process;
+	dr_dev_matcher = (struct mlx5dr_dev_matcher *)dev_table;
+
+	return mlx5dr_host_matcher_unbind(dr_dev_process, dr_dev_matcher);
+}
+
+struct rte_pmd_mlx5_dev_action *
+rte_pmd_mlx5_host_action_bind(struct rte_pmd_mlx5_dev_process *dev_process,
+			      struct rte_pmd_mlx5_host_action *action)
+{
+	struct rte_eth_dev *dev = &rte_eth_devices[dev_process->port_id];
+	struct rte_pmd_mlx5_dr_action_cache *action_cache;
+	struct mlx5dr_dev_process *dr_dev_process;
+	struct mlx5dr_dev_action *dr_dev_action;
+	struct mlx5dr_action *dr_action;
+	void *release_data;
+
+	dr_dev_process = dev_process->dr_dev_process;
+
+	action_cache = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
+				   sizeof(*action_cache),
+				   MLX5_MALLOC_ALIGNMENT,
+				   SOCKET_ID_ANY);
+	if (!action_cache) {
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+
+	dr_action = mlx5_flow_hw_get_dr_action(dev, action, &release_data);
+	if (!dr_action) {
+		DRV_LOG(ERR, "Failed to get dr action type %d", action->type);
+		goto free_rte_host_action;
+	}
+
+	dr_dev_action = mlx5dr_host_action_bind(dr_dev_process, dr_action);
+	if (!dr_dev_action) {
+		DRV_LOG(ERR, "Failed to bind dr_action");
+		goto put_dr_action;
+	}
+
+	action_cache->type = action->type;
+	action_cache->release_data = release_data;
+	action_cache->dr_dev_action = dr_dev_action;
+	LIST_INSERT_HEAD(&dev_process->head, action_cache, next);
+
+	return (struct rte_pmd_mlx5_dev_action *)dr_dev_action;
+
+put_dr_action:
+	mlx5_flow_hw_put_dr_action(dev, action->type, release_data);
+free_rte_host_action:
+	mlx5_free(action_cache);
+	return NULL;
+}
+
+int
+rte_pmd_mlx5_host_action_unbind(struct rte_pmd_mlx5_dev_process *dev_process,
+				struct rte_pmd_mlx5_dev_action *dev_action)
+{
+	struct rte_eth_dev *dev = &rte_eth_devices[dev_process->port_id];
+	struct rte_pmd_mlx5_dr_action_cache *action_cache;
+	struct mlx5dr_dev_process *dr_dev_process;
+	struct mlx5dr_dev_action *dr_dev_action;
+
+	dr_dev_process = dev_process->dr_dev_process;
+	dr_dev_action = (struct mlx5dr_dev_action *)dev_action;
+
+	LIST_FOREACH(action_cache, &dev_process->head, next) {
+		if (action_cache->dr_dev_action == dr_dev_action) {
+			LIST_REMOVE(action_cache, next);
+			mlx5dr_host_action_unbind(dr_dev_process, dr_dev_action);
+			mlx5_flow_hw_put_dr_action(dev,
+						   action_cache->type,
+						   action_cache->release_data);
+			mlx5_free(action_cache);
+			return 0;
+		}
+	}
+
+	DRV_LOG(ERR, "Failed to find dr action to unbind");
+	rte_errno = EINVAL;
+	return rte_errno;
+}
+
+size_t rte_pmd_mlx5_host_get_dev_rule_handle_size(void)
+{
+	return mlx5dr_host_rule_get_dev_rule_handle_size();
+}
-- 
2.39.2



Thread overview: 17+ messages
2024-02-02 11:56 [PATCH 0/5] " Gregory Etelson
2024-02-02 11:56 ` [PATCH 1/5] net/mlx5/hws: add support for resizable matchers Gregory Etelson
2024-02-28 10:25   ` [PATCH v2 0/4] net/mlx5: add support for flow table resizing Gregory Etelson
2024-02-28 10:25     ` [PATCH v2 1/4] net/mlx5: add resize function to ipool Gregory Etelson
2024-02-28 10:25     ` [PATCH v2 2/4] net/mlx5: fix parameters verification in HWS table create Gregory Etelson
2024-02-28 10:25     ` [PATCH v2 3/4] net/mlx5: move multi-pattern actions management to table level Gregory Etelson
2024-02-28 10:25     ` [PATCH v2 4/4] net/mlx5: add support for flow table resizing Gregory Etelson
2024-02-28 13:33   ` [PATCH v3 0/4] " Gregory Etelson
2024-02-28 13:33     ` [PATCH v3 1/4] net/mlx5: add resize function to ipool Gregory Etelson
2024-02-28 13:33     ` [PATCH v3 2/4] net/mlx5: fix parameters verification in HWS table create Gregory Etelson
2024-02-28 13:33     ` [PATCH v3 3/4] net/mlx5: move multi-pattern actions management to table level Gregory Etelson
2024-02-28 13:33     ` [PATCH v3 4/4] net/mlx5: add support for flow table resizing Gregory Etelson
2024-02-28 15:50     ` [PATCH v3 0/4] " Raslan Darawsheh
2024-02-02 11:56 ` [PATCH 2/5] net/mlx5: add resize function to ipool Gregory Etelson
2024-02-02 11:56 ` [PATCH 3/5] net/mlx5: fix parameters verification in HWS table create Gregory Etelson
2024-02-02 11:56 ` [PATCH 4/5] net/mlx5: move multi-pattern actions management to table level Gregory Etelson
2024-02-02 11:56 ` Gregory Etelson [this message]
