DPDK patches and discussions
From: Suanming Mou <suanmingm@mellanox.com>
To: viacheslavo@mellanox.com, matan@mellanox.com
Cc: orika@mellanox.com, wentaoc@mellanox.com, rasland@mellanox.com,
	dev@dpdk.org
Subject: [dpdk-dev] [PATCH v2 09/10] net/mlx5: allocate rte flow from indexed pool
Date: Thu, 16 Apr 2020 16:34:30 +0800	[thread overview]
Message-ID: <1587026071-422636-10-git-send-email-suanmingm@mellanox.com> (raw)
In-Reply-To: <1587026071-422636-1-git-send-email-suanmingm@mellanox.com>

This commit allocates the rte flow from the indexed memory pool.

Allocating rte flow memory from the indexed memory pool saves more than
MALLOC_ELEM_OVERHEAD bytes of memory per flow compared with rte_malloc().
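
For readers skimming the diff, the idea in a nutshell: every struct rte_flow
used to be an individual rte_calloc() allocation, each carrying
MALLOC_ELEM_OVERHEAD bytes of allocator metadata; after this patch, flows live
in a trunk-based indexed pool and are referred to by a 32-bit index. Below is
a minimal sketch of the allocate/lookup/free pattern, built from the same
calls the patch uses; the example_* wrapper names, the include list and the
error handling are illustrative assumptions, not driver code:

#include <errno.h>
#include <rte_errno.h>
#include "mlx5.h"        /* struct mlx5_priv, MLX5_IPOOL_RTE_FLOW */
#include "mlx5_flow.h"   /* struct rte_flow */

static uint32_t
example_flow_alloc(struct mlx5_priv *priv)
{
	uint32_t idx = 0;
	struct rte_flow *flow;

	/* One call returns a zeroed element and its 32-bit index. */
	flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);
	if (!flow) {
		rte_errno = ENOMEM;
		return 0;	/* 0 means "no flow", as in the patch. */
	}
	/* ... fill in the flow ... */
	return idx;		/* Callers keep the index, not the pointer. */
}

static void
example_flow_release(struct mlx5_priv *priv, uint32_t idx)
{
	/* Translate the index back to memory only when needed. */
	struct rte_flow *flow =
		mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);

	if (!flow)
		return;
	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);
}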

Signed-off-by: Suanming Mou <suanmingm@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
---
 drivers/net/mlx5/mlx5.c      |  13 ++-
 drivers/net/mlx5/mlx5.h      |  15 ++-
 drivers/net/mlx5/mlx5_flow.c | 234 ++++++++++++++++++++++++++-----------------
 drivers/net/mlx5/mlx5_flow.h |   6 +-
 4 files changed, 160 insertions(+), 108 deletions(-)
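
Since flows are now addressed by index, the per-port flow lists (priv->flows,
priv->ctrl_flows) shrink to plain uint32_t heads (0 means empty) linked
through the pool, and traversal goes through ILIST_FOREACH as the hunks below
show. A hedged sketch of that traversal pattern follows; the function name
and the log text are illustrative, only the ILIST_FOREACH usage is taken from
the patch:

static void
example_dump_flows(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow *flow = NULL;
	uint32_t idx;

	/* Walk the index-linked list; the pool resolves idx -> flow. */
	ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], priv->flows, idx,
		      flow, next) {
		DRV_LOG(DEBUG, "port %u flow index %u at %p",
			dev->data->port_id, idx, (void *)flow);
	}
}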

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 1bb464d..9ab4b1f 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -300,6 +300,15 @@ struct mlx5_dev_spawn_data {
 		.free = rte_free,
 		.type = "mlx5_flow_handle_ipool",
 	},
+	{
+		.size = sizeof(struct rte_flow),
+		.trunk_size = 4096,
+		.need_lock = 1,
+		.release_mem_en = 1,
+		.malloc = rte_malloc_socket,
+		.free = rte_free,
+		.type = "rte_flow_ipool",
+	},
 };
 
 
@@ -2885,8 +2894,8 @@ struct mlx5_flow_id_pool *
 				      mlx5_ifindex(eth_dev),
 				      eth_dev->data->mac_addrs,
 				      MLX5_MAX_MAC_ADDRESSES);
-	TAILQ_INIT(&priv->flows);
-	TAILQ_INIT(&priv->ctrl_flows);
+	priv->flows = 0;
+	priv->ctrl_flows = 0;
 	TAILQ_INIT(&priv->flow_meters);
 	TAILQ_INIT(&priv->flow_meter_profiles);
 	/* Hint libmlx5 to use PMD allocator for data plane resources */
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 164ca3a..318618e 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -56,6 +56,7 @@ enum mlx5_ipool_index {
 	MLX5_IPOOL_MCP, /* Pool for metadata resource. */
 	MLX5_IPOOL_HRXQ, /* Pool for hrxq resource. */
 	MLX5_IPOOL_MLX5_FLOW, /* Pool for mlx5 flow handle. */
+	MLX5_IPOOL_RTE_FLOW, /* Pool for rte_flow. */
 	MLX5_IPOOL_MAX,
 };
 
@@ -109,9 +110,6 @@ struct mlx5_stats_ctrl {
 	uint64_t imissed;
 };
 
-/* Flow list . */
-TAILQ_HEAD(mlx5_flows, rte_flow);
-
 /* Default PMD specific parameter value. */
 #define MLX5_ARG_UNSET (-1)
 
@@ -512,8 +510,8 @@ struct mlx5_priv {
 	unsigned int (*reta_idx)[]; /* RETA index table. */
 	unsigned int reta_idx_n; /* RETA index size. */
 	struct mlx5_drop drop_queue; /* Flow drop queues. */
-	struct mlx5_flows flows; /* RTE Flow rules. */
-	struct mlx5_flows ctrl_flows; /* Control flow rules. */
+	uint32_t flows; /* RTE Flow rules. */
+	uint32_t ctrl_flows; /* Control flow rules. */
 	void *inter_flows; /* Intermediate resources for flow creation. */
 	void *rss_desc; /* Intermediate rss description resources. */
 	int flow_idx; /* Intermediate device flow index. */
@@ -715,8 +713,7 @@ struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev,
 				  struct rte_flow_error *error);
 int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
 		      struct rte_flow_error *error);
-void mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list,
-			  bool active);
+void mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active);
 int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
 int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
 		    const struct rte_flow_action *action, void *data,
@@ -727,8 +724,8 @@ int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
 			 enum rte_filter_type filter_type,
 			 enum rte_filter_op filter_op,
 			 void *arg);
-int mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list);
-void mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list);
+int mlx5_flow_start(struct rte_eth_dev *dev, uint32_t *list);
+void mlx5_flow_stop(struct rte_eth_dev *dev, uint32_t *list);
 int mlx5_flow_start_default(struct rte_eth_dev *dev);
 void mlx5_flow_stop_default(struct rte_eth_dev *dev);
 void mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev);
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 4f74e6b..c529aa3 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -2906,16 +2906,16 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 }
 
 /* Declare flow create/destroy prototype in advance. */
-static struct rte_flow *
-flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
+static uint32_t
+flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
 		 const struct rte_flow_attr *attr,
 		 const struct rte_flow_item items[],
 		 const struct rte_flow_action actions[],
 		 bool external, struct rte_flow_error *error);
 
 static void
-flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
-		  struct rte_flow *flow);
+flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
+		  uint32_t flow_idx);
 
 /**
  * Add a flow of copying flow metadata registers in RX_CP_TBL.
@@ -3052,9 +3052,9 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	 * be applied, removed, deleted in ardbitrary order
 	 * by list traversing.
 	 */
-	mcp_res->flow = flow_list_create(dev, NULL, &attr, items,
+	mcp_res->rix_flow = flow_list_create(dev, NULL, &attr, items,
 					 actions, false, error);
-	if (!mcp_res->flow)
+	if (!mcp_res->rix_flow)
 		goto error;
 	mcp_res->refcnt++;
 	mcp_res->hlist_ent.key = mark_id;
@@ -3065,8 +3065,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		goto error;
 	return mcp_res;
 error:
-	if (mcp_res->flow)
-		flow_list_destroy(dev, NULL, mcp_res->flow);
+	if (mcp_res->rix_flow)
+		flow_list_destroy(dev, NULL, mcp_res->rix_flow);
 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
 	return NULL;
 }
@@ -3096,8 +3096,14 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		MLX5_ASSERT(mcp_res->appcnt);
 		flow->copy_applied = 0;
 		--mcp_res->appcnt;
-		if (!mcp_res->appcnt)
-			flow_drv_remove(dev, mcp_res->flow);
+		if (!mcp_res->appcnt) {
+			struct rte_flow *mcp_flow = mlx5_ipool_get
+					(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
+					mcp_res->rix_flow);
+
+			if (mcp_flow)
+				flow_drv_remove(dev, mcp_flow);
+		}
 	}
 	/*
 	 * We do not check availability of metadata registers here,
@@ -3105,8 +3111,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	 */
 	if (--mcp_res->refcnt)
 		return;
-	MLX5_ASSERT(mcp_res->flow);
-	flow_list_destroy(dev, NULL, mcp_res->flow);
+	MLX5_ASSERT(mcp_res->rix_flow);
+	flow_list_destroy(dev, NULL, mcp_res->rix_flow);
 	mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
 	flow->rix_mreg_copy = 0;
@@ -3138,9 +3144,15 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	if (!mcp_res)
 		return 0;
 	if (!mcp_res->appcnt) {
-		ret = flow_drv_apply(dev, mcp_res->flow, NULL);
-		if (ret)
-			return ret;
+		struct rte_flow *mcp_flow = mlx5_ipool_get
+				(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
+				mcp_res->rix_flow);
+
+		if (mcp_flow) {
+			ret = flow_drv_apply(dev, mcp_flow, NULL);
+			if (ret)
+				return ret;
+		}
 	}
 	++mcp_res->appcnt;
 	flow->copy_applied = 1;
@@ -3171,8 +3183,14 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	MLX5_ASSERT(mcp_res->appcnt);
 	--mcp_res->appcnt;
 	flow->copy_applied = 0;
-	if (!mcp_res->appcnt)
-		flow_drv_remove(dev, mcp_res->flow);
+	if (!mcp_res->appcnt) {
+		struct rte_flow *mcp_flow = mlx5_ipool_get
+				(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
+				mcp_res->rix_flow);
+
+		if (mcp_flow)
+			flow_drv_remove(dev, mcp_flow);
+	}
 }
 
 /**
@@ -3194,8 +3212,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 					    MLX5_DEFAULT_COPY_ID);
 	if (!mcp_res)
 		return;
-	MLX5_ASSERT(mcp_res->flow);
-	flow_list_destroy(dev, NULL, mcp_res->flow);
+	MLX5_ASSERT(mcp_res->rix_flow);
+	flow_list_destroy(dev, NULL, mcp_res->rix_flow);
 	mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
 }
@@ -4218,10 +4236,10 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  *   Perform verbose error reporting if not NULL.
  *
  * @return
- *   A flow on success, NULL otherwise and rte_errno is set.
+ *   A flow index on success, 0 otherwise and rte_errno is set.
  */
-static struct rte_flow *
-flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
+static uint32_t
+flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
 		 const struct rte_flow_attr *attr,
 		 const struct rte_flow_item items[],
 		 const struct rte_flow_action actions[],
@@ -4251,6 +4269,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	struct mlx5_flow_rss_desc *rss_desc = priv->rss_desc;
 	const struct rte_flow_action *p_actions_rx = actions;
 	uint32_t i;
+	uint32_t idx = 0;
 	int hairpin_flow = 0;
 	uint32_t hairpin_id = 0;
 	struct rte_flow_attr attr_tx = { .priority = 0 };
@@ -4258,19 +4277,19 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 				    error);
 
 	if (ret < 0)
-		return NULL;
+		return 0;
 	hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
 	if (hairpin_flow > 0) {
 		if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
 			rte_errno = EINVAL;
-			return NULL;
+			return 0;
 		}
 		flow_hairpin_split(dev, actions, actions_rx.actions,
 				   actions_hairpin_tx.actions, items_tx.items,
 				   &hairpin_id);
 		p_actions_rx = actions_rx.actions;
 	}
-	flow = rte_calloc(__func__, 1, sizeof(struct rte_flow), 0);
+	flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);
 	if (!flow) {
 		rte_errno = ENOMEM;
 		goto error_before_flow;
@@ -4373,19 +4392,20 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 			goto error;
 	}
 	if (list)
-		TAILQ_INSERT_TAIL(list, flow, next);
+		ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx,
+			     flow, next);
 	flow_rxq_flags_set(dev, flow);
 	/* Nested flow creation index recovery. */
 	priv->flow_idx = priv->flow_nested_idx;
 	if (priv->flow_nested_idx)
 		priv->flow_nested_idx = 0;
-	return flow;
+	return idx;
 error:
 	MLX5_ASSERT(flow);
 	ret = rte_errno; /* Save rte_errno before cleanup. */
 	flow_mreg_del_copy_action(dev, flow);
 	flow_drv_destroy(dev, flow);
-	rte_free(flow);
+	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);
 	rte_errno = ret; /* Restore rte_errno. */
 error_before_flow:
 	ret = rte_errno;
@@ -4396,7 +4416,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	priv->flow_idx = priv->flow_nested_idx;
 	if (priv->flow_nested_idx)
 		priv->flow_nested_idx = 0;
-	return NULL;
+	return 0;
 }
 
 /**
@@ -4444,8 +4464,9 @@ struct rte_flow *
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct rte_flow_error error;
 
-	return flow_list_create(dev, &priv->ctrl_flows, &attr, &pattern,
-				actions, false, &error);
+	return (void *)(uintptr_t)flow_list_create(dev, &priv->ctrl_flows,
+						   &attr, &pattern,
+						   actions, false, &error);
 }
 
 /**
@@ -4474,8 +4495,8 @@ struct rte_flow *
 			"inserting a flow", dev->data->port_id);
 		return NULL;
 	}
-	return flow_list_create(dev, &priv->flows,
-				attr, items, actions, true, error);
+	return (void *)(uintptr_t)flow_list_create(dev, &priv->flows,
+				  attr, items, actions, true, error);
 }
 
 /**
@@ -4484,18 +4505,24 @@ struct rte_flow *
  * @param dev
  *   Pointer to Ethernet device.
  * @param list
- *   Pointer to a TAILQ flow list. If this parameter NULL,
- *   there is no flow removal from the list.
- * @param[in] flow
- *   Flow to destroy.
+ *   Pointer to the Indexed flow list. If this parameter NULL,
+ *   there is no flow removal from the list. Be noted that as
+ *   flow is add to the indexed list, memory of the indexed
+ *   list points to maybe changed as flow destroyed.
+ * @param[in] flow_idx
+ *   Index of flow to destroy.
  */
 static void
-flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
-		  struct rte_flow *flow)
+flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
+		  uint32_t flow_idx)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_fdir_flow *priv_fdir_flow = NULL;
+	struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
+					       [MLX5_IPOOL_RTE_FLOW], flow_idx);
 
+	if (!flow)
+		return;
 	/*
 	 * Update RX queue flags only if port is started, otherwise it is
 	 * already clean.
@@ -4507,11 +4534,12 @@ struct rte_flow *
 				     flow->hairpin_flow_id);
 	flow_drv_destroy(dev, flow);
 	if (list)
-		TAILQ_REMOVE(list, flow, next);
+		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list,
+			     flow_idx, flow, next);
 	flow_mreg_del_copy_action(dev, flow);
 	if (flow->fdir) {
 		LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
-			if (priv_fdir_flow->flow == flow)
+			if (priv_fdir_flow->rix_flow == flow_idx)
 				break;
 		}
 		if (priv_fdir_flow) {
@@ -4520,7 +4548,7 @@ struct rte_flow *
 			rte_free(priv_fdir_flow);
 		}
 	}
-	rte_free(flow);
+	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
 }
 
 /**
@@ -4529,21 +4557,17 @@ struct rte_flow *
  * @param dev
  *   Pointer to Ethernet device.
  * @param list
- *   Pointer to a TAILQ flow list.
+ *   Pointer to the Indexed flow list.
  * @param active
  *   If flushing is called avtively.
  */
 void
-mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list,
-		     bool active)
+mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active)
 {
 	uint32_t num_flushed = 0;
 
-	while (!TAILQ_EMPTY(list)) {
-		struct rte_flow *flow;
-
-		flow = TAILQ_FIRST(list);
-		flow_list_destroy(dev, list, flow);
+	while (*list) {
+		flow_list_destroy(dev, list, *list);
 		num_flushed++;
 	}
 	if (active) {
@@ -4558,14 +4582,17 @@ struct rte_flow *
  * @param dev
  *   Pointer to Ethernet device.
  * @param list
- *   Pointer to a TAILQ flow list.
+ *   Pointer to the Indexed flow list.
  */
 void
-mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
+mlx5_flow_stop(struct rte_eth_dev *dev, uint32_t *list)
 {
-	struct rte_flow *flow;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct rte_flow *flow = NULL;
+	uint32_t idx;
 
-	TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
+	ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], *list, idx,
+		      flow, next) {
 		flow_drv_remove(dev, flow);
 		flow_mreg_stop_copy_action(dev, flow);
 	}
@@ -4579,16 +4606,18 @@ struct rte_flow *
  * @param dev
  *   Pointer to Ethernet device.
  * @param list
- *   Pointer to a TAILQ flow list.
+ *   Pointer to the Indexed flow list.
  *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
+mlx5_flow_start(struct rte_eth_dev *dev, uint32_t *list)
 {
-	struct rte_flow *flow;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct rte_flow *flow = NULL;
 	struct rte_flow_error error;
+	uint32_t idx;
 	int ret = 0;
 
 	/* Make sure default copy action (reg_c[0] -> reg_b) is created. */
@@ -4596,7 +4625,8 @@ struct rte_flow *
 	if (ret < 0)
 		return -rte_errno;
 	/* Apply Flows created by application. */
-	TAILQ_FOREACH(flow, list, next) {
+	ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], *list, idx,
+		      flow, next) {
 		ret = flow_mreg_start_copy_action(dev, flow);
 		if (ret < 0)
 			goto error;
@@ -4698,9 +4728,11 @@ struct rte_flow *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct rte_flow *flow;
+	uint32_t idx;
 	int ret = 0;
 
-	TAILQ_FOREACH(flow, &priv->flows, next) {
+	ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], priv->flows, idx,
+		      flow, next) {
 		DRV_LOG(DEBUG, "port %u flow %p still referenced",
 			dev->data->port_id, (void *)flow);
 		++ret;
@@ -4749,15 +4781,15 @@ struct rte_flow *
 		.group = MLX5_HAIRPIN_TX_TABLE,
 	};
 	struct rte_flow_action actions[2];
-	struct rte_flow *flow;
+	uint32_t flow_idx;
 	struct rte_flow_error error;
 
 	actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
 	actions[0].conf = &jump;
 	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
-	flow = flow_list_create(dev, &priv->ctrl_flows,
+	flow_idx = flow_list_create(dev, &priv->ctrl_flows,
 				&attr, items, actions, false, &error);
-	if (!flow) {
+	if (!flow_idx) {
 		DRV_LOG(DEBUG,
 			"Failed to create ctrl flow: rte_errno(%d),"
 			" type(%d), message(%s)",
@@ -4834,7 +4866,7 @@ struct rte_flow *
 			.type = RTE_FLOW_ACTION_TYPE_END,
 		},
 	};
-	struct rte_flow *flow;
+	uint32_t flow_idx;
 	struct rte_flow_error error;
 	unsigned int i;
 
@@ -4843,9 +4875,9 @@ struct rte_flow *
 	}
 	for (i = 0; i != priv->reta_idx_n; ++i)
 		queue[i] = (*priv->reta_idx)[i];
-	flow = flow_list_create(dev, &priv->ctrl_flows,
+	flow_idx = flow_list_create(dev, &priv->ctrl_flows,
 				&attr, items, actions, false, &error);
-	if (!flow)
+	if (!flow_idx)
 		return -rte_errno;
 	return 0;
 }
@@ -4884,7 +4916,7 @@ struct rte_flow *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	flow_list_destroy(dev, &priv->flows, flow);
+	flow_list_destroy(dev, &priv->flows, (uintptr_t)(void *)flow);
 	return 0;
 }
 
@@ -4940,14 +4972,25 @@ struct rte_flow *
  */
 static int
 flow_drv_query(struct rte_eth_dev *dev,
-	       struct rte_flow *flow,
+	       uint32_t flow_idx,
 	       const struct rte_flow_action *actions,
 	       void *data,
 	       struct rte_flow_error *error)
 {
+	struct mlx5_priv *priv = dev->data->dev_private;
 	const struct mlx5_flow_driver_ops *fops;
-	enum mlx5_flow_drv_type ftype = flow->drv_type;
+	struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
+					       [MLX5_IPOOL_RTE_FLOW],
+					       flow_idx);
+	enum mlx5_flow_drv_type ftype;
 
+	if (!flow) {
+		return rte_flow_error_set(error, ENOENT,
+			  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			  NULL,
+			  "invalid flow handle");
+	}
+	ftype = flow->drv_type;
 	MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
 	fops = flow_get_drv_ops(ftype);
 
@@ -4969,7 +5012,8 @@ struct rte_flow *
 {
 	int ret;
 
-	ret = flow_drv_query(dev, flow, actions, data, error);
+	ret = flow_drv_query(dev, (uintptr_t)(void *)flow, actions, data,
+			     error);
 	if (ret < 0)
 		return ret;
 	return 0;
@@ -5205,25 +5249,25 @@ struct rte_flow *
  *   FDIR flow to lookup.
  *
  * @return
- *   Pointer of flow if found, NULL otherwise.
+ *   Index of flow if found, 0 otherwise.
  */
-static struct rte_flow *
+static uint32_t
 flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct rte_flow *flow = NULL;
+	uint32_t flow_idx = 0;
 	struct mlx5_fdir_flow *priv_fdir_flow = NULL;
 
 	MLX5_ASSERT(fdir_flow);
 	LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
 		if (!flow_fdir_cmp(priv_fdir_flow->fdir, fdir_flow)) {
-			DRV_LOG(DEBUG, "port %u found FDIR flow %p",
-				dev->data->port_id, (void *)flow);
-			flow = priv_fdir_flow->flow;
+			DRV_LOG(DEBUG, "port %u found FDIR flow %u",
+				dev->data->port_id, flow_idx);
+			flow_idx = priv_fdir_flow->rix_flow;
 			break;
 		}
 	}
-	return flow;
+	return flow_idx;
 }
 
 /**
@@ -5245,6 +5289,7 @@ struct rte_flow *
 	struct mlx5_fdir *fdir_flow;
 	struct rte_flow *flow;
 	struct mlx5_fdir_flow *priv_fdir_flow = NULL;
+	uint32_t flow_idx;
 	int ret;
 
 	fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0);
@@ -5255,8 +5300,8 @@ struct rte_flow *
 	ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow);
 	if (ret)
 		goto error;
-	flow = flow_fdir_filter_lookup(dev, fdir_flow);
-	if (flow) {
+	flow_idx = flow_fdir_filter_lookup(dev, fdir_flow);
+	if (flow_idx) {
 		rte_errno = EEXIST;
 		goto error;
 	}
@@ -5266,14 +5311,15 @@ struct rte_flow *
 		rte_errno = ENOMEM;
 		goto error;
 	}
-	flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr,
-				fdir_flow->items, fdir_flow->actions, true,
-				NULL);
+	flow_idx = flow_list_create(dev, &priv->flows, &fdir_flow->attr,
+				    fdir_flow->items, fdir_flow->actions, true,
+				    NULL);
+	flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
 	if (!flow)
 		goto error;
 	flow->fdir = 1;
 	priv_fdir_flow->fdir = fdir_flow;
-	priv_fdir_flow->flow = flow;
+	priv_fdir_flow->rix_flow = flow_idx;
 	LIST_INSERT_HEAD(&priv->fdir_flows, priv_fdir_flow, next);
 	DRV_LOG(DEBUG, "port %u created FDIR flow %p",
 		dev->data->port_id, (void *)flow);
@@ -5300,7 +5346,7 @@ struct rte_flow *
 			const struct rte_eth_fdir_filter *fdir_filter)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct rte_flow *flow;
+	uint32_t flow_idx;
 	struct mlx5_fdir fdir_flow = {
 		.attr.group = 0,
 	};
@@ -5318,14 +5364,12 @@ struct rte_flow *
 	if (!priv_fdir_flow)
 		return 0;
 	LIST_REMOVE(priv_fdir_flow, next);
-	flow = priv_fdir_flow->flow;
-	/* Fdir resource will be releasd after flow destroy. */
-	flow->fdir = 0;
-	flow_list_destroy(dev, &priv->flows, flow);
+	flow_idx = priv_fdir_flow->rix_flow;
+	flow_list_destroy(dev, &priv->flows, flow_idx);
 	rte_free(priv_fdir_flow->fdir);
 	rte_free(priv_fdir_flow);
-	DRV_LOG(DEBUG, "port %u deleted FDIR flow %p",
-		dev->data->port_id, (void *)flow);
+	DRV_LOG(DEBUG, "port %u deleted FDIR flow %u",
+		dev->data->port_id, flow_idx);
 	return 0;
 }
 
@@ -5367,8 +5411,7 @@ struct rte_flow *
 	while (!LIST_EMPTY(&priv->fdir_flows)) {
 		priv_fdir_flow = LIST_FIRST(&priv->fdir_flows);
 		LIST_REMOVE(priv_fdir_flow, next);
-		priv_fdir_flow->flow->fdir = 0;
-		flow_list_destroy(dev, &priv->flows, priv_fdir_flow->flow);
+		flow_list_destroy(dev, &priv->flows, priv_fdir_flow->rix_flow);
 		rte_free(priv_fdir_flow->fdir);
 		rte_free(priv_fdir_flow);
 	}
@@ -5910,19 +5953,22 @@ struct mlx5_meter_domains_infos *
 				.type = RTE_FLOW_ACTION_TYPE_END,
 			},
 		};
+		uint32_t flow_idx;
 		struct rte_flow *flow;
 		struct rte_flow_error error;
 
 		if (!config->dv_flow_en)
 			break;
 		/* Create internal flow, validation skips copy action. */
-		flow = flow_list_create(dev, NULL, &attr, items,
-					actions, false, &error);
+		flow_idx = flow_list_create(dev, NULL, &attr, items,
+					    actions, false, &error);
+		flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
+				      flow_idx);
 		if (!flow)
 			continue;
 		if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL))
 			config->flow_mreg_c[n++] = idx;
-		flow_list_destroy(dev, NULL, flow);
+		flow_list_destroy(dev, NULL, flow_idx);
 	}
 	for (; n < MLX5_MREG_C_NUM; ++n)
 		config->flow_mreg_c[n] = REG_NONE;
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 759b0ce..35570a9 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -461,7 +461,7 @@ struct mlx5_flow_mreg_copy_resource {
 	uint32_t refcnt; /* Reference counter. */
 	uint32_t appcnt; /* Apply/Remove counter. */
 	uint32_t idx;
-	struct rte_flow *flow; /* Built flow for copy. */
+	uint32_t rix_flow; /* Built flow for copy. */
 };
 
 /* Table data structure of the hash organization. */
@@ -757,12 +757,12 @@ struct mlx5_flow_meter_profile {
 struct mlx5_fdir_flow {
 	LIST_ENTRY(mlx5_fdir_flow) next; /* Pointer to the next element. */
 	struct mlx5_fdir *fdir; /* Pointer to fdir. */
-	struct rte_flow *flow; /* Pointer to flow. */
+	uint32_t rix_flow; /* Index to flow. */
 };
 
 /* Flow structure. */
 struct rte_flow {
-	TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
+	ILIST_ENTRY(uint32_t)next; /**< Index to the next flow structure. */
 	enum mlx5_flow_drv_type drv_type; /**< Driver type. */
 	uint32_t counter; /**< Holds flow counter. */
 	uint32_t rix_mreg_copy;
-- 
1.8.3.1


Thread overview: 12+ messages
     [not found] <https://patches.dpdk.org/cover/68470/>
2020-04-16  8:34 ` [dpdk-dev] [PATCH v2 00/10] net/mlx5: optimize flow structure Suanming Mou
2020-04-16  8:34   ` [dpdk-dev] [PATCH v2 01/10] net/mlx5: reorganize fate actions as union Suanming Mou
2020-04-16  8:34   ` [dpdk-dev] [PATCH v2 02/10] net/mlx5: optimize action flags in flow handle Suanming Mou
2020-04-16  8:34   ` [dpdk-dev] [PATCH v2 03/10] net/mlx5: reorganize the mlx5 flow handle struct Suanming Mou
2020-04-16  8:34   ` [dpdk-dev] [PATCH v2 04/10] net/mlx5: optimize flow meter handle type Suanming Mou
2020-04-16  8:34   ` [dpdk-dev] [PATCH v2 05/10] net/mlx5: allocate meter from indexed pool Suanming Mou
2020-04-16  8:34   ` [dpdk-dev] [PATCH v2 06/10] net/mlx5: convert mark copy resource to indexed Suanming Mou
2020-04-16  8:34   ` [dpdk-dev] [PATCH v2 07/10] net/mlx5: optimize flow director filter memory Suanming Mou
2020-04-16  8:34   ` [dpdk-dev] [PATCH v2 08/10] net/mlx5: optimize mlx5 flow RSS struct Suanming Mou
2020-04-16  8:34   ` Suanming Mou [this message]
2020-04-16  8:34   ` [dpdk-dev] [PATCH v2 10/10] net/mlx5: reorganize rte flow structure Suanming Mou
2020-04-16 17:08   ` [dpdk-dev] [PATCH v2 00/10] net/mlx5: optimize " Raslan Darawsheh
