From: Suanming Mou <suanmingm@nvidia.com>
To: <viacheslavo@nvidia.com>, <matan@nvidia.com>
Cc: <rasland@nvidia.com>, <orika@nvidia.com>, <dev@dpdk.org>
Subject: [dpdk-dev] [PATCH v2 04/22] net/mlx5: replace flow list with index pool
Date: Wed, 30 Jun 2021 15:45:51 +0300
Message-ID: <20210630124609.8711-5-suanmingm@nvidia.com>
In-Reply-To: <20210630124609.8711-1-suanmingm@nvidia.com>

The flow list is used to save the created flows, and it is accessed
only when the port closes and all the flows need to be flushed.

This commit takes advantage of the index pool foreach operation to
flush all the allocated flows.
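
For illustration only, here is a minimal, self-contained sketch of the
idea. It is not the mlx5 implementation: every toy_* name is a
hypothetical stand-in for the real mlx5_ipool_*() API and the
MLX5_IPOOL_FOREACH() macro added earlier in this series. Entries live
in an indexed pool, so a flush walks the pool's valid indices directly
instead of maintaining a separate linked list of flows:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define POOL_CAP 64 /* a single fixed trunk, for brevity */

struct toy_flow {
	uint32_t in_use; /* 0 means the slot is free */
	uint32_t id;
};

struct toy_ipool {
	struct toy_flow entries[POOL_CAP];
};

/* Allocate a zeroed slot; return its 1-based index, 0 on failure. */
static uint32_t
toy_ipool_zmalloc(struct toy_ipool *p, struct toy_flow **out)
{
	uint32_t i;

	for (i = 0; i < POOL_CAP; i++) {
		if (!p->entries[i].in_use) {
			memset(&p->entries[i], 0, sizeof(p->entries[i]));
			p->entries[i].in_use = 1;
			*out = &p->entries[i];
			return i + 1;
		}
	}
	return 0;
}

/* Return the entry for a 1-based index, NULL if invalid or free. */
static struct toy_flow *
toy_ipool_get(struct toy_ipool *p, uint32_t idx)
{
	if (idx == 0 || idx > POOL_CAP || !p->entries[idx - 1].in_use)
		return NULL;
	return &p->entries[idx - 1];
}

static void
toy_ipool_free(struct toy_ipool *p, uint32_t idx)
{
	if (idx >= 1 && idx <= POOL_CAP)
		p->entries[idx - 1].in_use = 0;
}

/* Iterate all allocated entries, in the spirit of MLX5_IPOOL_FOREACH. */
#define TOY_IPOOL_FOREACH(pool, idx, entry) \
	for ((idx) = 1; (idx) <= POOL_CAP; (idx)++) \
		if (((entry) = toy_ipool_get((pool), (idx))) != NULL)

int
main(void)
{
	struct toy_ipool pool = { 0 };
	struct toy_flow *flow;
	uint32_t idx;
	int i;

	for (i = 0; i < 3; i++) {
		if (!toy_ipool_zmalloc(&pool, &flow))
			break;
		flow->id = 100 + i;
	}
	/* Flush: no list traversal, the pool itself is the registry. */
	TOY_IPOOL_FOREACH(&pool, idx, flow) {
		printf("flushing flow %" PRIu32 " (index %" PRIu32 ")\n",
		       flow->id, idx);
		toy_ipool_free(&pool, idx);
	}
	return 0;
}

The real indexed pool additionally grows in trunks and can keep
per-lcore caches (see the icfg[] configuration in the diff below); the
sketch only shows why the dedicated flow list and its insert/remove
locking become unnecessary.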

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_os.c   |  48 +++++++++-
 drivers/net/mlx5/mlx5.c            |   9 +-
 drivers/net/mlx5/mlx5.h            |  14 ++-
 drivers/net/mlx5/mlx5_flow.c       | 149 ++++++++++-------------------
 drivers/net/mlx5/mlx5_flow.h       |   2 +-
 drivers/net/mlx5/mlx5_flow_dv.c    |   5 +
 drivers/net/mlx5/mlx5_trigger.c    |   8 +-
 drivers/net/mlx5/windows/mlx5_os.c |   1 -
 8 files changed, 126 insertions(+), 110 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 92b3009786..31cc8d9eb8 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -69,6 +69,44 @@ static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
 /* Process local data for secondary processes. */
 static struct mlx5_local_data mlx5_local_data;
 
+/* rte flow indexed pool configuration. */
+static struct mlx5_indexed_pool_config icfg[] = {
+	{
+		.size = sizeof(struct rte_flow),
+		.trunk_size = 64,
+		.need_lock = 1,
+		.release_mem_en = 0,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
+		.per_core_cache = 0,
+		.type = "ctl_flow_ipool",
+	},
+	{
+		.size = sizeof(struct rte_flow),
+		.trunk_size = 64,
+		.grow_trunk = 3,
+		.grow_shift = 2,
+		.need_lock = 1,
+		.release_mem_en = 0,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
+		.per_core_cache = 1 << 14,
+		.type = "rte_flow_ipool",
+	},
+	{
+		.size = sizeof(struct rte_flow),
+		.trunk_size = 64,
+		.grow_trunk = 3,
+		.grow_shift = 2,
+		.need_lock = 1,
+		.release_mem_en = 0,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
+		.per_core_cache = 0,
+		.type = "mcp_flow_ipool",
+	},
+};
+
 /**
  * Set the completion channel file descriptor interrupt as non-blocking.
  *
@@ -823,6 +861,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	int own_domain_id = 0;
 	uint16_t port_id;
 	struct mlx5_port_info vport_info = { .query_flags = 0 };
+	int i;
 
 	/* Determine if this port representor is supposed to be spawned. */
 	if (switch_info->representor && dpdk_dev->devargs &&
@@ -1566,7 +1605,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 				      mlx5_ifindex(eth_dev),
 				      eth_dev->data->mac_addrs,
 				      MLX5_MAX_MAC_ADDRESSES);
-	priv->flows = 0;
 	priv->ctrl_flows = 0;
 	rte_spinlock_init(&priv->flow_list_lock);
 	TAILQ_INIT(&priv->flow_meters);
@@ -1600,6 +1638,14 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	mlx5_set_min_inline(spawn, config);
 	/* Store device configuration on private structure. */
 	priv->config = *config;
+	for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
+		icfg[i].release_mem_en = !!config->reclaim_mode;
+		if (config->reclaim_mode)
+			icfg[i].per_core_cache = 0;
+		priv->flows[i] = mlx5_ipool_create(&icfg[i]);
+		if (!priv->flows[i])
+			goto error;
+	}
 	/* Create context for virtual machine VLAN workaround. */
 	priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex);
 	if (config->dv_flow_en) {
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index cf1815cb74..fcfc3dcdca 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -322,7 +322,8 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.grow_trunk = 3,
 		.grow_shift = 2,
 		.need_lock = 1,
-		.release_mem_en = 1,
+		.release_mem_en = 0,
+		.per_core_cache = 1 << 19,
 		.malloc = mlx5_malloc,
 		.free = mlx5_free,
 		.type = "mlx5_flow_handle_ipool",
@@ -792,8 +793,10 @@ mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh,
 				MLX5_FLOW_HANDLE_VERBS_SIZE;
 			break;
 		}
-		if (config->reclaim_mode)
+		if (config->reclaim_mode) {
 			cfg.release_mem_en = 1;
+			cfg.per_core_cache = 0;
+		}
 		sh->ipool[i] = mlx5_ipool_create(&cfg);
 	}
 }
@@ -1528,7 +1531,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 	 * If all the flows are already flushed in the device stop stage,
 	 * then this will return directly without any action.
 	 */
-	mlx5_flow_list_flush(dev, &priv->flows, true);
+	mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, true);
 	mlx5_action_handle_flush(dev);
 	mlx5_flow_meter_flush(dev, NULL);
 	/* Prevent crashes when queues are still in use. */
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 32b2817bf2..5fa5d3cb99 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -71,6 +71,14 @@ enum mlx5_reclaim_mem_mode {
 	MLX5_RCM_AGGR, /* Reclaim PMD and rdma-core level. */
 };
 
+/* The type of flow. */
+enum mlx5_flow_type {
+	MLX5_FLOW_TYPE_CTL, /* Control flow. */
+	MLX5_FLOW_TYPE_GEN, /* General flow. */
+	MLX5_FLOW_TYPE_MCP, /* MCP flow. */
+	MLX5_FLOW_TYPE_MAXI,
+};
+
 /* Hash and cache list callback context. */
 struct mlx5_flow_cb_ctx {
 	struct rte_eth_dev *dev;
@@ -1344,7 +1352,8 @@ struct mlx5_priv {
 	unsigned int (*reta_idx)[]; /* RETA index table. */
 	unsigned int reta_idx_n; /* RETA index size. */
 	struct mlx5_drop drop_queue; /* Flow drop queues. */
-	uint32_t flows; /* RTE Flow rules. */
+	struct mlx5_indexed_pool *flows[MLX5_FLOW_TYPE_MAXI];
+	/* RTE Flow rules. */
 	uint32_t ctrl_flows; /* Control flow rules. */
 	rte_spinlock_t flow_list_lock;
 	struct mlx5_obj_ops obj_ops; /* HW objects operations. */
@@ -1596,7 +1605,8 @@ struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev,
 				  struct rte_flow_error *error);
 int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
 		      struct rte_flow_error *error);
-void mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active);
+void mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,
+			  bool active);
 int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
 int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
 		    const struct rte_flow_action *action, void *data,
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index c5d4a95a8f..20ce0ed424 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -3095,31 +3095,6 @@ mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
 					 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
 }
 
-/**
- * Release resource related QUEUE/RSS action split.
- *
- * @param dev
- *   Pointer to Ethernet device.
- * @param flow
- *   Flow to release id's from.
- */
-static void
-flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
-			     struct rte_flow *flow)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-	uint32_t handle_idx;
-	struct mlx5_flow_handle *dev_handle;
-
-	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
-		       handle_idx, dev_handle, next)
-		if (dev_handle->split_flow_id &&
-		    !dev_handle->is_meter_flow_id)
-			mlx5_ipool_free(priv->sh->ipool
-					[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
-					dev_handle->split_flow_id);
-}
-
 static int
 flow_null_validate(struct rte_eth_dev *dev __rte_unused,
 		   const struct rte_flow_attr *attr __rte_unused,
@@ -3415,7 +3390,6 @@ flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 	const struct mlx5_flow_driver_ops *fops;
 	enum mlx5_flow_drv_type type = flow->drv_type;
 
-	flow_mreg_split_qrss_release(dev, flow);
 	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
 	fops = flow_get_drv_ops(type);
 	fops->destroy(dev, flow);
@@ -3998,14 +3972,14 @@ flow_check_hairpin_split(struct rte_eth_dev *dev,
 
 /* Declare flow create/destroy prototype in advance. */
 static uint32_t
-flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
+flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
 		 const struct rte_flow_attr *attr,
 		 const struct rte_flow_item items[],
 		 const struct rte_flow_action actions[],
 		 bool external, struct rte_flow_error *error);
 
 static void
-flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
+flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
 		  uint32_t flow_idx);
 
 int
@@ -4127,8 +4101,8 @@ flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key,
 	 * be applied, removed, deleted in arbitrary order
 	 * by list traversing.
 	 */
-	mcp_res->rix_flow = flow_list_create(dev, NULL, &attr, items,
-					 actions, false, error);
+	mcp_res->rix_flow = flow_list_create(dev, MLX5_FLOW_TYPE_MCP,
+					&attr, items, actions, false, error);
 	if (!mcp_res->rix_flow) {
 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], idx);
 		return NULL;
@@ -4190,7 +4164,7 @@ flow_dv_mreg_remove_cb(struct mlx5_hlist *list, struct mlx5_hlist_entry *entry)
 	struct mlx5_priv *priv = dev->data->dev_private;
 
 	MLX5_ASSERT(mcp_res->rix_flow);
-	flow_list_destroy(dev, NULL, mcp_res->rix_flow);
+	flow_list_destroy(dev, MLX5_FLOW_TYPE_MCP, mcp_res->rix_flow);
 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
 }
 
@@ -6093,7 +6067,7 @@ flow_rss_workspace_adjust(struct mlx5_flow_workspace *wks,
  *   A flow index on success, 0 otherwise and rte_errno is set.
  */
 static uint32_t
-flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
+flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
 		 const struct rte_flow_attr *attr,
 		 const struct rte_flow_item items[],
 		 const struct rte_flow_action original_actions[],
@@ -6161,7 +6135,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
 				external, hairpin_flow, error);
 	if (ret < 0)
 		goto error_before_hairpin_split;
-	flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);
+	flow = mlx5_ipool_zmalloc(priv->flows[type], &idx);
 	if (!flow) {
 		rte_errno = ENOMEM;
 		goto error_before_hairpin_split;
@@ -6291,12 +6265,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
 		if (ret < 0)
 			goto error;
 	}
-	if (list) {
-		rte_spinlock_lock(&priv->flow_list_lock);
-		ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx,
-			     flow, next);
-		rte_spinlock_unlock(&priv->flow_list_lock);
-	}
+	flow->type = type;
 	flow_rxq_flags_set(dev, flow);
 	rte_free(translated_actions);
 	tunnel = flow_tunnel_from_rule(wks->flows);
@@ -6318,7 +6287,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
 			mlx5_ipool_get
 			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
 			rss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED);
-	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);
+	mlx5_ipool_free(priv->flows[type], idx);
 	rte_errno = ret; /* Restore rte_errno. */
 	ret = rte_errno;
 	rte_flow_error_set(error, ret,
@@ -6370,10 +6339,9 @@ mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
 			.type = RTE_FLOW_ACTION_TYPE_END,
 		},
 	};
-	struct mlx5_priv *priv = dev->data->dev_private;
 	struct rte_flow_error error;
 
-	return (void *)(uintptr_t)flow_list_create(dev, &priv->ctrl_flows,
+	return (void *)(uintptr_t)flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
 						   &attr, &pattern,
 						   actions, false, &error);
 }
@@ -6425,8 +6393,6 @@ mlx5_flow_create(struct rte_eth_dev *dev,
 		 const struct rte_flow_action actions[],
 		 struct rte_flow_error *error)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-
 	/*
 	 * If the device is not started yet, it is not allowed to create a
 	 * flow from application. PMD default flows and traffic control flows
@@ -6442,8 +6408,9 @@ mlx5_flow_create(struct rte_eth_dev *dev,
 		return NULL;
 	}
 
-	return (void *)(uintptr_t)flow_list_create(dev, &priv->flows,
-				  attr, items, actions, true, error);
+	return (void *)(uintptr_t)flow_list_create(dev, MLX5_FLOW_TYPE_GEN,
+						   attr, items, actions,
+						   true, error);
 }
 
 /**
@@ -6451,24 +6418,19 @@ mlx5_flow_create(struct rte_eth_dev *dev,
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param list
- *   Pointer to the Indexed flow list. If this parameter NULL,
- *   there is no flow removal from the list. Be noted that as
- *   flow is add to the indexed list, memory of the indexed
- *   list points to maybe changed as flow destroyed.
  * @param[in] flow_idx
  *   Index of flow to destroy.
  */
 static void
-flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
+flow_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type,
 		  uint32_t flow_idx)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
-					       [MLX5_IPOOL_RTE_FLOW], flow_idx);
+	struct rte_flow *flow = mlx5_ipool_get(priv->flows[type], flow_idx);
 
 	if (!flow)
 		return;
+	MLX5_ASSERT(flow->type == type);
 	/*
 	 * Update RX queue flags only if port is started, otherwise it is
 	 * already clean.
@@ -6476,12 +6438,6 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
 	if (dev->data->dev_started)
 		flow_rxq_flags_trim(dev, flow);
 	flow_drv_destroy(dev, flow);
-	if (list) {
-		rte_spinlock_lock(&priv->flow_list_lock);
-		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list,
-			     flow_idx, flow, next);
-		rte_spinlock_unlock(&priv->flow_list_lock);
-	}
 	if (flow->tunnel) {
 		struct mlx5_flow_tunnel *tunnel;
 
@@ -6491,7 +6447,7 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
 			mlx5_flow_tunnel_free(dev, tunnel);
 	}
 	flow_mreg_del_copy_action(dev, flow);
-	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
+	mlx5_ipool_free(priv->flows[type], flow_idx);
 }
 
 /**
@@ -6499,18 +6455,21 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param list
- *   Pointer to the Indexed flow list.
+ * @param type
+ *   Flow type to be flushed.
  * @param active
  * If flushing is called actively.
  */
 void
-mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active)
+mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,
+		     bool active)
 {
-	uint32_t num_flushed = 0;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	uint32_t num_flushed = 0, fidx = 1;
+	struct rte_flow *flow;
 
-	while (*list) {
-		flow_list_destroy(dev, list, *list);
+	MLX5_IPOOL_FOREACH(priv->flows[type], fidx, flow) {
+		flow_list_destroy(dev, type, fidx);
 		num_flushed++;
 	}
 	if (active) {
@@ -6682,18 +6641,19 @@ mlx5_flow_pop_thread_workspace(void)
  * @return the number of flows not released.
  */
 int
-mlx5_flow_verify(struct rte_eth_dev *dev)
+mlx5_flow_verify(struct rte_eth_dev *dev __rte_unused)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct rte_flow *flow;
-	uint32_t idx;
-	int ret = 0;
+	uint32_t idx = 0;
+	int ret = 0, i;
 
-	ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], priv->flows, idx,
-		      flow, next) {
-		DRV_LOG(DEBUG, "port %u flow %p still referenced",
-			dev->data->port_id, (void *)flow);
-		++ret;
+	for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
+		MLX5_IPOOL_FOREACH(priv->flows[i], idx, flow) {
+			DRV_LOG(DEBUG, "port %u flow %p still referenced",
+				dev->data->port_id, (void *)flow);
+			ret++;
+		}
 	}
 	return ret;
 }
@@ -6713,7 +6673,6 @@ int
 mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
 			    uint32_t queue)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
 	const struct rte_flow_attr attr = {
 		.egress = 1,
 		.priority = 0,
@@ -6746,8 +6705,8 @@ mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
 	actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
 	actions[0].conf = &jump;
 	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
-	flow_idx = flow_list_create(dev, &priv->ctrl_flows,
-				&attr, items, actions, false, &error);
+	flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
+				    &attr, items, actions, false, &error);
 	if (!flow_idx) {
 		DRV_LOG(DEBUG,
 			"Failed to create ctrl flow: rte_errno(%d),"
@@ -6836,8 +6795,8 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
 		action_rss.types = 0;
 	for (i = 0; i != priv->reta_idx_n; ++i)
 		queue[i] = (*priv->reta_idx)[i];
-	flow_idx = flow_list_create(dev, &priv->ctrl_flows,
-				&attr, items, actions, false, &error);
+	flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
+				    &attr, items, actions, false, &error);
 	if (!flow_idx)
 		return -rte_errno;
 	return 0;
@@ -6878,7 +6837,6 @@ mlx5_ctrl_flow(struct rte_eth_dev *dev,
 int
 mlx5_flow_lacp_miss(struct rte_eth_dev *dev)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
 	/*
 	 * The LACP matching is done by only using ether type since using
 	 * a multicast dst mac causes kernel to give low priority to this flow.
@@ -6912,8 +6870,9 @@ mlx5_flow_lacp_miss(struct rte_eth_dev *dev)
 		},
 	};
 	struct rte_flow_error error;
-	uint32_t flow_idx = flow_list_create(dev, &priv->ctrl_flows,
-				&attr, items, actions, false, &error);
+	uint32_t flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
+					&attr, items, actions,
+					false, &error);
 
 	if (!flow_idx)
 		return -rte_errno;
@@ -6931,9 +6890,8 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
 		  struct rte_flow *flow,
 		  struct rte_flow_error *error __rte_unused)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-
-	flow_list_destroy(dev, &priv->flows, (uintptr_t)(void *)flow);
+	flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN,
+				(uintptr_t)(void *)flow);
 	return 0;
 }
 
@@ -6947,9 +6905,7 @@ int
 mlx5_flow_flush(struct rte_eth_dev *dev,
 		struct rte_flow_error *error __rte_unused)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-
-	mlx5_flow_list_flush(dev, &priv->flows, false);
+	mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, false);
 	return 0;
 }
 
@@ -7000,8 +6956,7 @@ flow_drv_query(struct rte_eth_dev *dev,
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	const struct mlx5_flow_driver_ops *fops;
-	struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
-					       [MLX5_IPOOL_RTE_FLOW],
+	struct rte_flow *flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
 					       flow_idx);
 	enum mlx5_flow_drv_type ftype;
 
@@ -7867,14 +7822,14 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
 		if (!config->dv_flow_en)
 			break;
 		/* Create internal flow, validation skips copy action. */
-		flow_idx = flow_list_create(dev, NULL, &attr, items,
-					    actions, false, &error);
-		flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
+		flow_idx = flow_list_create(dev, MLX5_FLOW_TYPE_GEN, &attr,
+					items, actions, false, &error);
+		flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
 				      flow_idx);
 		if (!flow)
 			continue;
 		config->flow_mreg_c[n++] = idx;
-		flow_list_destroy(dev, NULL, flow_idx);
+		flow_list_destroy(dev, MLX5_FLOW_TYPE_GEN, flow_idx);
 	}
 	for (; n < MLX5_MREG_C_NUM; ++n)
 		config->flow_mreg_c[n] = REG_NON;
@@ -7918,8 +7873,8 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
 					sh->rx_domain,
 					sh->tx_domain, file);
 	/* dump one */
-	flow = mlx5_ipool_get(priv->sh->ipool
-			[MLX5_IPOOL_RTE_FLOW], (uintptr_t)(void *)flow_idx);
+	flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN],
+			(uintptr_t)(void *)flow_idx);
 	if (!flow)
 		return -ENOENT;
 
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 2f2aa962f9..d9b6acaafd 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -997,9 +997,9 @@ flow_items_to_tunnel(const struct rte_flow_item items[])
 
 /* Flow structure. */
 struct rte_flow {
-	ILIST_ENTRY(uint32_t)next; /**< Index to the next flow structure. */
 	uint32_t dev_handles;
 	/**< Device flow handles that are part of the flow. */
+	uint32_t type:2;
 	uint32_t drv_type:2; /**< Driver type. */
 	uint32_t tunnel:1;
 	uint32_t meter:24; /**< Holds flow meter id. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index c5d4b01e57..67f7243503 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -13844,6 +13844,11 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 		    dev_handle->split_flow_id)
 			mlx5_ipool_free(fm->flow_ipool,
 					dev_handle->split_flow_id);
+		else if (dev_handle->split_flow_id &&
+		    !dev_handle->is_meter_flow_id)
+			mlx5_ipool_free(priv->sh->ipool
+					[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
+					dev_handle->split_flow_id);
 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
 			   tmp_idx);
 	}
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index ae7fcca229..7cb8920d6b 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -1187,7 +1187,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
 	/* Control flows for default traffic can be removed firstly. */
 	mlx5_traffic_disable(dev);
 	/* All RX queue flags will be cleared in the flush interface. */
-	mlx5_flow_list_flush(dev, &priv->flows, true);
+	mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, true);
 	mlx5_flow_meter_rxq_flush(dev);
 	mlx5_rx_intr_vec_disable(dev);
 	priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
@@ -1370,7 +1370,7 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
 	return 0;
 error:
 	ret = rte_errno; /* Save rte_errno before cleanup. */
-	mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
+	mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);
 	rte_errno = ret; /* Restore rte_errno. */
 	return -rte_errno;
 }
@@ -1385,9 +1385,7 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
 void
 mlx5_traffic_disable(struct rte_eth_dev *dev)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-
-	mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
+	mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);
 }
 
 /**
diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
index 3fe3f55f49..7d15c998bb 100644
--- a/drivers/net/mlx5/windows/mlx5_os.c
+++ b/drivers/net/mlx5/windows/mlx5_os.c
@@ -563,7 +563,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	eth_dev->rx_queue_count = mlx5_rx_queue_count;
 	/* Register MAC address. */
 	claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
-	priv->flows = 0;
 	priv->ctrl_flows = 0;
 	TAILQ_INIT(&priv->flow_meters);
 	TAILQ_INIT(&priv->flow_meter_profiles);
-- 
2.25.1



Thread overview: 135+ messages
2021-05-27  9:33 [dpdk-dev] [PATCH 0/4] net/mlx5: add indexed pool local cache Suanming Mou
2021-05-27  9:34 ` [dpdk-dev] [PATCH 1/4] net/mlx5: add index allocate with up limit Suanming Mou
2021-05-27  9:34 ` [dpdk-dev] [PATCH 2/4] net/mlx5: add indexed pool local cache Suanming Mou
2021-05-27  9:34 ` [dpdk-dev] [PATCH 3/4] net/mlx5: add index pool cache flush Suanming Mou
2021-05-27  9:34 ` [dpdk-dev] [PATCH 4/4] net/mlx5: replace flow list with index pool Suanming Mou
2021-06-30 12:45 ` [dpdk-dev] [PATCH v2 00/22] net/mlx5: insertion rate optimization Suanming Mou
2021-06-30 12:45   ` [dpdk-dev] [PATCH v2 01/22] net/mlx5: allow limiting the index pool maximum index Suanming Mou
2021-06-30 12:45   ` [dpdk-dev] [PATCH v2 02/22] net/mlx5: add indexed pool local cache Suanming Mou
2021-06-30 12:45   ` [dpdk-dev] [PATCH v2 03/22] net/mlx5: add index pool foreach define Suanming Mou
2021-06-30 12:45   ` Suanming Mou [this message]
2021-06-30 12:45   ` [dpdk-dev] [PATCH v2 05/22] net/mlx5: optimize modify header action memory Suanming Mou
2021-06-30 12:45   ` [dpdk-dev] [PATCH v2 06/22] net/mlx5: remove cache term from the list utility Suanming Mou
2021-06-30 12:45   ` [dpdk-dev] [PATCH v2 07/22] net/mlx5: add per lcore cache to " Suanming Mou
2021-06-30 12:45   ` [dpdk-dev] [PATCH v2 08/22] net/mlx5: minimize list critical sections Suanming Mou
2021-06-30 12:45   ` [dpdk-dev] [PATCH v2 09/22] net/mlx5: manage list cache entries release Suanming Mou
2021-06-30 12:45   ` [dpdk-dev] [PATCH v2 10/22] net/mlx5: relax the list utility atomic operations Suanming Mou
2021-06-30 12:45   ` [dpdk-dev] [PATCH v2 11/22] net/mlx5: allocate list memory by the create API Suanming Mou
2021-06-30 12:45   ` [dpdk-dev] [PATCH v2 12/22] common/mlx5: add per-lcore cache to hash list utility Suanming Mou
2021-06-30 12:46   ` [dpdk-dev] [PATCH v2 13/22] net/mlx5: move modify header allocator to ipool Suanming Mou
2021-06-30 12:46   ` [dpdk-dev] [PATCH v2 14/22] net/mlx5: adjust the hash bucket size Suanming Mou
2021-06-30 12:46   ` [dpdk-dev] [PATCH v2 15/22] common/mlx5: allocate cache list memory individually Suanming Mou
2021-06-30 12:46   ` [dpdk-dev] [PATCH v2 16/22] net/mlx5: enable index pool per-core cache Suanming Mou
2021-06-30 12:46   ` [dpdk-dev] [PATCH v2 17/22] net/mlx5: optimize hash list table allocate on demand Suanming Mou
2021-06-30 12:46   ` [dpdk-dev] [PATCH v2 18/22] common/mlx5: optimize cache list object memory Suanming Mou
2021-06-30 12:46   ` [dpdk-dev] [PATCH v2 19/22] net/mlx5: change memory release configuration Suanming Mou
2021-06-30 12:46   ` [dpdk-dev] [PATCH v2 20/22] net/mlx5: support index pool none local core operations Suanming Mou
2021-06-30 12:46   ` [dpdk-dev] [PATCH v2 21/22] net/mlx5: support list " Suanming Mou
2021-06-30 12:46   ` [dpdk-dev] [PATCH v2 22/22] net/mlx5: optimize Rx queue match Suanming Mou
2021-07-02  6:17 ` [dpdk-dev] [PATCH v3 00/22] net/mlx5: insertion rate optimization Suanming Mou
2021-07-02  6:17   ` [dpdk-dev] [PATCH v3 01/22] net/mlx5: allow limiting the index pool maximum index Suanming Mou
2021-07-02  6:17   ` [dpdk-dev] [PATCH v3 02/22] net/mlx5: add indexed pool local cache Suanming Mou
2021-07-02  6:17   ` [dpdk-dev] [PATCH v3 03/22] net/mlx5: add index pool foreach define Suanming Mou
2021-07-02  6:17   ` [dpdk-dev] [PATCH v3 04/22] net/mlx5: replace flow list with index pool Suanming Mou
2021-07-02  6:17   ` [dpdk-dev] [PATCH v3 05/22] net/mlx5: optimize modify header action memory Suanming Mou
2021-07-02  6:18   ` [dpdk-dev] [PATCH v3 06/22] net/mlx5: remove cache term from the list utility Suanming Mou
2021-07-02  6:18   ` [dpdk-dev] [PATCH v3 07/22] net/mlx5: add per lcore cache to " Suanming Mou
2021-07-02  6:18   ` [dpdk-dev] [PATCH v3 08/22] net/mlx5: minimize list critical sections Suanming Mou
2021-07-02  6:18   ` [dpdk-dev] [PATCH v3 09/22] net/mlx5: manage list cache entries release Suanming Mou
2021-07-02  6:18   ` [dpdk-dev] [PATCH v3 10/22] net/mlx5: relax the list utility atomic operations Suanming Mou
2021-07-02  6:18   ` [dpdk-dev] [PATCH v3 11/22] net/mlx5: allocate list memory by the create API Suanming Mou
2021-07-02  6:18   ` [dpdk-dev] [PATCH v3 12/22] common/mlx5: add per-lcore cache to hash list utility Suanming Mou
2021-07-02  6:18   ` [dpdk-dev] [PATCH v3 13/22] net/mlx5: move modify header allocator to ipool Suanming Mou
2021-07-02  6:18   ` [dpdk-dev] [PATCH v3 14/22] net/mlx5: adjust the hash bucket size Suanming Mou
2021-07-02  6:18   ` [dpdk-dev] [PATCH v3 15/22] common/mlx5: allocate cache list memory individually Suanming Mou
2021-07-02  6:18   ` [dpdk-dev] [PATCH v3 16/22] net/mlx5: enable index pool per-core cache Suanming Mou
2021-07-02  6:18   ` [dpdk-dev] [PATCH v3 17/22] net/mlx5: optimize hash list table allocate on demand Suanming Mou
2021-07-02  6:18   ` [dpdk-dev] [PATCH v3 18/22] common/mlx5: optimize cache list object memory Suanming Mou
2021-07-02  6:18   ` [dpdk-dev] [PATCH v3 19/22] net/mlx5: change memory release configuration Suanming Mou
2021-07-02  6:18   ` [dpdk-dev] [PATCH v3 20/22] net/mlx5: support index pool none local core operations Suanming Mou
2021-07-02  6:18   ` [dpdk-dev] [PATCH v3 21/22] net/mlx5: support list " Suanming Mou
2021-07-02  6:18   ` [dpdk-dev] [PATCH v3 22/22] net/mlx5: optimize Rx queue match Suanming Mou
2021-07-06 13:32 ` [dpdk-dev] [PATCH v4 00/26] net/mlx5: insertion rate optimization Suanming Mou
2021-07-06 13:32   ` [dpdk-dev] [PATCH v4 01/26] net/mlx5: allow limiting the index pool maximum index Suanming Mou
2021-07-06 13:32   ` [dpdk-dev] [PATCH v4 02/26] net/mlx5: add indexed pool local cache Suanming Mou
2021-07-06 13:32   ` [dpdk-dev] [PATCH v4 03/26] net/mlx5: add index pool foreach define Suanming Mou
2021-07-06 13:32   ` [dpdk-dev] [PATCH v4 04/26] net/mlx5: support index pool non-lcore operations Suanming Mou
2021-07-06 13:32   ` [dpdk-dev] [PATCH v4 05/26] net/mlx5: replace flow list with index pool Suanming Mou
2021-07-06 13:32   ` [dpdk-dev] [PATCH v4 06/26] net/mlx5: optimize modify header action memory Suanming Mou
2021-07-06 13:32   ` [dpdk-dev] [PATCH v4 07/26] net/mlx5: remove cache term from the list utility Suanming Mou
2021-07-06 13:32   ` [dpdk-dev] [PATCH v4 08/26] net/mlx5: add per lcore cache to " Suanming Mou
2021-07-06 13:32   ` [dpdk-dev] [PATCH v4 09/26] net/mlx5: minimize list critical sections Suanming Mou
2021-07-06 13:32   ` [dpdk-dev] [PATCH v4 10/26] net/mlx5: manage list cache entries release Suanming Mou
2021-07-06 13:32   ` [dpdk-dev] [PATCH v4 11/26] net/mlx5: relax the list utility atomic operations Suanming Mou
2021-07-06 13:32   ` [dpdk-dev] [PATCH v4 12/26] net/mlx5: allocate list memory by the create API Suanming Mou
2021-07-06 13:32   ` [dpdk-dev] [PATCH v4 13/26] common/mlx5: move list utility to common Suanming Mou
2021-07-06 13:32   ` [dpdk-dev] [PATCH v4 14/26] common/mlx5: add list lcore share Suanming Mou
2021-07-06 13:32   ` [dpdk-dev] [PATCH v4 15/26] common/mlx5: call list callbacks with context Suanming Mou
2021-07-06 13:32   ` [dpdk-dev] [PATCH v4 16/26] common/mlx5: add per-lcore cache to hash list utility Suanming Mou
2021-07-06 13:32   ` [dpdk-dev] [PATCH v4 17/26] common/mlx5: allocate cache list memory individually Suanming Mou
2021-07-06 13:32   ` [dpdk-dev] [PATCH v4 18/26] common/mlx5: optimize cache list object memory Suanming Mou
2021-07-06 13:32   ` [dpdk-dev] [PATCH v4 19/26] common/mlx5: support list non-lcore operations Suanming Mou
2021-07-06 13:32   ` [dpdk-dev] [PATCH v4 20/26] net/mlx5: move modify header allocator to ipool Suanming Mou
2021-07-06 13:32   ` [dpdk-dev] [PATCH v4 21/26] net/mlx5: adjust the hash bucket size Suanming Mou
2021-07-06 13:32   ` [dpdk-dev] [PATCH v4 22/26] net/mlx5: enable index pool per-core cache Suanming Mou
2021-07-06 13:32   ` [dpdk-dev] [PATCH v4 23/26] net/mlx5: optimize hash list table allocate on demand Suanming Mou
2021-07-06 13:32   ` [dpdk-dev] [PATCH v4 24/26] net/mlx5: change memory release configuration Suanming Mou
2021-07-06 13:32   ` [dpdk-dev] [PATCH v4 25/26] net/mlx5: optimize Rx queue match Suanming Mou
2021-07-06 13:32   ` [dpdk-dev] [PATCH v4 26/26] doc: add mlx5 multiple-thread flow insertion optimization Suanming Mou
2021-07-12  1:46 ` [dpdk-dev] [PATCH v5 00/26] net/mlx5: insertion rate optimization Suanming Mou
2021-07-12  1:46   ` [dpdk-dev] [PATCH v5 01/26] net/mlx5: allow limiting the index pool maximum index Suanming Mou
2021-07-12  1:46   ` [dpdk-dev] [PATCH v5 02/26] net/mlx5: add indexed pool local cache Suanming Mou
2021-07-12  1:46   ` [dpdk-dev] [PATCH v5 03/26] net/mlx5: add index pool foreach define Suanming Mou
2021-07-12  1:46   ` [dpdk-dev] [PATCH v5 04/26] net/mlx5: support index pool non-lcore operations Suanming Mou
2021-07-12  1:46   ` [dpdk-dev] [PATCH v5 05/26] net/mlx5: replace flow list with index pool Suanming Mou
2021-07-12  1:46   ` [dpdk-dev] [PATCH v5 06/26] net/mlx5: optimize modify header action memory Suanming Mou
2021-07-12  1:46   ` [dpdk-dev] [PATCH v5 07/26] net/mlx5: remove cache term from the list utility Suanming Mou
2021-07-12  1:46   ` [dpdk-dev] [PATCH v5 08/26] net/mlx5: add per lcore cache to " Suanming Mou
2021-07-12  1:46   ` [dpdk-dev] [PATCH v5 09/26] net/mlx5: minimize list critical sections Suanming Mou
2021-07-12  1:46   ` [dpdk-dev] [PATCH v5 10/26] net/mlx5: manage list cache entries release Suanming Mou
2021-07-12  1:46   ` [dpdk-dev] [PATCH v5 11/26] net/mlx5: relax the list utility atomic operations Suanming Mou
2021-07-12  1:46   ` [dpdk-dev] [PATCH v5 12/26] net/mlx5: allocate list memory by the create API Suanming Mou
2021-07-12  1:46   ` [dpdk-dev] [PATCH v5 13/26] common/mlx5: move list utility to common Suanming Mou
2021-07-12  1:46   ` [dpdk-dev] [PATCH v5 14/26] common/mlx5: add list lcore share Suanming Mou
2021-07-12 14:59     ` Raslan Darawsheh
2021-07-12 23:26       ` Suanming Mou
2021-07-12  1:46   ` [dpdk-dev] [PATCH v5 15/26] common/mlx5: call list callbacks with context Suanming Mou
2021-07-12  1:46   ` [dpdk-dev] [PATCH v5 16/26] common/mlx5: add per-lcore cache to hash list utility Suanming Mou
2021-07-12  1:46   ` [dpdk-dev] [PATCH v5 17/26] common/mlx5: allocate cache list memory individually Suanming Mou
2021-07-12  1:46   ` [dpdk-dev] [PATCH v5 18/26] common/mlx5: optimize cache list object memory Suanming Mou
2021-07-12  1:46   ` [dpdk-dev] [PATCH v5 19/26] common/mlx5: support list non-lcore operations Suanming Mou
2021-07-12  1:46   ` [dpdk-dev] [PATCH v5 20/26] net/mlx5: move modify header allocator to ipool Suanming Mou
2021-07-12  1:46   ` [dpdk-dev] [PATCH v5 21/26] net/mlx5: adjust the hash bucket size Suanming Mou
2021-07-12  1:46   ` [dpdk-dev] [PATCH v5 22/26] net/mlx5: enable index pool per-core cache Suanming Mou
2021-07-12  1:46   ` [dpdk-dev] [PATCH v5 23/26] net/mlx5: optimize hash list table allocate on demand Suanming Mou
2021-07-12  1:46   ` [dpdk-dev] [PATCH v5 24/26] net/mlx5: change memory release configuration Suanming Mou
2021-07-12  1:46   ` [dpdk-dev] [PATCH v5 25/26] net/mlx5: optimize Rx queue match Suanming Mou
2021-07-12  1:46   ` [dpdk-dev] [PATCH v5 26/26] doc: add mlx5 multiple-thread flow insertion optimization Suanming Mou
2021-07-13  8:44 ` [dpdk-dev] [PATCH v6 00/26] net/mlx5: insertion rate optimization Suanming Mou
2021-07-13  8:44   ` [dpdk-dev] [PATCH v6 01/26] net/mlx5: allow limiting the index pool maximum index Suanming Mou
2021-07-13  8:44   ` [dpdk-dev] [PATCH v6 02/26] net/mlx5: add indexed pool local cache Suanming Mou
2021-07-13  8:44   ` [dpdk-dev] [PATCH v6 03/26] net/mlx5: add index pool foreach define Suanming Mou
2021-07-13  8:44   ` [dpdk-dev] [PATCH v6 04/26] net/mlx5: support index pool non-lcore operations Suanming Mou
2021-07-13  8:44   ` [dpdk-dev] [PATCH v6 05/26] net/mlx5: replace flow list with index pool Suanming Mou
2021-07-13  8:44   ` [dpdk-dev] [PATCH v6 06/26] net/mlx5: optimize modify header action memory Suanming Mou
2021-07-13  8:44   ` [dpdk-dev] [PATCH v6 07/26] net/mlx5: remove cache term from the list utility Suanming Mou
2021-07-13  8:44   ` [dpdk-dev] [PATCH v6 08/26] net/mlx5: add per lcore cache to " Suanming Mou
2021-07-13  8:44   ` [dpdk-dev] [PATCH v6 09/26] net/mlx5: minimize list critical sections Suanming Mou
2021-07-13  8:44   ` [dpdk-dev] [PATCH v6 10/26] net/mlx5: manage list cache entries release Suanming Mou
2021-07-13  8:44   ` [dpdk-dev] [PATCH v6 11/26] net/mlx5: relax the list utility atomic operations Suanming Mou
2021-07-13  8:44   ` [dpdk-dev] [PATCH v6 12/26] net/mlx5: allocate list memory by the create API Suanming Mou
2021-07-13  8:44   ` [dpdk-dev] [PATCH v6 13/26] common/mlx5: move list utility to common Suanming Mou
2021-07-13  8:44   ` [dpdk-dev] [PATCH v6 14/26] common/mlx5: add list lcore share Suanming Mou
2021-07-13  8:44   ` [dpdk-dev] [PATCH v6 15/26] common/mlx5: call list callbacks with context Suanming Mou
2021-07-13  8:44   ` [dpdk-dev] [PATCH v6 16/26] common/mlx5: add per-lcore cache to hash list utility Suanming Mou
2021-07-13  8:44   ` [dpdk-dev] [PATCH v6 17/26] common/mlx5: allocate cache list memory individually Suanming Mou
2021-07-13  8:44   ` [dpdk-dev] [PATCH v6 18/26] common/mlx5: optimize cache list object memory Suanming Mou
2021-07-13  8:44   ` [dpdk-dev] [PATCH v6 19/26] common/mlx5: support list non-lcore operations Suanming Mou
2021-07-13  8:44   ` [dpdk-dev] [PATCH v6 20/26] net/mlx5: move modify header allocator to ipool Suanming Mou
2021-07-13  8:44   ` [dpdk-dev] [PATCH v6 21/26] net/mlx5: adjust the hash bucket size Suanming Mou
2021-07-13  8:44   ` [dpdk-dev] [PATCH v6 22/26] net/mlx5: enable index pool per-core cache Suanming Mou
2021-07-13  8:44   ` [dpdk-dev] [PATCH v6 23/26] net/mlx5: optimize hash list table allocate on demand Suanming Mou
2021-07-13  8:44   ` [dpdk-dev] [PATCH v6 24/26] net/mlx5: change memory release configuration Suanming Mou
2021-07-13  8:44   ` [dpdk-dev] [PATCH v6 25/26] net/mlx5: optimize Rx queue match Suanming Mou
2021-07-13  8:45   ` [dpdk-dev] [PATCH v6 26/26] doc: add mlx5 multiple-thread flow insertion optimization Suanming Mou
2021-07-13 15:18   ` [dpdk-dev] [PATCH v6 00/26] net/mlx5: insertion rate optimization Raslan Darawsheh
