DPDK patches and discussions
* [dpdk-dev] [PATCH] net/mlx5: fix nested flow creation error
@ 2020-11-09 22:57 Xueming Li
  2020-11-11  8:53 ` Matan Azrad
  0 siblings, 1 reply; 3+ messages in thread
From: Xueming Li @ 2020-11-09 22:57 UTC (permalink / raw)
  To: Matan Azrad, Viacheslav Ovsiienko
  Cc: dev, xuemingl, Asaf Penso, bingz, stable

With xmeta mode 1 enabled, creating a flow with RSS and mark actions
failed: rdma-core could not create the RQT because of a wrong queue
definition. The root cause was that nested flow creations mixed their
data in the shared thread-specific flow workspace.
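
For illustration, the failing scenario can be reproduced with a flow
like the sketch below. This is a hedged sketch, not taken from the bug
report: it assumes port 0 is started with two Rx queues, the PMD was
loaded with dv_xmeta_en=1, and the mark id is arbitrary.

/* Illustrative reproducer: an RSS + MARK flow. With dv_xmeta_en=1 the
 * PMD internally creates a nested metadata-copy flow for the mark. */
#include <rte_ethdev.h>
#include <rte_flow.h>

static struct rte_flow *
create_rss_mark_flow(uint16_t port_id)
{
	static const uint16_t queues[] = { 0, 1 };
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_mark mark = { .id = 0xbef };
	struct rte_flow_action_rss rss = {
		.types = ETH_RSS_IP,
		.queue_num = 2,
		.queue = queues,
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	return rte_flow_create(port_id, &attr, pattern, actions, &error);
}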

This patch introduces nested flow workspaces (context data): each flow
creation uses a dedicated workspace, which is popped when nested flow
creation is done so that the original flow continues with its original
workspace. The total number of thread-specific flow workspaces should
be two, since there is only one nested flow creation scenario so far.
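
To illustrate the mechanism, a minimal sketch of the push/pop idea on a
thread-local workspace stack follows. The types and names (struct
workspace, ws_push, ws_pop) are simplified placeholders, not the
driver's actual mlx5_flow_workspace code:

/* Simplified model of the per-thread workspace stack: push reuses a
 * cached workspace when possible, pop restores the outer one. */
#include <pthread.h>
#include <stdlib.h>

struct workspace {
	struct workspace *prev; /* Outer workspace, restored on pop. */
	struct workspace *next; /* Cached inner workspace for reuse. */
	int inuse;              /* Owned by an in-progress flow creation. */
};

static pthread_key_t ws_key;
static pthread_once_t ws_once = PTHREAD_ONCE_INIT;

static void
ws_key_init(void)
{
	/* The destructor only frees the list head in this sketch. */
	pthread_key_create(&ws_key, free);
}

static struct workspace *
ws_push(void)
{
	struct workspace *curr, *ws;

	if (pthread_once(&ws_once, ws_key_init))
		return NULL;
	curr = pthread_getspecific(ws_key);
	if (curr && !curr->inuse) {
		ws = curr;       /* Head is free, reuse it. */
	} else if (curr && curr->next) {
		ws = curr->next; /* Reuse the cached inner workspace. */
	} else {
		ws = calloc(1, sizeof(*ws));
		if (!ws)
			return NULL;
		if (curr) {      /* Link a deeper nesting level. */
			curr->next = ws;
			ws->prev = curr;
		}
	}
	ws->inuse = 1;
	pthread_setspecific(ws_key, ws);
	return ws;
}

static void
ws_pop(void)
{
	struct workspace *ws = pthread_getspecific(ws_key);

	if (!ws || !ws->inuse)
		return;
	ws->inuse = 0;
	if (ws->prev) /* Restore the outer workspace as current. */
		pthread_setspecific(ws_key, ws->prev);
}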

Fixes: 8bb81f2649b1 ("net/mlx5: use thread specific flow workspace")
Fixes: 3ac3d8234b82 ("net/mlx5: fix index when creating flow")
Cc: bingz@mellanox.com
Cc: stable@dpdk.org

Signed-off-by: Xueming Li <xuemingl@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c       | 180 +++++++++++++++++++----------
 drivers/net/mlx5/mlx5_flow.h       |   9 +-
 drivers/net/mlx5/mlx5_flow_dv.c    |  17 +--
 drivers/net/mlx5/mlx5_flow_verbs.c |   7 +-
 4 files changed, 135 insertions(+), 78 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 92adfcacca..55ee41a00b 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -43,6 +43,8 @@ static int
 mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
 		     const struct rte_flow_tunnel *app_tunnel,
 		     struct mlx5_flow_tunnel **tunnel);
+static struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);
+static void mlx5_flow_pop_thread_workspace(void);
 
 
 /** Device flow drivers. */
@@ -5451,17 +5453,15 @@ flow_rss_workspace_adjust(struct mlx5_flow_workspace *wks,
 			  struct mlx5_flow_rss_desc *rss_desc,
 			  uint32_t nrssq_num)
 {
-	bool fidx = !!wks->flow_idx;
-
-	if (likely(nrssq_num <= wks->rssq_num[fidx]))
+	if (likely(nrssq_num <= wks->rssq_num))
 		return 0;
 	rss_desc->queue = realloc(rss_desc->queue,
-			  sizeof(rss_desc->queue[0]) * RTE_ALIGN(nrssq_num, 2));
+			  sizeof(*rss_desc->queue) * RTE_ALIGN(nrssq_num, 2));
 	if (!rss_desc->queue) {
 		rte_errno = ENOMEM;
 		return -1;
 	}
-	wks->rssq_num[fidx] = RTE_ALIGN(nrssq_num, 2);
+	wks->rssq_num = RTE_ALIGN(nrssq_num, 2);
 	return 0;
 }
 
@@ -5530,6 +5530,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
 	struct rte_flow_action *translated_actions = NULL;
 	struct mlx5_flow_tunnel *tunnel;
 	struct tunnel_default_miss_ctx default_miss_ctx = { 0, };
+	struct mlx5_flow_workspace *wks = mlx5_flow_push_thread_workspace();
 	struct mlx5_flow_split_info flow_split_info = {
 		.external = !!external,
 		.skip_scale = 0,
@@ -5537,12 +5538,10 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
 		.prefix_mark = 0,
 		.prefix_layers = 0
 	};
-	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
-	bool fidx = !!wks->flow_idx;
 	int ret;
 
 	MLX5_ASSERT(wks);
-	rss_desc = &wks->rss_desc[fidx];
+	rss_desc = &wks->rss_desc;
 	ret = flow_shared_actions_translate(dev, original_actions,
 					    shared_actions,
 					    &shared_actions_n,
@@ -5606,15 +5605,6 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
 	}
 	rss_desc->shared_rss = flow_get_shared_rss_action(dev, shared_actions,
 						      shared_actions_n);
-	/*
-	 * Record the start index when there is a nested call. All sub-flows
-	 * need to be translated before another calling.
-	 * No need to use ping-pong buffer to save memory here.
-	 */
-	if (fidx) {
-		MLX5_ASSERT(!wks->flow_nested_idx);
-		wks->flow_nested_idx = fidx;
-	}
 	for (i = 0; i < buf->entries; ++i) {
 		/* Initialize flow split data. */
 		flow_split_info.prefix_layers = 0;
@@ -5682,10 +5672,12 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
 			goto error;
 	}
 	/*
-	 * If the flow is external (from application) OR device is started, then
-	 * the flow will be applied immediately.
+	 * If the flow is external (from application) OR device is started,
+	 * OR it is an mreg discovery flow, then apply it immediately.
 	 */
-	if (external || dev->data->dev_started) {
+	if (external || dev->data->dev_started ||
+	    (attr->group == MLX5_FLOW_MREG_CP_TABLE_GROUP &&
+	     attr->priority == MLX5_FLOW_PRIO_RSVD)) {
 		ret = flow_drv_apply(dev, flow, error);
 		if (ret < 0)
 			goto error;
@@ -5698,10 +5690,6 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
 	}
 	flow_rxq_flags_set(dev, flow);
 	rte_free(translated_actions);
-	/* Nested flow creation index recovery. */
-	wks->flow_idx = wks->flow_nested_idx;
-	if (wks->flow_nested_idx)
-		wks->flow_nested_idx = 0;
 	tunnel = flow_tunnel_from_rule(dev, attr, items, actions);
 	if (tunnel) {
 		flow->tunnel = 1;
@@ -5709,6 +5697,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
 		__atomic_add_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED);
 		mlx5_free(default_miss_ctx.queue);
 	}
+	mlx5_flow_pop_thread_workspace();
 	return idx;
 error:
 	MLX5_ASSERT(flow);
@@ -5724,9 +5713,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
 	rte_errno = ret; /* Restore rte_errno. */
 	ret = rte_errno;
 	rte_errno = ret;
-	wks->flow_idx = wks->flow_nested_idx;
-	if (wks->flow_nested_idx)
-		wks->flow_nested_idx = 0;
+	mlx5_flow_pop_thread_workspace();
 error_before_hairpin_split:
 	rte_free(translated_actions);
 	return 0;
@@ -5963,12 +5950,14 @@ static void
 flow_release_workspace(void *data)
 {
 	struct mlx5_flow_workspace *wks = data;
+	struct mlx5_flow_workspace *next;
 
-	if (!wks)
-		return;
-	free(wks->rss_desc[0].queue);
-	free(wks->rss_desc[1].queue);
-	free(wks);
+	while (wks) {
+		next = wks->next;
+		free(wks->rss_desc.queue);
+		free(wks);
+		wks = next;
+	}
 }
 
 /**
@@ -5982,52 +5971,116 @@ flow_alloc_workspace(void)
 }
 
 /**
- * Get thread specific flow workspace.
+ * Get thread specific current flow workspace.
  *
- * @return pointer to thread specific flowworkspace data, NULL on error.
+ * @return pointer to thread specific flow workspace data, NULL on error.
  */
 struct mlx5_flow_workspace*
 mlx5_flow_get_thread_workspace(void)
 {
 	struct mlx5_flow_workspace *data;
 
-	if (pthread_once(&key_workspace_init, flow_alloc_workspace)) {
-		DRV_LOG(ERR, "Failed to init flow workspace data thread key.");
-		return NULL;
-	}
 	data = pthread_getspecific(key_workspace);
+	MLX5_ASSERT(data && data->inuse);
+	if (!data || !data->inuse)
+		DRV_LOG(ERR, "flow workspace not initialized.");
+	return data;
+}
+
+/**
+ * Allocate and init new flow workspace.
+ *
+ * @return pointer to flow workspace data, NULL on error.
+ */
+static struct mlx5_flow_workspace*
+flow_alloc_thread_workspace(void)
+{
+	struct mlx5_flow_workspace *data = calloc(1, sizeof(*data));
+
 	if (!data) {
-		data = calloc(1, sizeof(*data));
-		if (!data) {
-			DRV_LOG(ERR, "Failed to allocate flow workspace "
-				"memory.");
-			return NULL;
-		}
-		data->rss_desc[0].queue = calloc(1,
-				sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);
-		if (!data->rss_desc[0].queue)
-			goto err;
-		data->rss_desc[1].queue = calloc(1,
-				sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);
-		if (!data->rss_desc[1].queue)
-			goto err;
-		data->rssq_num[0] = MLX5_RSSQ_DEFAULT_NUM;
-		data->rssq_num[1] = MLX5_RSSQ_DEFAULT_NUM;
-		if (pthread_setspecific(key_workspace, data)) {
-			DRV_LOG(ERR, "Failed to set flow workspace to thread.");
-			goto err;
-		}
+		DRV_LOG(ERR, "Failed to allocate flow workspace "
+			"memory.");
+		return NULL;
 	}
+	data->rss_desc.queue = calloc(1,
+			sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);
+	if (!data->rss_desc.queue)
+		goto err;
+	data->rssq_num = MLX5_RSSQ_DEFAULT_NUM;
 	return data;
 err:
-	if (data->rss_desc[0].queue)
-		free(data->rss_desc[0].queue);
-	if (data->rss_desc[1].queue)
-		free(data->rss_desc[1].queue);
+	if (data->rss_desc.queue)
+		free(data->rss_desc.queue);
 	free(data);
 	return NULL;
 }
 
+/**
+ * Get new thread specific flow workspace.
+ *
+ * If the current workspace is in use, create a new one and set it as current.
+ *
+ * @return pointer to thread specific flow workspace data, NULL on error.
+ */
+static struct mlx5_flow_workspace*
+mlx5_flow_push_thread_workspace(void)
+{
+	struct mlx5_flow_workspace *curr;
+	struct mlx5_flow_workspace *data;
+
+	if (pthread_once(&key_workspace_init, flow_alloc_workspace)) {
+		DRV_LOG(ERR, "Failed to init flow workspace data thread key.");
+		return NULL;
+	}
+	curr = pthread_getspecific(key_workspace);
+	if (!curr) {
+		data = flow_alloc_thread_workspace();
+		if (!data)
+			return NULL;
+	} else if (!curr->inuse) {
+		data = curr;
+	} else if (curr->next) {
+		data = curr->next;
+	} else {
+		data = flow_alloc_thread_workspace();
+		if (!data)
+			return NULL;
+		curr->next = data;
+		data->prev = curr;
+	}
+	data->inuse = 1;
+	data->flow_idx = 0;
+	/* Set as current workspace. */
+	if (pthread_setspecific(key_workspace, data))
+		DRV_LOG(ERR, "Failed to set flow workspace to thread.");
+	return data;
+}
+
+/**
+ * Close the current thread specific flow workspace.
+ *
+ * If a previous workspace is available, set it as current.
+ *
+ * No return value; failures are reported through the driver log.
+ */
+static void
+mlx5_flow_pop_thread_workspace(void)
+{
+	struct mlx5_flow_workspace *data = mlx5_flow_get_thread_workspace();
+
+	if (!data)
+		return;
+	if (!data->inuse) {
+		DRV_LOG(ERR, "Failed to close unused flow workspace.");
+		return;
+	}
+	data->inuse = 0;
+	if (!data->prev)
+		return;
+	if (pthread_setspecific(key_workspace, data->prev))
+		DRV_LOG(ERR, "Failed to set flow workspace to thread.");
+}
+
 /**
  * Verify the flow list is empty
  *
@@ -7157,8 +7210,7 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
 				      flow_idx);
 		if (!flow)
 			continue;
-		if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL))
-			config->flow_mreg_c[n++] = idx;
+		config->flow_mreg_c[n++] = idx;
 		flow_list_destroy(dev, NULL, flow_idx);
 	}
 	for (; n < MLX5_MREG_C_NUM; ++n)
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index e3a5030785..5fac8672fc 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1092,11 +1092,14 @@ struct rte_flow_shared_action {
 
 /* Thread specific flow workspace intermediate data. */
 struct mlx5_flow_workspace {
+	/* When creating another flow in the same thread, push a new workspace. */
+	struct mlx5_flow_workspace *prev;
+	struct mlx5_flow_workspace *next;
+	uint32_t inuse; /* Can't create a new flow with the current one. */
 	struct mlx5_flow flows[MLX5_NUM_MAX_DEV_FLOWS];
-	struct mlx5_flow_rss_desc rss_desc[2];
-	uint32_t rssq_num[2]; /* Allocated queue num in rss_desc. */
+	struct mlx5_flow_rss_desc rss_desc;
+	uint32_t rssq_num; /* Allocated queue num in rss_desc. */
 	int flow_idx; /* Intermediate device flow index. */
-	int flow_nested_idx; /* Intermediate device flow index, nested. */
 };
 
 struct mlx5_flow_split_info {
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 78c710fef9..e4238b036a 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -8983,7 +8983,7 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev,
 	uint64_t action_flags = 0;
 
 	MLX5_ASSERT(wks);
-	rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
+	rss_desc = &wks->rss_desc;
 	sample_act = &res->sample_act;
 	sample_idx = &res->sample_idx;
 	sample_action = (const struct rte_flow_action_sample *)action->conf;
@@ -9195,7 +9195,7 @@ flow_dv_create_action_sample(struct rte_eth_dev *dev,
 	uint32_t hrxq_idx;
 
 	MLX5_ASSERT(wks);
-	rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
+	rss_desc = &wks->rss_desc;
 	if (num_of_dest > 1) {
 		if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
 			/* Handle QP action for mirroring */
@@ -9573,8 +9573,12 @@ flow_dv_translate(struct rte_eth_dev *dev,
 		.skip_scale = !!dev_flow->skip_scale,
 	};
 
-	MLX5_ASSERT(wks);
-	rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
+	if (!wks)
+		return rte_flow_error_set(error, ENOMEM,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  NULL,
+					  "failed to push flow workspace");
+	rss_desc = &wks->rss_desc;
 	memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
 	memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
 	mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
@@ -10640,8 +10644,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 	int err;
 	int idx;
 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
-	struct mlx5_flow_rss_desc *rss_desc =
-				&wks->rss_desc[!!wks->flow_nested_idx];
+	struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
 
 	MLX5_ASSERT(wks);
 	if (rss_desc->shared_rss) {
@@ -10649,7 +10652,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 		MLX5_ASSERT(dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS);
 		dh->rix_srss = rss_desc->shared_rss;
 	}
-	for (idx = wks->flow_idx - 1; idx >= wks->flow_nested_idx; idx--) {
+	for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
 		dev_flow = &wks->flows[idx];
 		dv = &dev_flow->dv;
 		dh = dev_flow->handle;
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index f283de4f7e..59291fbd09 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1703,7 +1703,7 @@ flow_verbs_translate(struct rte_eth_dev *dev,
 	struct mlx5_flow_rss_desc *rss_desc;
 
 	MLX5_ASSERT(wks);
-	rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
+	rss_desc = &wks->rss_desc;
 	if (priority == MLX5_FLOW_PRIO_RSVD)
 		priority = priv->config.flow_prio - 1;
 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
@@ -1956,7 +1956,7 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
 
 	MLX5_ASSERT(wks);
-	for (idx = wks->flow_idx - 1; idx >= wks->flow_nested_idx; idx--) {
+	for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
 		dev_flow = &wks->flows[idx];
 		handle = dev_flow->handle;
 		if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
@@ -1964,8 +1964,7 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 			hrxq = priv->drop_queue.hrxq;
 		} else {
 			uint32_t hrxq_idx;
-			struct mlx5_flow_rss_desc *rss_desc =
-				&wks->rss_desc[!!wks->flow_nested_idx];
+			struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
 
 			MLX5_ASSERT(rss_desc->queue_num);
 			rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
-- 
2.25.1



* Re: [dpdk-dev] [PATCH] net/mlx5: fix nested flow creation error
  2020-11-09 22:57 [dpdk-dev] [PATCH] net/mlx5: fix nested flow creation error Xueming Li
@ 2020-11-11  8:53 ` Matan Azrad
  2020-11-13 19:22   ` Thomas Monjalon
  0 siblings, 1 reply; 3+ messages in thread
From: Matan Azrad @ 2020-11-11  8:53 UTC (permalink / raw)
  To: Xueming(Steven) Li, Slava Ovsiienko
  Cc: dev, Xueming(Steven) Li, Asaf Penso, bingz, stable



 From: Xueming Li 
> With xmeta mode 1 enabled, creating a flow with RSS and mark actions
> failed: rdma-core could not create the RQT because of a wrong queue
> definition. The root cause was that nested flow creations mixed their
> data in the shared thread-specific flow workspace.
> 
> This patch introduces nested flow workspaces (context data): each flow
> creation uses a dedicated workspace, which is popped when nested flow
> creation is done so that the original flow continues with its original
> workspace. The total number of thread-specific flow workspaces should
> be two, since there is only one nested flow creation scenario so far.
> 
> Fixes: 8bb81f2649b1 ("net/mlx5: use thread specific flow workspace")
> Fixes: 3ac3d8234b82 ("net/mlx5: fix index when creating flow")
> Cc: bingz@mellanox.com
> Cc: stable@dpdk.org
> 
> Signed-off-by: Xueming Li <xuemingl@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>


* Re: [dpdk-dev] [PATCH] net/mlx5: fix nested flow creation error
  2020-11-11  8:53 ` Matan Azrad
@ 2020-11-13 19:22   ` Thomas Monjalon
  0 siblings, 0 replies; 3+ messages in thread
From: Thomas Monjalon @ 2020-11-13 19:22 UTC (permalink / raw)
  To: Xueming(Steven) Li
  Cc: Slava Ovsiienko, dev, Asaf Penso, bingz, stable, Matan Azrad

> > With xmeta mode 1 enabled, creating a flow with RSS and mark actions
> > failed: rdma-core could not create the RQT because of a wrong queue
> > definition. The root cause was that nested flow creations mixed their
> > data in the shared thread-specific flow workspace.
> > 
> > This patch introduces nested flow workspaces (context data): each flow
> > creation uses a dedicated workspace, which is popped when nested flow
> > creation is done so that the original flow continues with its original
> > workspace. The total number of thread-specific flow workspaces should
> > be two, since there is only one nested flow creation scenario so far.
> > 
> > Fixes: 8bb81f2649b1 ("net/mlx5: use thread specific flow workspace")
> > Fixes: 3ac3d8234b82 ("net/mlx5: fix index when creating flow")
> > Cc: bingz@mellanox.com
> > Cc: stable@dpdk.org
> > 
> > Signed-off-by: Xueming Li <xuemingl@nvidia.com>
> Acked-by: Matan Azrad <matan@nvidia.com>

Applied in next-net-mlx, thanks.


