DPDK patches and discussions
* [PATCH] net/mlx5: fix flow workspace destruction
@ 2023-07-01 14:43 Bing Zhao
  2023-07-01 14:51 ` [PATCH v2] " Bing Zhao
  0 siblings, 1 reply; 5+ messages in thread
From: Bing Zhao @ 2023-07-01 14:43 UTC
  To: matan, viacheslavo, orika, suanmingm, rasland
  Cc: dev, Gregory Etelson, stable

From: Gregory Etelson <getelson@nvidia.com>

PMD uses a pthread key to allocate and access per-thread flow
workspace memory buffers.

PMD registered a key destructor function to clean up flow workspace
buffers. However, the key destructor was not called by the pthread
library.
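
As a minimal illustration of the failure mode (a standalone sketch,
not the PMD code: per POSIX, a key destructor runs only when the
owning thread exits through pthread_exit(), so a value installed by a
thread that terminates together with the process is never cleaned up):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_key_t key;

static void
destructor(void *buf)
{
	/* Never reached when main() simply returns. */
	printf("destructor called\n");
	free(buf);
}

int
main(void)
{
	pthread_key_create(&key, destructor);
	pthread_setspecific(key, malloc(64));
	return 0; /* Process exit skips TSD destructors; the buffer leaks. */
}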

The patch keeps track of per-thread flow workspaces in PMD.
Flow workspace memory is released from the PMD destructor.

In the meantime, the workspace buffer and the RSS queue array are
allocated in a single memory chunk with this patch. The maximal
number of queues, RTE_ETH_RSS_RETA_SIZE_512, is chosen. Then the
workspace adjustment can be removed, avoiding two sources of
software hiccups (the combined scheme is sketched right after this
list):
  1. realloc and content copy
  2. spinlock acquire and release
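
A simplified standalone sketch of the combined scheme (names here are
hypothetical stand-ins; the actual implementation in the diff below
uses rte_spinlock, RTE_ALIGN and RTE_PTR_ADD): each workspace is
carved from a single calloc() covering the structure plus a
maximal-size queue array, and is linked into a process-wide list that
the release routine walks at teardown.

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical stand-in for the driver's workspace structure. */
struct ws {
	struct ws *gc;   /* Link in the global garbage-collection list. */
	uint16_t *queue; /* Points into the tail of the same allocation. */
};

static struct ws *gc_head;
static pthread_mutex_t gc_lock = PTHREAD_MUTEX_INITIALIZER;

static struct ws *
ws_alloc(size_t max_queues)
{
	/* Round the header up so the trailing queue array starts aligned. */
	size_t hdr = (sizeof(struct ws) + sizeof(long) - 1) &
		     ~(sizeof(long) - 1);
	struct ws *w = calloc(1, hdr + max_queues * sizeof(uint16_t));

	if (w == NULL)
		return NULL;
	w->queue = (uint16_t *)((char *)w + hdr);
	pthread_mutex_lock(&gc_lock);
	w->gc = gc_head;
	gc_head = w;
	pthread_mutex_unlock(&gc_lock);
	return w;
}

static void
ws_release_all(void)
{
	/* One free() per workspace releases its queue array as well. */
	while (gc_head != NULL) {
		struct ws *w = gc_head;

		gc_head = w->gc;
		free(w);
	}
}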

Fixes: 5d55a494f4e6 ("net/mlx5: split multi-thread flow handling per OS")
Cc: stable@dpdk.org

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Signed-off-by: Bing Zhao <bingz@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_flow_os.c |  2 +-
 drivers/net/mlx5/mlx5.c               |  1 +
 drivers/net/mlx5/mlx5_flow.c          | 76 +++++++++++----------------
 drivers/net/mlx5/mlx5_flow.h          |  4 +-
 4 files changed, 36 insertions(+), 47 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_flow_os.c b/drivers/net/mlx5/linux/mlx5_flow_os.c
index 3c9a823edf..b139bb75b9 100644
--- a/drivers/net/mlx5/linux/mlx5_flow_os.c
+++ b/drivers/net/mlx5/linux/mlx5_flow_os.c
@@ -51,7 +51,7 @@ mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
 int
 mlx5_flow_os_init_workspace_once(void)
 {
-	if (rte_thread_key_create(&key_workspace, flow_release_workspace)) {
+	if (rte_thread_key_create(&key_workspace, NULL)) {
 		DRV_LOG(ERR, "Can't create flow workspace data thread key.");
 		rte_errno = ENOMEM;
 		return -rte_errno;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 5f0aa296ba..fd9b76027d 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1838,6 +1838,7 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 	if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
 		mlx5_os_net_cleanup();
 		mlx5_flow_os_release_workspace();
+		mlx5_flow_workspace_gc_release();
 	}
 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
 	if (sh->flex_parsers_dv) {
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index cf83db7b60..b5874bbe22 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -7155,36 +7155,6 @@ flow_tunnel_from_rule(const struct mlx5_flow *flow)
 	return tunnel;
 }
 
-/**
- * Adjust flow RSS workspace if needed.
- *
- * @param wks
- *   Pointer to thread flow work space.
- * @param rss_desc
- *   Pointer to RSS descriptor.
- * @param[in] nrssq_num
- *   New RSS queue number.
- *
- * @return
- *   0 on success, -1 otherwise and rte_errno is set.
- */
-static int
-flow_rss_workspace_adjust(struct mlx5_flow_workspace *wks,
-			  struct mlx5_flow_rss_desc *rss_desc,
-			  uint32_t nrssq_num)
-{
-	if (likely(nrssq_num <= wks->rssq_num))
-		return 0;
-	rss_desc->queue = realloc(rss_desc->queue,
-			  sizeof(*rss_desc->queue) * RTE_ALIGN(nrssq_num, 2));
-	if (!rss_desc->queue) {
-		rte_errno = ENOMEM;
-		return -1;
-	}
-	wks->rssq_num = RTE_ALIGN(nrssq_num, 2);
-	return 0;
-}
-
 /**
  * Create a flow and add it to @p list.
  *
@@ -7303,8 +7273,7 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
 	if (attr->ingress)
 		rss = flow_get_rss_action(dev, p_actions_rx);
 	if (rss) {
-		if (flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num))
-			return 0;
+		MLX5_ASSERT(rss->queue_num <= RTE_ETH_RSS_RETA_SIZE_512);
 		/*
 		 * The following information is required by
 		 * mlx5_flow_hashfields_adjust() in advance.
@@ -8072,12 +8041,34 @@ flow_release_workspace(void *data)
 
 	while (wks) {
 		next = wks->next;
-		free(wks->rss_desc.queue);
 		free(wks);
 		wks = next;
 	}
 }
 
+static struct mlx5_flow_workspace *gc_head = NULL;
+static rte_spinlock_t mlx5_flow_workspace_lock = RTE_SPINLOCK_INITIALIZER;
+
+static void
+mlx5_flow_workspace_gc_add(struct mlx5_flow_workspace *ws)
+{
+	rte_spinlock_lock(&mlx5_flow_workspace_lock);
+	ws->gc = gc_head;
+	gc_head = ws;
+	rte_spinlock_unlock(&mlx5_flow_workspace_lock);
+}
+
+void
+mlx5_flow_workspace_gc_release(void)
+{
+	while (gc_head) {
+		struct mlx5_flow_workspace *wks = gc_head;
+
+		gc_head = wks->gc;
+		flow_release_workspace(wks);
+	}
+}
+
 /**
  * Get thread specific current flow workspace.
  *
@@ -8103,23 +8094,17 @@ mlx5_flow_get_thread_workspace(void)
 static struct mlx5_flow_workspace*
 flow_alloc_thread_workspace(void)
 {
-	struct mlx5_flow_workspace *data = calloc(1, sizeof(*data));
+	size_t data_size = RTE_ALIGN(sizeof(struct mlx5_flow_workspace), sizeof(long));
+	size_t rss_queue_array_size = sizeof(uint16_t) * RTE_ETH_RSS_RETA_SIZE_512;
+	struct mlx5_flow_workspace *data = calloc(1, data_size +
+						     rss_queue_array_size);
 
 	if (!data) {
-		DRV_LOG(ERR, "Failed to allocate flow workspace "
-			"memory.");
+		DRV_LOG(ERR, "Failed to allocate flow workspace memory.");
 		return NULL;
 	}
-	data->rss_desc.queue = calloc(1,
-			sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);
-	if (!data->rss_desc.queue)
-		goto err;
-	data->rssq_num = MLX5_RSSQ_DEFAULT_NUM;
+	data->rss_desc.queue = RTE_PTR_ADD(data, data_size);
 	return data;
-err:
-	free(data->rss_desc.queue);
-	free(data);
-	return NULL;
 }
 
 /**
@@ -8140,6 +8125,7 @@ mlx5_flow_push_thread_workspace(void)
 		data = flow_alloc_thread_workspace();
 		if (!data)
 			return NULL;
+		mlx5_flow_workspace_gc_add(data);
 	} else if (!curr->inuse) {
 		data = curr;
 	} else if (curr->next) {
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 003e7da3a6..62789853ab 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1496,10 +1496,10 @@ struct mlx5_flow_workspace {
 	/* If creating another flow in same thread, push new as stack. */
 	struct mlx5_flow_workspace *prev;
 	struct mlx5_flow_workspace *next;
+	struct mlx5_flow_workspace *gc;
 	uint32_t inuse; /* can't create new flow with current. */
 	struct mlx5_flow flows[MLX5_NUM_MAX_DEV_FLOWS];
 	struct mlx5_flow_rss_desc rss_desc;
-	uint32_t rssq_num; /* Allocated queue num in rss_desc. */
 	uint32_t flow_idx; /* Intermediate device flow index. */
 	struct mlx5_flow_meter_info *fm; /* Pointer to the meter in flow. */
 	struct mlx5_flow_meter_policy *policy;
@@ -2022,6 +2022,8 @@ struct mlx5_flow_driver_ops {
 struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);
 void mlx5_flow_pop_thread_workspace(void);
 struct mlx5_flow_workspace *mlx5_flow_get_thread_workspace(void);
+void mlx5_flow_workspace_gc_release(void);
+
 __extension__
 struct flow_grp_info {
 	uint64_t external:1;
-- 
2.34.1



* [PATCH v2] net/mlx5: fix flow workspace destruction
  2023-07-01 14:43 [PATCH] net/mlx5: fix flow workspace destruction Bing Zhao
@ 2023-07-01 14:51 ` Bing Zhao
  2023-07-03  8:59   ` Matan Azrad
  2023-07-03  9:50   ` [PATCH v3] " Bing Zhao
  0 siblings, 2 replies; 5+ messages in thread
From: Bing Zhao @ 2023-07-01 14:51 UTC
  To: matan, viacheslavo, orika, suanmingm, rasland
  Cc: dev, Gregory Etelson, stable

From: Gregory Etelson <getelson@nvidia.com>

PMD uses a pthread key to allocate and access per-thread flow
workspace memory buffers.

PMD registered a key destructor function to clean up flow workspace
buffers. However, the key destructor was not called by the pthread
library.

The patch keeps track of per-thread flow workspaces in PMD.
Flow workspace memory is released from the PMD destructor.

In the meantime, the workspace buffer and the RSS queue array are
allocated in a single memory chunk with this patch. The maximal
number of queues, RTE_ETH_RSS_RETA_SIZE_512, is chosen. Then the
workspace adjustment can be removed, avoiding two sources of
software hiccups:
  1. realloc and content copy
  2. spinlock acquire and release

Fixes: 5d55a494f4e6 ("net/mlx5: split multi-thread flow handling per OS")
Cc: stable@dpdk.org

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Signed-off-by: Bing Zhao <bingz@nvidia.com>
---
v2: fix typo in the commit message and remove the needless NULL
    pointer initialization of a static variable.
---
 drivers/net/mlx5/linux/mlx5_flow_os.c |  2 +-
 drivers/net/mlx5/mlx5.c               |  1 +
 drivers/net/mlx5/mlx5_flow.c          | 76 +++++++++++----------------
 drivers/net/mlx5/mlx5_flow.h          |  4 +-
 4 files changed, 36 insertions(+), 47 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_flow_os.c b/drivers/net/mlx5/linux/mlx5_flow_os.c
index 3c9a823edf..b139bb75b9 100644
--- a/drivers/net/mlx5/linux/mlx5_flow_os.c
+++ b/drivers/net/mlx5/linux/mlx5_flow_os.c
@@ -51,7 +51,7 @@ mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
 int
 mlx5_flow_os_init_workspace_once(void)
 {
-	if (rte_thread_key_create(&key_workspace, flow_release_workspace)) {
+	if (rte_thread_key_create(&key_workspace, NULL)) {
 		DRV_LOG(ERR, "Can't create flow workspace data thread key.");
 		rte_errno = ENOMEM;
 		return -rte_errno;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 5f0aa296ba..fd9b76027d 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1838,6 +1838,7 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 	if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
 		mlx5_os_net_cleanup();
 		mlx5_flow_os_release_workspace();
+		mlx5_flow_workspace_gc_release();
 	}
 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
 	if (sh->flex_parsers_dv) {
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index cf83db7b60..d3b1252ad6 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -7155,36 +7155,6 @@ flow_tunnel_from_rule(const struct mlx5_flow *flow)
 	return tunnel;
 }
 
-/**
- * Adjust flow RSS workspace if needed.
- *
- * @param wks
- *   Pointer to thread flow work space.
- * @param rss_desc
- *   Pointer to RSS descriptor.
- * @param[in] nrssq_num
- *   New RSS queue number.
- *
- * @return
- *   0 on success, -1 otherwise and rte_errno is set.
- */
-static int
-flow_rss_workspace_adjust(struct mlx5_flow_workspace *wks,
-			  struct mlx5_flow_rss_desc *rss_desc,
-			  uint32_t nrssq_num)
-{
-	if (likely(nrssq_num <= wks->rssq_num))
-		return 0;
-	rss_desc->queue = realloc(rss_desc->queue,
-			  sizeof(*rss_desc->queue) * RTE_ALIGN(nrssq_num, 2));
-	if (!rss_desc->queue) {
-		rte_errno = ENOMEM;
-		return -1;
-	}
-	wks->rssq_num = RTE_ALIGN(nrssq_num, 2);
-	return 0;
-}
-
 /**
  * Create a flow and add it to @p list.
  *
@@ -7303,8 +7273,7 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
 	if (attr->ingress)
 		rss = flow_get_rss_action(dev, p_actions_rx);
 	if (rss) {
-		if (flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num))
-			return 0;
+		MLX5_ASSERT(rss->queue_num <= RTE_ETH_RSS_RETA_SIZE_512);
 		/*
 		 * The following information is required by
 		 * mlx5_flow_hashfields_adjust() in advance.
@@ -8072,12 +8041,34 @@ flow_release_workspace(void *data)
 
 	while (wks) {
 		next = wks->next;
-		free(wks->rss_desc.queue);
 		free(wks);
 		wks = next;
 	}
 }
 
+static struct mlx5_flow_workspace *gc_head;
+static rte_spinlock_t mlx5_flow_workspace_lock = RTE_SPINLOCK_INITIALIZER;
+
+static void
+mlx5_flow_workspace_gc_add(struct mlx5_flow_workspace *ws)
+{
+	rte_spinlock_lock(&mlx5_flow_workspace_lock);
+	ws->gc = gc_head;
+	gc_head = ws;
+	rte_spinlock_unlock(&mlx5_flow_workspace_lock);
+}
+
+void
+mlx5_flow_workspace_gc_release(void)
+{
+	while (gc_head) {
+		struct mlx5_flow_workspace *wks = gc_head;
+
+		gc_head = wks->gc;
+		flow_release_workspace(wks);
+	}
+}
+
 /**
  * Get thread specific current flow workspace.
  *
@@ -8103,23 +8094,17 @@ mlx5_flow_get_thread_workspace(void)
 static struct mlx5_flow_workspace*
 flow_alloc_thread_workspace(void)
 {
-	struct mlx5_flow_workspace *data = calloc(1, sizeof(*data));
+	size_t data_size = RTE_ALIGN(sizeof(struct mlx5_flow_workspace), sizeof(long));
+	size_t rss_queue_array_size = sizeof(uint16_t) * RTE_ETH_RSS_RETA_SIZE_512;
+	struct mlx5_flow_workspace *data = calloc(1, data_size +
+						     rss_queue_array_size);
 
 	if (!data) {
-		DRV_LOG(ERR, "Failed to allocate flow workspace "
-			"memory.");
+		DRV_LOG(ERR, "Failed to allocate flow workspace memory.");
 		return NULL;
 	}
-	data->rss_desc.queue = calloc(1,
-			sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);
-	if (!data->rss_desc.queue)
-		goto err;
-	data->rssq_num = MLX5_RSSQ_DEFAULT_NUM;
+	data->rss_desc.queue = RTE_PTR_ADD(data, data_size);
 	return data;
-err:
-	free(data->rss_desc.queue);
-	free(data);
-	return NULL;
 }
 
 /**
@@ -8140,6 +8125,7 @@ mlx5_flow_push_thread_workspace(void)
 		data = flow_alloc_thread_workspace();
 		if (!data)
 			return NULL;
+		mlx5_flow_workspace_gc_add(data);
 	} else if (!curr->inuse) {
 		data = curr;
 	} else if (curr->next) {
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 003e7da3a6..62789853ab 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1496,10 +1496,10 @@ struct mlx5_flow_workspace {
 	/* If creating another flow in same thread, push new as stack. */
 	struct mlx5_flow_workspace *prev;
 	struct mlx5_flow_workspace *next;
+	struct mlx5_flow_workspace *gc;
 	uint32_t inuse; /* can't create new flow with current. */
 	struct mlx5_flow flows[MLX5_NUM_MAX_DEV_FLOWS];
 	struct mlx5_flow_rss_desc rss_desc;
-	uint32_t rssq_num; /* Allocated queue num in rss_desc. */
 	uint32_t flow_idx; /* Intermediate device flow index. */
 	struct mlx5_flow_meter_info *fm; /* Pointer to the meter in flow. */
 	struct mlx5_flow_meter_policy *policy;
@@ -2022,6 +2022,8 @@ struct mlx5_flow_driver_ops {
 struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);
 void mlx5_flow_pop_thread_workspace(void);
 struct mlx5_flow_workspace *mlx5_flow_get_thread_workspace(void);
+void mlx5_flow_workspace_gc_release(void);
+
 __extension__
 struct flow_grp_info {
 	uint64_t external:1;
-- 
2.34.1



* RE: [PATCH v2] net/mlx5: fix flow workspace destruction
  2023-07-01 14:51 ` [PATCH v2] " Bing Zhao
@ 2023-07-03  8:59   ` Matan Azrad
  2023-07-03  9:50   ` [PATCH v3] " Bing Zhao
  1 sibling, 0 replies; 5+ messages in thread
From: Matan Azrad @ 2023-07-03  8:59 UTC
  To: Bing Zhao, Slava Ovsiienko, Ori Kam, Suanming Mou, Raslan Darawsheh
  Cc: dev, Gregory Etelson, stable



From: Bing Zhao <bingz@nvidia.com>
> PMD uses a pthread key to allocate and access per-thread flow
> workspace memory buffers.
> 
> PMD registered a key destructor function to clean up flow workspace buffers.
> However, the key destructor was not called by the pthread library.
> 
> The patch keeps track of per-thread flow workspaces in PMD.
> Flow workspace memory is released from the PMD destructor.
> 
> In the meantime, the workspace buffer and the RSS queue array are
> allocated in a single memory chunk with this patch. The maximal number
> of queues, RTE_ETH_RSS_RETA_SIZE_512, is chosen. Then the workspace
> adjustment can be removed, avoiding two sources of software hiccups:
>   1. realloc and content copy
>   2. spinlock acquire and release
> 
> Fixes: 5d55a494f4e6 ("net/mlx5: split multi-thread flow handling per OS")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Gregory Etelson <getelson@nvidia.com>
> Signed-off-by: Bing Zhao <bingz@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>


* [PATCH v3] net/mlx5: fix flow workspace destruction
  2023-07-01 14:51 ` [PATCH v2] " Bing Zhao
  2023-07-03  8:59   ` Matan Azrad
@ 2023-07-03  9:50   ` Bing Zhao
  2023-07-03 14:03     ` Raslan Darawsheh
  1 sibling, 1 reply; 5+ messages in thread
From: Bing Zhao @ 2023-07-03  9:50 UTC
  To: matan, viacheslavo, orika, suanmingm, rasland
  Cc: dev, Gregory Etelson, stable, David Marchand

From: Gregory Etelson <getelson@nvidia.com>

PMD uses a pthread key to allocate and access per-thread flow
workspace memory buffers.

PMD registered a key destructor function to clean up flow workspace
buffers. However, the key destructor was not called by the pthread
library.

The patch keeps track of per-thread flow workspaces in PMD.
Flow workspace memory is released from the PMD destructor.

In the meantime, the workspace buffer and the RSS queue array are
allocated in a single memory chunk with this patch. The maximal
number of queues, RTE_ETH_RSS_RETA_SIZE_512, is chosen. Then the
workspace adjustment can be removed, avoiding two sources of
software hiccups:
  1. realloc and content copy
  2. spinlock acquire and release

Bugzilla ID: 1255

Fixes: 5d55a494f4e6 ("net/mlx5: split multi-thread flow handling per OS")
Cc: stable@dpdk.org

Reported-by: David Marchand <david.marchand@redhat.com>
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Signed-off-by: Bing Zhao <bingz@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
v2: fix typo and code style
v3: add Bugzilla information
---
 drivers/net/mlx5/linux/mlx5_flow_os.c |  2 +-
 drivers/net/mlx5/mlx5.c               |  1 +
 drivers/net/mlx5/mlx5_flow.c          | 76 +++++++++++----------------
 drivers/net/mlx5/mlx5_flow.h          |  4 +-
 4 files changed, 36 insertions(+), 47 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_flow_os.c b/drivers/net/mlx5/linux/mlx5_flow_os.c
index 3c9a823edf..b139bb75b9 100644
--- a/drivers/net/mlx5/linux/mlx5_flow_os.c
+++ b/drivers/net/mlx5/linux/mlx5_flow_os.c
@@ -51,7 +51,7 @@ mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
 int
 mlx5_flow_os_init_workspace_once(void)
 {
-	if (rte_thread_key_create(&key_workspace, flow_release_workspace)) {
+	if (rte_thread_key_create(&key_workspace, NULL)) {
 		DRV_LOG(ERR, "Can't create flow workspace data thread key.");
 		rte_errno = ENOMEM;
 		return -rte_errno;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 5f0aa296ba..fd9b76027d 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1838,6 +1838,7 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 	if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
 		mlx5_os_net_cleanup();
 		mlx5_flow_os_release_workspace();
+		mlx5_flow_workspace_gc_release();
 	}
 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
 	if (sh->flex_parsers_dv) {
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index cf83db7b60..d3b1252ad6 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -7155,36 +7155,6 @@ flow_tunnel_from_rule(const struct mlx5_flow *flow)
 	return tunnel;
 }
 
-/**
- * Adjust flow RSS workspace if needed.
- *
- * @param wks
- *   Pointer to thread flow work space.
- * @param rss_desc
- *   Pointer to RSS descriptor.
- * @param[in] nrssq_num
- *   New RSS queue number.
- *
- * @return
- *   0 on success, -1 otherwise and rte_errno is set.
- */
-static int
-flow_rss_workspace_adjust(struct mlx5_flow_workspace *wks,
-			  struct mlx5_flow_rss_desc *rss_desc,
-			  uint32_t nrssq_num)
-{
-	if (likely(nrssq_num <= wks->rssq_num))
-		return 0;
-	rss_desc->queue = realloc(rss_desc->queue,
-			  sizeof(*rss_desc->queue) * RTE_ALIGN(nrssq_num, 2));
-	if (!rss_desc->queue) {
-		rte_errno = ENOMEM;
-		return -1;
-	}
-	wks->rssq_num = RTE_ALIGN(nrssq_num, 2);
-	return 0;
-}
-
 /**
  * Create a flow and add it to @p list.
  *
@@ -7303,8 +7273,7 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
 	if (attr->ingress)
 		rss = flow_get_rss_action(dev, p_actions_rx);
 	if (rss) {
-		if (flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num))
-			return 0;
+		MLX5_ASSERT(rss->queue_num <= RTE_ETH_RSS_RETA_SIZE_512);
 		/*
 		 * The following information is required by
 		 * mlx5_flow_hashfields_adjust() in advance.
@@ -8072,12 +8041,34 @@ flow_release_workspace(void *data)
 
 	while (wks) {
 		next = wks->next;
-		free(wks->rss_desc.queue);
 		free(wks);
 		wks = next;
 	}
 }
 
+static struct mlx5_flow_workspace *gc_head;
+static rte_spinlock_t mlx5_flow_workspace_lock = RTE_SPINLOCK_INITIALIZER;
+
+static void
+mlx5_flow_workspace_gc_add(struct mlx5_flow_workspace *ws)
+{
+	rte_spinlock_lock(&mlx5_flow_workspace_lock);
+	ws->gc = gc_head;
+	gc_head = ws;
+	rte_spinlock_unlock(&mlx5_flow_workspace_lock);
+}
+
+void
+mlx5_flow_workspace_gc_release(void)
+{
+	while (gc_head) {
+		struct mlx5_flow_workspace *wks = gc_head;
+
+		gc_head = wks->gc;
+		flow_release_workspace(wks);
+	}
+}
+
 /**
  * Get thread specific current flow workspace.
  *
@@ -8103,23 +8094,17 @@ mlx5_flow_get_thread_workspace(void)
 static struct mlx5_flow_workspace*
 flow_alloc_thread_workspace(void)
 {
-	struct mlx5_flow_workspace *data = calloc(1, sizeof(*data));
+	size_t data_size = RTE_ALIGN(sizeof(struct mlx5_flow_workspace), sizeof(long));
+	size_t rss_queue_array_size = sizeof(uint16_t) * RTE_ETH_RSS_RETA_SIZE_512;
+	struct mlx5_flow_workspace *data = calloc(1, data_size +
+						     rss_queue_array_size);
 
 	if (!data) {
-		DRV_LOG(ERR, "Failed to allocate flow workspace "
-			"memory.");
+		DRV_LOG(ERR, "Failed to allocate flow workspace memory.");
 		return NULL;
 	}
-	data->rss_desc.queue = calloc(1,
-			sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);
-	if (!data->rss_desc.queue)
-		goto err;
-	data->rssq_num = MLX5_RSSQ_DEFAULT_NUM;
+	data->rss_desc.queue = RTE_PTR_ADD(data, data_size);
 	return data;
-err:
-	free(data->rss_desc.queue);
-	free(data);
-	return NULL;
 }
 
 /**
@@ -8140,6 +8125,7 @@ mlx5_flow_push_thread_workspace(void)
 		data = flow_alloc_thread_workspace();
 		if (!data)
 			return NULL;
+		mlx5_flow_workspace_gc_add(data);
 	} else if (!curr->inuse) {
 		data = curr;
 	} else if (curr->next) {
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 003e7da3a6..62789853ab 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1496,10 +1496,10 @@ struct mlx5_flow_workspace {
 	/* If creating another flow in same thread, push new as stack. */
 	struct mlx5_flow_workspace *prev;
 	struct mlx5_flow_workspace *next;
+	struct mlx5_flow_workspace *gc;
 	uint32_t inuse; /* can't create new flow with current. */
 	struct mlx5_flow flows[MLX5_NUM_MAX_DEV_FLOWS];
 	struct mlx5_flow_rss_desc rss_desc;
-	uint32_t rssq_num; /* Allocated queue num in rss_desc. */
 	uint32_t flow_idx; /* Intermediate device flow index. */
 	struct mlx5_flow_meter_info *fm; /* Pointer to the meter in flow. */
 	struct mlx5_flow_meter_policy *policy;
@@ -2022,6 +2022,8 @@ struct mlx5_flow_driver_ops {
 struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);
 void mlx5_flow_pop_thread_workspace(void);
 struct mlx5_flow_workspace *mlx5_flow_get_thread_workspace(void);
+void mlx5_flow_workspace_gc_release(void);
+
 __extension__
 struct flow_grp_info {
 	uint64_t external:1;
-- 
2.34.1



* RE: [PATCH v3] net/mlx5: fix flow workspace destruction
  2023-07-03  9:50   ` [PATCH v3] " Bing Zhao
@ 2023-07-03 14:03     ` Raslan Darawsheh
  0 siblings, 0 replies; 5+ messages in thread
From: Raslan Darawsheh @ 2023-07-03 14:03 UTC
  To: Bing Zhao, Matan Azrad, Slava Ovsiienko, Ori Kam, Suanming Mou
  Cc: dev, Gregory Etelson, stable, David Marchand

Hi,

> -----Original Message-----
> From: Bing Zhao <bingz@nvidia.com>
> Sent: Monday, July 3, 2023 12:51 PM
> To: Matan Azrad <matan@nvidia.com>; Slava Ovsiienko
> <viacheslavo@nvidia.com>; Ori Kam <orika@nvidia.com>; Suanming Mou
> <suanmingm@nvidia.com>; Raslan Darawsheh <rasland@nvidia.com>
> Cc: dev@dpdk.org; Gregory Etelson <getelson@nvidia.com>;
> stable@dpdk.org; David Marchand <david.marchand@redhat.com>
> Subject: [PATCH v3] net/mlx5: fix flow workspace destruction
> 
> From: Gregory Etelson <getelson@nvidia.com>
> 
> PMD uses a pthread key to allocate and access per-thread flow
> workspace memory buffers.
> 
> PMD registered a key destructor function to clean up flow workspace buffers.
> However, the key destructor was not called by the pthread library.
> 
> The patch keeps track of per-thread flow workspaces in PMD.
> Flow workspace memory is released from the PMD destructor.
> 
> In the meantime, the workspace buffer and the RSS queue array are
> allocated in a single memory chunk with this patch. The maximal number
> of queues, RTE_ETH_RSS_RETA_SIZE_512, is chosen. Then the workspace
> adjustment can be removed, avoiding two sources of software hiccups:
>   1. realloc and content copy
>   2. spinlock acquire and release
> 
> Bugzilla ID: 1255
> 
> Fixes: 5d55a494f4e6 ("net/mlx5: split multi-thread flow handling per OS")
> Cc: stable@dpdk.org
> 
> Reported-by: David Marchand <david.marchand@redhat.com>
> Signed-off-by: Gregory Etelson <getelson@nvidia.com>
> Signed-off-by: Bing Zhao <bingz@nvidia.com>
> Acked-by: Matan Azrad <matan@nvidia.com>
> ---
> v2: fix typo and code style
> v3: add bugzilla information

Patch applied to next-net-mlx,

Kindest regards,
Raslan Darawsheh

