From: Alexander Kozyrev <akozyrev@nvidia.com>
To: <dev@dpdk.org>
Cc: <stable@dpdk.org>, <rasland@nvidia.com>, <viacheslavo@nvidia.com>,
	<matan@nvidia.com>, <dsosnowski@nvidia.com>, <bingz@nvidia.com>,
	<orika@nvidia.com>, <suanmingm@nvidia.com>
Subject: [PATCH] net/mlx5: break resource release forever loop
Date: Thu, 30 May 2024 00:46:32 +0300	[thread overview]
Message-ID: <20240529214632.1980988-1-akozyrev@nvidia.com> (raw)

The flow_hw_resource_release() function contains loops that keep
trying to free all the pattern templates and tables until they are
successfully released. However, some of the tables may still be in use
after an ungraceful application termination, which turns these loops
into infinite loops on exit. Do not wait for the tables to be released:
try to free each of them only once and proceed with the exit.

Fixes: d1559d66ed ("net/mlx5: add table management")
Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
Acked-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow_hw.c | 62 +++++++++++++++++++--------------
 1 file changed, 36 insertions(+), 26 deletions(-)
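
Note (illustration only, not part of the patch): the fix replaces the
wait-until-empty loops with a single safe pass over each list, saving the
next pointer before the current entry may be destroyed (the same idea as
BSD's LIST_FOREACH_SAFE). A minimal standalone sketch of that iteration
pattern follows; struct node, node_destroy() and the field names are
hypothetical, not taken from the mlx5 driver.

	/* Standalone sketch: single safe pass over a <sys/queue.h> list. */
	#include <sys/queue.h>
	#include <stdlib.h>

	struct node {
		int id;
		LIST_ENTRY(node) next;
	};

	LIST_HEAD(node_list, node);

	static void
	node_destroy(struct node *n)
	{
		/* A real destroy callback may fail (e.g. entry still in use);
		 * the caller no longer retries, it simply moves on. */
		free(n);
	}

	int
	main(void)
	{
		struct node_list head = LIST_HEAD_INITIALIZER(head);
		struct node *n, *tmp;
		int i;

		for (i = 0; i < 3; i++) {
			n = malloc(sizeof(*n));
			if (n == NULL)
				return 1;
			n->id = i;
			LIST_INSERT_HEAD(&head, n, next);
		}
		/* Save the next pointer before the current entry is freed,
		 * and make exactly one pass instead of looping until empty. */
		n = LIST_FIRST(&head);
		while (n) {
			tmp = LIST_NEXT(n, next);
			LIST_REMOVE(n, next);
			node_destroy(n);
			n = tmp;
		}
		return 0;
	}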

diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 68c5a36bbb..c68cde14e1 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -5048,7 +5048,7 @@ flow_hw_table_destroy(struct rte_eth_dev *dev,
 		return rte_flow_error_set(error, EBUSY,
 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 				   NULL,
-				   "table in use");
+				   "table is in use");
 	}
 	LIST_REMOVE(table, next);
 	for (i = 0; i < table->nb_item_templates; i++)
@@ -7342,7 +7342,7 @@ flow_hw_actions_template_destroy(struct rte_eth_dev *dev,
 		return rte_flow_error_set(error, EBUSY,
 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 				   NULL,
-				   "action template in using");
+				   "action template is in use");
 	}
 	if (template->action_flags & flag)
 		mlx5_free_srh_flex_parser(dev);
@@ -7966,7 +7966,7 @@ flow_hw_pattern_template_destroy(struct rte_eth_dev *dev,
 		return rte_flow_error_set(error, EBUSY,
 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 				   NULL,
-				   "item template in using");
+				   "item template is in use");
 	}
 	if (template->item_flags & (MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT |
 				    MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT))
@@ -10767,10 +10767,10 @@ void
 flow_hw_resource_release(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct rte_flow_template_table *tbl;
-	struct rte_flow_pattern_template *it;
-	struct rte_flow_actions_template *at;
-	struct mlx5_flow_group *grp;
+	struct rte_flow_template_table *tbl, *temp_tbl;
+	struct rte_flow_pattern_template *it, *temp_it;
+	struct rte_flow_actions_template *at, *temp_at;
+	struct mlx5_flow_group *grp, *temp_grp;
 	uint32_t i;
 
 	if (!priv->dr_ctx)
@@ -10782,25 +10782,35 @@ flow_hw_resource_release(struct rte_eth_dev *dev)
 	flow_hw_cleanup_tx_repr_tagging(dev);
 	flow_hw_cleanup_ctrl_rx_tables(dev);
 	flow_hw_action_template_drop_release(dev);
-	while (!LIST_EMPTY(&priv->flow_hw_grp)) {
-		grp = LIST_FIRST(&priv->flow_hw_grp);
-		flow_hw_group_unset_miss_group(dev, grp, NULL);
-	}
-	while (!LIST_EMPTY(&priv->flow_hw_tbl_ongo)) {
-		tbl = LIST_FIRST(&priv->flow_hw_tbl_ongo);
-		flow_hw_table_destroy(dev, tbl, NULL);
-	}
-	while (!LIST_EMPTY(&priv->flow_hw_tbl)) {
-		tbl = LIST_FIRST(&priv->flow_hw_tbl);
-		flow_hw_table_destroy(dev, tbl, NULL);
-	}
-	while (!LIST_EMPTY(&priv->flow_hw_itt)) {
-		it = LIST_FIRST(&priv->flow_hw_itt);
-		flow_hw_pattern_template_destroy(dev, it, NULL);
-	}
-	while (!LIST_EMPTY(&priv->flow_hw_at)) {
-		at = LIST_FIRST(&priv->flow_hw_at);
-		flow_hw_actions_template_destroy(dev, at, NULL);
+	grp = LIST_FIRST(&priv->flow_hw_grp);
+	while (grp) {
+		temp_grp = LIST_NEXT(grp, next);
+		claim_zero(flow_hw_group_unset_miss_group(dev, grp, NULL));
+		grp = temp_grp;
+	}
+	tbl = LIST_FIRST(&priv->flow_hw_tbl_ongo);
+	while (tbl) {
+		temp_tbl = LIST_NEXT(tbl, next);
+		claim_zero(flow_hw_table_destroy(dev, tbl, NULL));
+		tbl = temp_tbl;
+	}
+	tbl = LIST_FIRST(&priv->flow_hw_tbl);
+	while (tbl) {
+		temp_tbl = LIST_NEXT(tbl, next);
+		claim_zero(flow_hw_table_destroy(dev, tbl, NULL));
+		tbl = temp_tbl;
+	}
+	it = LIST_FIRST(&priv->flow_hw_itt);
+	while (it) {
+		temp_it = LIST_NEXT(it, next);
+		claim_zero(flow_hw_pattern_template_destroy(dev, it, NULL));
+		it = temp_it;
+	}
+	at = LIST_FIRST(&priv->flow_hw_at);
+	while (at) {
+		temp_at = LIST_NEXT(at, next);
+		claim_zero(flow_hw_actions_template_destroy(dev, at, NULL));
+		at = temp_at;
 	}
 	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
 		if (priv->hw_drop[i])
-- 
2.18.2


Thread overview: 2+ messages
2024-05-29 21:46 Alexander Kozyrev [this message]
2024-06-03 12:00 ` Raslan Darawsheh
