DPDK patches and discussions
 help / color / mirror / Atom feed
From: Dariusz Sosnowski <dsosnowski@nvidia.com>
To: Viacheslav Ovsiienko <viacheslavo@nvidia.com>,
	Ori Kam <orika@nvidia.com>,  Suanming Mou <suanmingm@nvidia.com>,
	Matan Azrad <matan@nvidia.com>, "Bing Zhao" <bingz@nvidia.com>,
	Gregory Etelson <getelson@nvidia.com>,
	Michael Baum <michaelba@nvidia.com>
Cc: <dev@dpdk.org>, <stable@dpdk.org>
Subject: [PATCH 3/4] net/mlx5: fix rollback on failed flow configure
Date: Wed, 6 Mar 2024 21:21:49 +0100	[thread overview]
Message-ID: <20240306202150.79577-3-dsosnowski@nvidia.com> (raw)
In-Reply-To: <20240306202150.79577-1-dsosnowski@nvidia.com>

If rte_flow_configure() failed, then some port resources
were either not freed or not reset to the default state.
As a result, assumptions in other places in PMD were invalidated
and that led to segmentation faults during release of HW Steering
resources when the port was closed.

This patch adds missing resource release to rollback procedure
in mlx5 PMD implementation of rte_flow_configure().
The whole rollback procedure is reordered for clarity, to resemble
the reverse order of resource allocation.

Fixes: 1939eb6f660c ("net/mlx5: support flow port action with HWS")
Fixes: 8a5c816691e7 ("net/mlx5: create NAT64 actions during configuration")
Fixes: 773ca0e91ba1 ("net/mlx5: support VLAN push/pop/modify with HWS")
Fixes: 04a4de756e14 ("net/mlx5: support flow age action with HWS")
Fixes: c3f085a4858c ("net/mlx5: improve pattern template validation")
Cc: stable@dpdk.org

Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
Acked-by: Ori Kam <orika@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow_hw.c | 65 ++++++++++++++++++++-------------
 1 file changed, 40 insertions(+), 25 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 21c37b7539..17ab3a98fe 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -10188,7 +10188,7 @@ flow_hw_compare_config(const struct mlx5_flow_hw_attr *hw_attr,
  * mlx5_dev_close -> flow_hw_resource_release -> flow_hw_actions_template_destroy
  */
 static void
-action_template_drop_release(struct rte_eth_dev *dev)
+flow_hw_action_template_drop_release(struct rte_eth_dev *dev)
 {
 	int i;
 	struct mlx5_priv *priv = dev->data->dev_private;
@@ -10204,7 +10204,7 @@ action_template_drop_release(struct rte_eth_dev *dev)
 }
 
 static int
-action_template_drop_init(struct rte_eth_dev *dev,
+flow_hw_action_template_drop_init(struct rte_eth_dev *dev,
 			  struct rte_flow_error *error)
 {
 	const struct rte_flow_action drop[2] = {
@@ -10466,7 +10466,7 @@ flow_hw_configure(struct rte_eth_dev *dev,
 	rte_spinlock_init(&priv->hw_ctrl_lock);
 	LIST_INIT(&priv->hw_ctrl_flows);
 	LIST_INIT(&priv->hw_ext_ctrl_flows);
-	ret = action_template_drop_init(dev, error);
+	ret = flow_hw_action_template_drop_init(dev, error);
 	if (ret)
 		goto err;
 	ret = flow_hw_create_ctrl_rx_tables(dev);
@@ -10594,6 +10594,15 @@ flow_hw_configure(struct rte_eth_dev *dev,
 	dev->flow_fp_ops = &mlx5_flow_hw_fp_ops;
 	return 0;
 err:
+	priv->hws_strict_queue = 0;
+	flow_hw_destroy_nat64_actions(priv);
+	flow_hw_destroy_vlan(dev);
+	if (priv->hws_age_req)
+		mlx5_hws_age_pool_destroy(priv);
+	if (priv->hws_cpool) {
+		mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool);
+		priv->hws_cpool = NULL;
+	}
 	if (priv->hws_ctpool) {
 		flow_hw_ct_pool_destroy(dev, priv->hws_ctpool);
 		priv->hws_ctpool = NULL;
@@ -10602,29 +10611,38 @@ flow_hw_configure(struct rte_eth_dev *dev,
 		flow_hw_ct_mng_destroy(dev, priv->ct_mng);
 		priv->ct_mng = NULL;
 	}
-	if (priv->hws_age_req)
-		mlx5_hws_age_pool_destroy(priv);
-	if (priv->hws_cpool) {
-		mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool);
-		priv->hws_cpool = NULL;
-	}
-	action_template_drop_release(dev);
-	mlx5_flow_quota_destroy(dev);
 	flow_hw_destroy_send_to_kernel_action(priv);
 	flow_hw_cleanup_ctrl_fdb_tables(dev);
 	flow_hw_free_vport_actions(priv);
+	if (priv->hw_def_miss) {
+		mlx5dr_action_destroy(priv->hw_def_miss);
+		priv->hw_def_miss = NULL;
+	}
+	flow_hw_cleanup_tx_repr_tagging(dev);
 	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
-		if (priv->hw_drop[i])
+		if (priv->hw_drop[i]) {
 			mlx5dr_action_destroy(priv->hw_drop[i]);
-		if (priv->hw_tag[i])
+			priv->hw_drop[i] = NULL;
+		}
+		if (priv->hw_tag[i]) {
 			mlx5dr_action_destroy(priv->hw_tag[i]);
+			priv->hw_tag[i] = NULL;
+		}
 	}
-	if (priv->hw_def_miss)
-		mlx5dr_action_destroy(priv->hw_def_miss);
-	flow_hw_destroy_nat64_actions(priv);
-	flow_hw_destroy_vlan(dev);
-	if (dr_ctx)
+	mlx5_flow_meter_uninit(dev);
+	mlx5_flow_quota_destroy(dev);
+	flow_hw_cleanup_ctrl_rx_tables(dev);
+	flow_hw_action_template_drop_release(dev);
+	if (dr_ctx) {
 		claim_zero(mlx5dr_context_close(dr_ctx));
+		priv->dr_ctx = NULL;
+	}
+	if (priv->shared_host) {
+		struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
+
+		__atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+		priv->shared_host = NULL;
+	}
 	for (i = 0; i < nb_q_updated; i++) {
 		rte_ring_free(priv->hw_q[i].indir_iq);
 		rte_ring_free(priv->hw_q[i].indir_cq);
@@ -10637,14 +10655,11 @@ flow_hw_configure(struct rte_eth_dev *dev,
 		mlx5_ipool_destroy(priv->acts_ipool);
 		priv->acts_ipool = NULL;
 	}
-	if (_queue_attr)
-		mlx5_free(_queue_attr);
-	if (priv->shared_host) {
-		__atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
-		priv->shared_host = NULL;
-	}
 	mlx5_free(priv->hw_attr);
 	priv->hw_attr = NULL;
+	priv->nb_queue = 0;
+	if (_queue_attr)
+		mlx5_free(_queue_attr);
 	/* Do not overwrite the internal errno information. */
 	if (ret)
 		return ret;
@@ -10677,7 +10692,7 @@ flow_hw_resource_release(struct rte_eth_dev *dev)
 	flow_hw_cleanup_ctrl_fdb_tables(dev);
 	flow_hw_cleanup_tx_repr_tagging(dev);
 	flow_hw_cleanup_ctrl_rx_tables(dev);
-	action_template_drop_release(dev);
+	flow_hw_action_template_drop_release(dev);
 	while (!LIST_EMPTY(&priv->flow_hw_grp)) {
 		grp = LIST_FIRST(&priv->flow_hw_grp);
 		flow_hw_group_unset_miss_group(dev, grp, NULL);
-- 
2.39.2


  parent reply	other threads:[~2024-03-06 20:23 UTC|newest]

Thread overview: 5+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-03-06 20:21 [PATCH 1/4] net/mlx5/hws: fix direct index insert on dep wqe Dariusz Sosnowski
2024-03-06 20:21 ` [PATCH 2/4] net/mlx5: fix templates clean up of FDB control flow rules Dariusz Sosnowski
2024-03-06 20:21 ` Dariusz Sosnowski [this message]
2024-03-06 20:21 ` [PATCH 4/4] net/mlx5: fix flow configure validation Dariusz Sosnowski
2024-03-13  7:46 ` [PATCH 1/4] net/mlx5/hws: fix direct index insert on dep wqe Raslan Darawsheh

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20240306202150.79577-3-dsosnowski@nvidia.com \
    --to=dsosnowski@nvidia.com \
    --cc=bingz@nvidia.com \
    --cc=dev@dpdk.org \
    --cc=getelson@nvidia.com \
    --cc=matan@nvidia.com \
    --cc=michaelba@nvidia.com \
    --cc=orika@nvidia.com \
    --cc=stable@dpdk.org \
    --cc=suanmingm@nvidia.com \
    --cc=viacheslavo@nvidia.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).