From: Bing Zhao <bingz@nvidia.com>
To: <viacheslavo@nvidia.com>, <dev@dpdk.org>, <rasland@nvidia.com>
Cc: <orika@nvidia.com>, <dsosnowski@nvidia.com>,
	<suanmingm@nvidia.com>, <matan@nvidia.com>,
	Rongwei Liu <rongweil@nvidia.com>, <bingz@nvidia.com>,
	<stable@dpdk.org>
Subject: [PATCH] net/mlx5: fix mlx5 device start failure
Date: Thu, 13 Jun 2024 15:45:28 +0300
Message-ID: <20240613124528.547952-1-bingz@nvidia.com>

From: Rongwei Liu <rongweil@nvidia.com>

When the devarg "allow_duplicate_pattern=0" is specified, the PMD does
not allow duplicate flows to be inserted and returns EEXIST as
rte_errno.

The queue/RSS split table is shared globally by all representors, but
the PMD did not prepend any port information to its entries, so every
subsequent port tried to insert the same flows and the insertion
failed.

Managing the table with the hash list in the shared device context
solves the issue.

Fixes: ddb68e473 ("net/mlx5: add extended metadata mode for HWS")
Cc: bingz@nvidia.com
Cc: stable@dpdk.org

Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
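Note: the fix relies on the register-or-reuse semantics of the driver's
hash list (mlx5_hlist_register), with the table now owned by the shared
device context instead of each port's private data. The self-contained
C sketch below only illustrates that pattern; the names and the table
layout are hypothetical and do not mirror the mlx5 internals.

/*
 * Register-or-reuse: the first port to ask for a given mark ID creates
 * the entry, every later port (e.g. a representor) gets the existing
 * entry back and only bumps a reference count, so no duplicate flow
 * insertion is attempted.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct copy_entry {
	uint32_t mark_id;          /* hash key */
	uint32_t refcnt;           /* how many ports/flows use it */
	struct copy_entry *next;   /* chaining within a bucket */
};

#define TBL_SZ 64

/* One table shared by all ports of the same device context. */
static struct copy_entry *shared_tbl[TBL_SZ];

/* Create the entry on first use, otherwise reuse the existing one. */
static struct copy_entry *
copy_entry_register(uint32_t mark_id)
{
	struct copy_entry **slot = &shared_tbl[mark_id % TBL_SZ];
	struct copy_entry *e;

	for (e = *slot; e != NULL; e = e->next) {
		if (e->mark_id == mark_id) {
			e->refcnt++;   /* reuse: no second insertion */
			return e;
		}
	}
	e = calloc(1, sizeof(*e));
	if (e == NULL)
		return NULL;
	e->mark_id = mark_id;
	e->refcnt = 1;                 /* first user creates the flow */
	e->next = *slot;
	*slot = e;
	return e;
}

int
main(void)
{
	/* Two "ports" asking for the same mark ID share one entry. */
	struct copy_entry *a = copy_entry_register(42);
	struct copy_entry *b = copy_entry_register(42);

	printf("same entry: %s, refcnt=%u\n",
	       a == b ? "yes" : "no", b != NULL ? b->refcnt : 0);
	return 0;
}

With the table in the shared context, the second and later representors
take the refcnt++ path instead of attempting a second insertion, which
would otherwise fail with EEXIST when allow_duplicate_pattern=0.
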
 drivers/net/mlx5/linux/mlx5_os.c | 106 +++++++++++++++++--------------
 drivers/net/mlx5/mlx5.c          |   2 -
 drivers/net/mlx5/mlx5.h          |   4 +-
 drivers/net/mlx5/mlx5_flow.c     |  16 ++---
 4 files changed, 67 insertions(+), 61 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 1753acd050..50f4810bff 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -467,15 +467,16 @@ __mlx5_discovery_misc5_cap(struct mlx5_priv *priv)
  * Routine checks the reference counter and does actual
  * resources creation/initialization only if counter is zero.
  *
- * @param[in] priv
- *   Pointer to the private device data structure.
+ * @param[in] eth_dev
+ *   Pointer to the device.
  *
  * @return
  *   Zero on success, positive error code otherwise.
  */
 static int
-mlx5_alloc_shared_dr(struct mlx5_priv *priv)
+mlx5_alloc_shared_dr(struct rte_eth_dev *eth_dev)
 {
+	struct mlx5_priv *priv = eth_dev->data->dev_private;
 	struct mlx5_dev_ctx_shared *sh = priv->sh;
 	char s[MLX5_NAME_SIZE] __rte_unused;
 	int err;
@@ -590,6 +591,44 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 		err = errno;
 		goto error;
 	}
+
+	if (sh->config.dv_flow_en == 1) {
+		/* Query availability of metadata reg_c's. */
+		if (!priv->sh->metadata_regc_check_flag) {
+			err = mlx5_flow_discover_mreg_c(eth_dev);
+			if (err < 0) {
+				err = -err;
+				goto error;
+			}
+		}
+		if (!mlx5_flow_ext_mreg_supported(eth_dev)) {
+			DRV_LOG(DEBUG,
+				"port %u extensive metadata register is not supported",
+				eth_dev->data->port_id);
+			if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+				DRV_LOG(ERR, "metadata mode %u is not supported "
+					     "(no metadata registers available)",
+					     sh->config.dv_xmeta_en);
+				err = ENOTSUP;
+				goto error;
+			}
+		}
+		if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+		    mlx5_flow_ext_mreg_supported(eth_dev) && sh->dv_regc0_mask) {
+			sh->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME,
+							    MLX5_FLOW_MREG_HTABLE_SZ,
+							    false, true, eth_dev,
+							    flow_dv_mreg_create_cb,
+							    flow_dv_mreg_match_cb,
+							    flow_dv_mreg_remove_cb,
+							    flow_dv_mreg_clone_cb,
+							    flow_dv_mreg_clone_free_cb);
+			if (!sh->mreg_cp_tbl) {
+				err = ENOMEM;
+				goto error;
+			}
+		}
+	}
 #endif
 	if (!sh->tunnel_hub && sh->config.dv_miss_info)
 		err = mlx5_alloc_tunnel_hub(sh);
@@ -674,6 +713,10 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 		mlx5_list_destroy(sh->dest_array_list);
 		sh->dest_array_list = NULL;
 	}
+	if (sh->mreg_cp_tbl) {
+		mlx5_hlist_destroy(sh->mreg_cp_tbl);
+		sh->mreg_cp_tbl = NULL;
+	}
 	return err;
 }
 
@@ -771,6 +814,10 @@ mlx5_os_free_shared_dr(struct mlx5_priv *priv)
 		mlx5_list_destroy(sh->dest_array_list);
 		sh->dest_array_list = NULL;
 	}
+	if (sh->mreg_cp_tbl) {
+		mlx5_hlist_destroy(sh->mreg_cp_tbl);
+		sh->mreg_cp_tbl = NULL;
+	}
 }
 
 /**
@@ -1572,13 +1619,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	}
 	/* Create context for virtual machine VLAN workaround. */
 	priv->vmwa_context = mlx5_vlan_vmwa_init(eth_dev, spawn->ifindex);
-	if (sh->config.dv_flow_en) {
-		err = mlx5_alloc_shared_dr(priv);
-		if (err)
-			goto error;
-		if (mlx5_flex_item_port_init(eth_dev) < 0)
-			goto error;
-	}
 	if (mlx5_devx_obj_ops_en(sh)) {
 		priv->obj_ops = devx_obj_ops;
 		mlx5_queue_counter_id_prepare(eth_dev);
@@ -1629,6 +1669,13 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 			goto error;
 	}
 	rte_rwlock_init(&priv->ind_tbls_lock);
+	if (sh->config.dv_flow_en) {
+		err = mlx5_alloc_shared_dr(eth_dev);
+		if (err)
+			goto error;
+		if (mlx5_flex_item_port_init(eth_dev) < 0)
+			goto error;
+	}
 	if (sh->phdev->config.ipv6_tc_fallback == MLX5_IPV6_TC_UNKNOWN) {
 		sh->phdev->config.ipv6_tc_fallback = MLX5_IPV6_TC_OK;
 		if (!sh->cdev->config.hca_attr.modify_outer_ipv6_traffic_class ||
@@ -1715,43 +1762,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		err = -err;
 		goto error;
 	}
-	/* Query availability of metadata reg_c's. */
-	if (!priv->sh->metadata_regc_check_flag) {
-		err = mlx5_flow_discover_mreg_c(eth_dev);
-		if (err < 0) {
-			err = -err;
-			goto error;
-		}
-	}
-	if (!mlx5_flow_ext_mreg_supported(eth_dev)) {
-		DRV_LOG(DEBUG,
-			"port %u extensive metadata register is not supported",
-			eth_dev->data->port_id);
-		if (sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
-			DRV_LOG(ERR, "metadata mode %u is not supported "
-				     "(no metadata registers available)",
-				     sh->config.dv_xmeta_en);
-			err = ENOTSUP;
-			goto error;
-		}
-	}
-	if (sh->config.dv_flow_en &&
-	    sh->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
-	    mlx5_flow_ext_mreg_supported(eth_dev) &&
-	    priv->sh->dv_regc0_mask) {
-		priv->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME,
-						      MLX5_FLOW_MREG_HTABLE_SZ,
-						      false, true, eth_dev,
-						      flow_dv_mreg_create_cb,
-						      flow_dv_mreg_match_cb,
-						      flow_dv_mreg_remove_cb,
-						      flow_dv_mreg_clone_cb,
-						    flow_dv_mreg_clone_free_cb);
-		if (!priv->mreg_cp_tbl) {
-			err = ENOMEM;
-			goto error;
-		}
-	}
 	rte_spinlock_init(&priv->shared_act_sl);
 	mlx5_flow_counter_mode_config(eth_dev);
 	mlx5_flow_drop_action_config(eth_dev);
@@ -1770,8 +1780,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		    priv->sh->config.dv_esw_en)
 			flow_hw_destroy_vport_action(eth_dev);
 #endif
-		if (priv->mreg_cp_tbl)
-			mlx5_hlist_destroy(priv->mreg_cp_tbl);
 		if (priv->sh)
 			mlx5_os_free_shared_dr(priv);
 		if (priv->nl_socket_route >= 0)
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 997b02c571..e482f7f0e5 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -2394,8 +2394,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 		mlx5_devx_cmd_destroy(priv->q_counters);
 		priv->q_counters = NULL;
 	}
-	if (priv->mreg_cp_tbl)
-		mlx5_hlist_destroy(priv->mreg_cp_tbl);
 	mlx5_mprq_free_mp(dev);
 	mlx5_os_free_shared_dr(priv);
 #ifdef HAVE_MLX5_HWS_SUPPORT
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index c9a3837bd2..bd149b43e5 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1542,6 +1542,8 @@ struct mlx5_dev_ctx_shared {
 		struct mlx5_hlist *flow_tbls; /* SWS flow table. */
 		struct mlx5_hlist *groups; /* HWS flow group. */
 	};
+	struct mlx5_hlist *mreg_cp_tbl;
+	/* Hash table of Rx metadata register copy table. */
 	struct mlx5_flow_tunnel_hub *tunnel_hub;
 	/* Direct Rules tables for FDB, NIC TX+RX */
 	void *dr_drop_action; /* Pointer to DR drop action, any domain. */
@@ -1968,8 +1970,6 @@ struct mlx5_priv {
 	int nl_socket_rdma; /* Netlink socket (NETLINK_RDMA). */
 	int nl_socket_route; /* Netlink socket (NETLINK_ROUTE). */
 	struct mlx5_nl_vlan_vmwa_context *vmwa_context; /* VLAN WA context. */
-	struct mlx5_hlist *mreg_cp_tbl;
-	/* Hash table of Rx metadata register copy table. */
 	struct mlx5_mtr_config mtr_config; /* Meter configuration */
 	uint8_t mtr_sfx_reg; /* Meter prefix-suffix flow match REG_C. */
 	struct mlx5_legacy_flow_meters flow_meters; /* MTR list. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 7bcbbc74b5..9ccbbecc50 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -5178,8 +5178,8 @@ flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
 	};
 
 	/* Check if already registered. */
-	MLX5_ASSERT(priv->mreg_cp_tbl);
-	entry = mlx5_hlist_register(priv->mreg_cp_tbl, mark_id, &ctx);
+	MLX5_ASSERT(priv->sh->mreg_cp_tbl);
+	entry = mlx5_hlist_register(priv->sh->mreg_cp_tbl, mark_id, &ctx);
 	if (!entry)
 		return NULL;
 	return container_of(entry, struct mlx5_flow_mreg_copy_resource,
@@ -5218,10 +5218,10 @@ flow_mreg_del_copy_action(struct rte_eth_dev *dev,
 		return;
 	mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
 				 flow->rix_mreg_copy);
-	if (!mcp_res || !priv->mreg_cp_tbl)
+	if (!mcp_res || !priv->sh->mreg_cp_tbl)
 		return;
 	MLX5_ASSERT(mcp_res->rix_flow);
-	mlx5_hlist_unregister(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
+	mlx5_hlist_unregister(priv->sh->mreg_cp_tbl, &mcp_res->hlist_ent);
 	flow->rix_mreg_copy = 0;
 }
 
@@ -5243,14 +5243,14 @@ flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
 	uint32_t mark_id;
 
 	/* Check if default flow is registered. */
-	if (!priv->mreg_cp_tbl)
+	if (!priv->sh->mreg_cp_tbl)
 		return;
 	mark_id = MLX5_DEFAULT_COPY_ID;
 	ctx.data = &mark_id;
-	entry = mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, &ctx);
+	entry = mlx5_hlist_lookup(priv->sh->mreg_cp_tbl, mark_id, &ctx);
 	if (!entry)
 		return;
-	mlx5_hlist_unregister(priv->mreg_cp_tbl, entry);
+	mlx5_hlist_unregister(priv->sh->mreg_cp_tbl, entry);
 }
 
 /**
@@ -5288,7 +5288,7 @@ flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
 	 */
 	mark_id = MLX5_DEFAULT_COPY_ID;
 	ctx.data = &mark_id;
-	if (mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, &ctx))
+	if (mlx5_hlist_lookup(priv->sh->mreg_cp_tbl, mark_id, &ctx))
 		return 0;
 	mcp_res = flow_mreg_add_copy_action(dev, mark_id, error);
 	if (!mcp_res)
-- 
2.34.1

