DPDK patches and discussions
From: Gregory Etelson <getelson@nvidia.com>
To: <dev@dpdk.org>
Cc: <getelson@nvidia.com>, <matan@nvidia.com>, <rasland@nvidia.com>,
	Viacheslav Ovsiienko <viacheslavo@nvidia.com>,
	Shahaf Shuler <shahafs@nvidia.com>,
	Xueming Li <xuemingl@nvidia.com>
Subject: [dpdk-dev] [PATCH v4 4/6] net/mlx5: fix tunnel offload object allocation
Date: Mon, 16 Nov 2020 11:49:03 +0200
Message-ID: <20201116094905.12873-5-getelson@nvidia.com>
In-Reply-To: <20201116094905.12873-1-getelson@nvidia.com>

The original patch allocated tunnel offload objects with invalid
indexes. As a result, PMD tunnel object allocation failed.

With this patch, the indexed pool provides both the index and the
memory for a new tunnel offload object. The tunnel offload ipool is
also moved under the DV-enabled code only.
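
For context, here is a minimal, self-contained sketch of the allocation
pattern this patch switches to: a single pool call hands back both
zeroed storage and a non-zero index, and the entry is later released by
that index. The toy_ipool_* names and the fixed-size slot array are
invented for illustration only; the driver itself uses
mlx5_ipool_zmalloc()/mlx5_ipool_free() on
priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID], as shown in the diff below.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Toy model of an indexed pool: an allocation returns zeroed storage
 * together with a 1-based index (0 is kept as "no id"), and the entry
 * is freed by that index. Illustrative only, not the driver API.
 */
#define TOY_POOL_ENTRIES 16

struct toy_ipool {
	size_t entry_size;
	void *entries[TOY_POOL_ENTRIES]; /* slot i backs index i + 1 */
};

static void *
toy_ipool_zmalloc(struct toy_ipool *pool, uint32_t *idx)
{
	uint32_t i;

	for (i = 0; i < TOY_POOL_ENTRIES; i++) {
		if (pool->entries[i] != NULL)
			continue;
		pool->entries[i] = calloc(1, pool->entry_size);
		if (pool->entries[i] == NULL)
			return NULL;
		*idx = i + 1; /* 1-based so that 0 means "invalid" */
		return pool->entries[i];
	}
	return NULL;
}

static void
toy_ipool_free(struct toy_ipool *pool, uint32_t idx)
{
	if (idx == 0 || idx > TOY_POOL_ENTRIES)
		return;
	free(pool->entries[idx - 1]);
	pool->entries[idx - 1] = NULL;
}

struct toy_tunnel {
	uint32_t tunnel_id;
	/* ... rest of the tunnel offload context ... */
};

int
main(void)
{
	struct toy_ipool pool = { .entry_size = sizeof(struct toy_tunnel) };
	uint32_t id = 0;
	struct toy_tunnel *tunnel = toy_ipool_zmalloc(&pool, &id);

	if (tunnel == NULL)
		return 1;
	tunnel->tunnel_id = id; /* object and its id come from one call */
	printf("allocated tunnel id=%u\n", tunnel->tunnel_id);
	toy_ipool_free(&pool, tunnel->tunnel_id);
	return 0;
}

The object and its index now always come from the same pool, so a
tunnel can no longer carry an id that the pool never issued.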

Fixes: f2e8093 ("net/mlx5: use indexed pool as id generator")

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5.c      | 50 ++++++++++++++++++------------------
 drivers/net/mlx5/mlx5.h      |  4 +--
 drivers/net/mlx5/mlx5_flow.c | 35 ++++++++-----------------
 3 files changed, 37 insertions(+), 52 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 43344391df..31011c3a72 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -186,7 +186,7 @@ static pthread_mutex_t mlx5_dev_ctx_list_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-	{
+	[MLX5_IPOOL_DECAP_ENCAP] = {
 		.size = sizeof(struct mlx5_flow_dv_encap_decap_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -197,7 +197,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_encap_decap_ipool",
 	},
-	{
+	[MLX5_IPOOL_PUSH_VLAN] = {
 		.size = sizeof(struct mlx5_flow_dv_push_vlan_action_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -208,7 +208,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_push_vlan_ipool",
 	},
-	{
+	[MLX5_IPOOL_TAG] = {
 		.size = sizeof(struct mlx5_flow_dv_tag_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -219,7 +219,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_tag_ipool",
 	},
-	{
+	[MLX5_IPOOL_PORT_ID] = {
 		.size = sizeof(struct mlx5_flow_dv_port_id_action_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -230,7 +230,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_port_id_ipool",
 	},
-	{
+	[MLX5_IPOOL_JUMP] = {
 		.size = sizeof(struct mlx5_flow_tbl_data_entry),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -241,7 +241,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_jump_ipool",
 	},
-	{
+	[MLX5_IPOOL_SAMPLE] = {
 		.size = sizeof(struct mlx5_flow_dv_sample_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -252,7 +252,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_sample_ipool",
 	},
-	{
+	[MLX5_IPOOL_DEST_ARRAY] = {
 		.size = sizeof(struct mlx5_flow_dv_dest_array_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -263,8 +263,19 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_dest_array_ipool",
 	},
+	[MLX5_IPOOL_TUNNEL_ID] = {
+		.size = sizeof(struct mlx5_flow_tunnel),
+		.need_lock = 1,
+		.release_mem_en = 1,
+		.type = "mlx5_tunnel_offload",
+	},
+	[MLX5_IPOOL_TNL_TBL_ID] = {
+		.size = 0,
+		.need_lock = 1,
+		.type = "mlx5_flow_tnl_tbl_ipool",
+	},
 #endif
-	{
+	[MLX5_IPOOL_MTR] = {
 		.size = sizeof(struct mlx5_flow_meter),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -275,7 +286,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_meter_ipool",
 	},
-	{
+	[MLX5_IPOOL_MCP] = {
 		.size = sizeof(struct mlx5_flow_mreg_copy_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -286,7 +297,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_mcp_ipool",
 	},
-	{
+	[MLX5_IPOOL_HRXQ] = {
 		.size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -297,7 +308,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_hrxq_ipool",
 	},
-	{
+	[MLX5_IPOOL_MLX5_FLOW] = {
 		/*
 		 * MLX5_IPOOL_MLX5_FLOW size varies for DV and VERBS flows.
 		 * It set in run time according to PCI function configuration.
@@ -312,7 +323,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_flow_handle_ipool",
 	},
-	{
+	[MLX5_IPOOL_RTE_FLOW] = {
 		.size = sizeof(struct rte_flow),
 		.trunk_size = 4096,
 		.need_lock = 1,
@@ -321,22 +332,12 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "rte_flow_ipool",
 	},
-	{
+	[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID] = {
 		.size = 0,
 		.need_lock = 1,
 		.type = "mlx5_flow_rss_id_ipool",
 	},
-	{
-		.size = 0,
-		.need_lock = 1,
-		.type = "mlx5_flow_tnl_flow_ipool",
-	},
-	{
-		.size = 0,
-		.need_lock = 1,
-		.type = "mlx5_flow_tnl_tbl_ipool",
-	},
-	{
+	[MLX5_IPOOL_RSS_SHARED_ACTIONS] = {
 		.size = sizeof(struct mlx5_shared_action_rss),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -347,7 +348,6 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_shared_action_rss",
 	},
-
 };
 
 
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 7ee63a7a14..1f2b873942 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -44,6 +44,8 @@ enum mlx5_ipool_index {
 	MLX5_IPOOL_JUMP, /* Pool for jump resource. */
 	MLX5_IPOOL_SAMPLE, /* Pool for sample resource. */
 	MLX5_IPOOL_DEST_ARRAY, /* Pool for destination array resource. */
+	MLX5_IPOOL_TUNNEL_ID, /* Pool for tunnel offload context */
+	MLX5_IPOOL_TNL_TBL_ID, /* Pool for tunnel table ID. */
 #endif
 	MLX5_IPOOL_MTR, /* Pool for meter resource. */
 	MLX5_IPOOL_MCP, /* Pool for metadata resource. */
@@ -51,8 +53,6 @@ enum mlx5_ipool_index {
 	MLX5_IPOOL_MLX5_FLOW, /* Pool for mlx5 flow handle. */
 	MLX5_IPOOL_RTE_FLOW, /* Pool for rte_flow. */
 	MLX5_IPOOL_RSS_EXPANTION_FLOW_ID, /* Pool for Queue/RSS flow ID. */
-	MLX5_IPOOL_TUNNEL_ID, /* Pool for flow tunnel ID. */
-	MLX5_IPOOL_TNL_TBL_ID, /* Pool for tunnel table ID. */
 	MLX5_IPOOL_RSS_SHARED_ACTIONS, /* Pool for RSS shared actions. */
 	MLX5_IPOOL_MAX,
 };
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 29785e8891..cd94a73e53 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -7432,14 +7432,13 @@ mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
 		      struct mlx5_flow_tunnel *tunnel)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_indexed_pool *ipool;
 
 	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
 		dev->data->port_id, tunnel->tunnel_id);
-	RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));
-	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],
-			tunnel->tunnel_id);
 	mlx5_hlist_destroy(tunnel->groups);
-	mlx5_free(tunnel);
+	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
+	mlx5_ipool_free(ipool, tunnel->tunnel_id);
 }
 
 static struct mlx5_flow_tunnel *
@@ -7461,39 +7460,25 @@ mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
 			  const struct rte_flow_tunnel *app_tunnel)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_indexed_pool *ipool;
 	struct mlx5_flow_tunnel *tunnel;
 	uint32_t id;
 
-	mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
-			  &id);
+	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
+	tunnel = mlx5_ipool_zmalloc(ipool, &id);
+	if (!tunnel)
+		return NULL;
 	if (id >= MLX5_MAX_TUNNELS) {
-		mlx5_ipool_free(priv->sh->ipool
-				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
+		mlx5_ipool_free(ipool, id);
 		DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id);
 		return NULL;
-	} else if (!id) {
-		return NULL;
-	}
-	/**
-	 * mlx5 flow tunnel is an auxlilary data structure
-	 * It's not part of IO. No need to allocate it from
-	 * huge pages pools dedicated for IO
-	 */
-	tunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel),
-			     0, SOCKET_ID_ANY);
-	if (!tunnel) {
-		mlx5_ipool_free(priv->sh->ipool
-				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
-		return NULL;
 	}
 	tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
 					   mlx5_flow_tunnel_grp2tbl_create_cb,
 					   NULL,
 					   mlx5_flow_tunnel_grp2tbl_remove_cb);
 	if (!tunnel->groups) {
-		mlx5_ipool_free(priv->sh->ipool
-				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
-		mlx5_free(tunnel);
+		mlx5_ipool_free(ipool, id);
 		return NULL;
 	}
 	tunnel->groups->ctx = priv->sh;
-- 
2.29.2


Thread overview: 32+ messages
2020-11-11  7:14 [dpdk-dev] [PATCH 0/4] restore tunnel offload functionality in mlx5 Gregory Etelson
2020-11-11  7:14 ` [dpdk-dev] [PATCH 1/4] net/mlx5: fix offloaded tunnel allocation Gregory Etelson
2020-11-11  8:51   ` Slava Ovsiienko
2020-11-11  7:14 ` [dpdk-dev] [PATCH 2/4] net/mlx5: fix tunnel offload hub multi-thread protection Gregory Etelson
2020-11-11  8:51   ` Slava Ovsiienko
2020-11-11  7:14 ` [dpdk-dev] [PATCH 3/4] net/mlx5: fix PMD crash after tunnel offload match rule destruction Gregory Etelson
2020-11-11  8:51   ` Slava Ovsiienko
2020-11-11  7:14 ` [dpdk-dev] [PATCH 4/4] net/mlx5: fix tunnel offload callback names Gregory Etelson
2020-11-11  8:51   ` Slava Ovsiienko
2020-11-16  9:13 ` [dpdk-dev] [PATCH v3 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
2020-11-16  9:13   ` [dpdk-dev] [PATCH v3 1/6] net/mlx5: fix tunnel offload callback names Gregory Etelson
2020-11-16  9:13   ` [dpdk-dev] [PATCH v3 2/6] net/mlx5: fix build with Direct Verbs disabled Gregory Etelson
2020-11-16  9:13   ` [dpdk-dev] [PATCH v3 3/6] net/mlx5: fix structure passing method in function call Gregory Etelson
2020-11-16  9:13   ` [dpdk-dev] [PATCH v3 4/6] net/mlx5: fix tunnel offload object allocation Gregory Etelson
2020-11-16  9:13   ` [dpdk-dev] [PATCH v3 5/6] net/mlx5: fix tunnel offload hub multi-thread protection Gregory Etelson
2020-11-16  9:13   ` [dpdk-dev] [PATCH v3 6/6] net/mlx5: fix crash in tunnel offload setup Gregory Etelson
2020-11-16  9:48 ` [dpdk-dev] [PATCH v4 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
2020-11-16  9:49   ` [dpdk-dev] [PATCH v4 1/6] net/mlx5: fix tunnel offload callback names Gregory Etelson
2020-11-16  9:49   ` [dpdk-dev] [PATCH v4 2/6] net/mlx5: fix build with Direct Verbs disabled Gregory Etelson
2020-11-16  9:49   ` [dpdk-dev] [PATCH v4 3/6] net/mlx5: fix structure passing method in function call Gregory Etelson
2020-11-16  9:49   ` Gregory Etelson [this message]
2020-11-16  9:49   ` [dpdk-dev] [PATCH v4 5/6] net/mlx5: fix tunnel offload hub multi-thread protection Gregory Etelson
2020-11-16  9:49   ` [dpdk-dev] [PATCH v4 6/6] net/mlx5: fix crash in tunnel offload setup Gregory Etelson
2020-11-16 14:02 ` [dpdk-dev] [PATCH v5 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
2020-11-16 14:02   ` [dpdk-dev] [PATCH v5 1/6] net/mlx5: fix tunnel offload callback names Gregory Etelson
2020-11-16 14:02   ` [dpdk-dev] [PATCH v5 2/6] net/mlx5: fix build with Direct Verbs disabled Gregory Etelson
2020-11-17 14:33     ` Ferruh Yigit
2020-11-16 14:02   ` [dpdk-dev] [PATCH v5 3/6] net/mlx5: fix structure passing method in function call Gregory Etelson
2020-11-16 14:02   ` [dpdk-dev] [PATCH v5 4/6] net/mlx5: fix tunnel offload object allocation Gregory Etelson
2020-11-16 14:02   ` [dpdk-dev] [PATCH v5 5/6] net/mlx5: fix tunnel offload hub multi-thread protection Gregory Etelson
2020-11-16 14:02   ` [dpdk-dev] [PATCH v5 6/6] net/mlx5: fix crash in tunnel offload setup Gregory Etelson
2020-11-17 10:51   ` [dpdk-dev] [PATCH v5 0/6] restore tunnel offload functionality in mlx5 Raslan Darawsheh
