DPDK patches and discussions
* [dpdk-dev] [PATCH 0/4] restore tunnel offload functionality in mlx5
@ 2020-11-11  7:14 Gregory Etelson
  2020-11-11  7:14 ` [dpdk-dev] [PATCH 1/4] net/mlx5: fix offloaded tunnel allocation Gregory Etelson
                   ` (6 more replies)
  0 siblings, 7 replies; 32+ messages in thread
From: Gregory Etelson @ 2020-11-11  7:14 UTC (permalink / raw)
  To: dev; +Cc: getelson, matan, rasland, Viacheslav Ovsiienko

Post-merge fixes to restore tunnel offload functionality in mlx5.

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

Gregory Etelson (4):
  net/mlx5: fix offloaded tunnel allocation
  net/mlx5: fix tunnel offload hub multi-thread protection
  net/mlx5: fix PMD crash after tunnel offload match rule destruction
  net/mlx5: fix tunnel offload callback names

 drivers/net/mlx5/mlx5.c         |  50 ++---
 drivers/net/mlx5/mlx5.h         |   4 +-
 drivers/net/mlx5/mlx5_flow.c    | 334 +++++++++++++++++++++-----------
 drivers/net/mlx5/mlx5_flow.h    |   6 +-
 drivers/net/mlx5/mlx5_flow_dv.c |  39 ++--
 5 files changed, 275 insertions(+), 158 deletions(-)

-- 
2.29.2



* [dpdk-dev] [PATCH 1/4] net/mlx5: fix offloaded tunnel allocation
  2020-11-11  7:14 [dpdk-dev] [PATCH 0/4] restore tunnel offload functionality in mlx5 Gregory Etelson
@ 2020-11-11  7:14 ` Gregory Etelson
  2020-11-11  8:51   ` Slava Ovsiienko
  2020-11-11  7:14 ` [dpdk-dev] [PATCH 2/4] net/mlx5: fix tunnel offload hub multi-thread protection Gregory Etelson
                   ` (5 subsequent siblings)
  6 siblings, 1 reply; 32+ messages in thread
From: Gregory Etelson @ 2020-11-11  7:14 UTC (permalink / raw)
  To: dev
  Cc: getelson, matan, rasland, Shahaf Shuler, Viacheslav Ovsiienko,
	Xueming Li

The original patch allocated tunnel offload objects with invalid
indexes. As a result, PMD tunnel object allocation failed.

In this patch, the indexed pool provides both an index and memory for
a new tunnel offload object. The tunnel offload ipool is also moved to
DV-enabled code only.
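
As an illustration, a condensed sketch of the fixed allocation flow,
based on mlx5_flow_tunnel_allocate() in the diff below (error paths
abbreviated):

    struct mlx5_indexed_pool *ipool;
    struct mlx5_flow_tunnel *tunnel;
    uint32_t id;

    ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_OFFLOAD];
    /* one call now returns both the object memory and a valid index */
    tunnel = mlx5_ipool_zmalloc(ipool, &id);
    if (!tunnel)
        return NULL;
    if (id >= MLX5_MAX_TUNNELS) {
        mlx5_ipool_free(ipool, id);
        return NULL;
    }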

Fixes: f2e8093 ("net/mlx5: use indexed pool as id generator")

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
---
 drivers/net/mlx5/mlx5.c      | 50 ++++++++++++++++++------------------
 drivers/net/mlx5/mlx5.h      |  4 +--
 drivers/net/mlx5/mlx5_flow.c | 41 ++++++++++-------------------
 3 files changed, 40 insertions(+), 55 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 43344391df..e1faa819a3 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -186,7 +186,7 @@ static pthread_mutex_t mlx5_dev_ctx_list_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-	{
+	[MLX5_IPOOL_DECAP_ENCAP] = {
 		.size = sizeof(struct mlx5_flow_dv_encap_decap_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -197,7 +197,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_encap_decap_ipool",
 	},
-	{
+	[MLX5_IPOOL_PUSH_VLAN] = {
 		.size = sizeof(struct mlx5_flow_dv_push_vlan_action_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -208,7 +208,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_push_vlan_ipool",
 	},
-	{
+	[MLX5_IPOOL_TAG] = {
 		.size = sizeof(struct mlx5_flow_dv_tag_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -219,7 +219,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_tag_ipool",
 	},
-	{
+	[MLX5_IPOOL_PORT_ID] = {
 		.size = sizeof(struct mlx5_flow_dv_port_id_action_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -230,7 +230,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_port_id_ipool",
 	},
-	{
+	[MLX5_IPOOL_JUMP] = {
 		.size = sizeof(struct mlx5_flow_tbl_data_entry),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -241,7 +241,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_jump_ipool",
 	},
-	{
+	[MLX5_IPOOL_SAMPLE] = {
 		.size = sizeof(struct mlx5_flow_dv_sample_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -252,7 +252,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_sample_ipool",
 	},
-	{
+	[MLX5_IPOOL_DEST_ARRAY] = {
 		.size = sizeof(struct mlx5_flow_dv_dest_array_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -263,8 +263,19 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_dest_array_ipool",
 	},
+	[MLX5_IPOOL_TUNNEL_OFFLOAD] = {
+		.size = sizeof(struct mlx5_flow_tunnel),
+		.need_lock = 1,
+		.release_mem_en = 1,
+		.type = "mlx5_tunnel_offload",
+	},
+	[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID] = {
+		.size = 0,
+		.need_lock = 1,
+		.type = "mlx5_flow_tnl_tbl_ipool",
+	},
 #endif
-	{
+	[MLX5_IPOOL_MTR] = {
 		.size = sizeof(struct mlx5_flow_meter),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -275,7 +286,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_meter_ipool",
 	},
-	{
+	[MLX5_IPOOL_MCP] = {
 		.size = sizeof(struct mlx5_flow_mreg_copy_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -286,7 +297,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_mcp_ipool",
 	},
-	{
+	[MLX5_IPOOL_HRXQ] = {
 		.size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -297,7 +308,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_hrxq_ipool",
 	},
-	{
+	[MLX5_IPOOL_MLX5_FLOW] = {
 		/*
 		 * MLX5_IPOOL_MLX5_FLOW size varies for DV and VERBS flows.
 		 * It set in run time according to PCI function configuration.
@@ -312,7 +323,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_flow_handle_ipool",
 	},
-	{
+	[MLX5_IPOOL_RTE_FLOW] = {
 		.size = sizeof(struct rte_flow),
 		.trunk_size = 4096,
 		.need_lock = 1,
@@ -321,22 +332,12 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "rte_flow_ipool",
 	},
-	{
+	[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID] = {
 		.size = 0,
 		.need_lock = 1,
 		.type = "mlx5_flow_rss_id_ipool",
 	},
-	{
-		.size = 0,
-		.need_lock = 1,
-		.type = "mlx5_flow_tnl_flow_ipool",
-	},
-	{
-		.size = 0,
-		.need_lock = 1,
-		.type = "mlx5_flow_tnl_tbl_ipool",
-	},
-	{
+	[MLX5_IPOOL_RSS_SHARED_ACTIONS] = {
 		.size = sizeof(struct mlx5_shared_action_rss),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -347,7 +348,6 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_shared_action_rss",
 	},
-
 };
 
 
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 7ee63a7a14..af097d6a7e 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -44,6 +44,8 @@ enum mlx5_ipool_index {
 	MLX5_IPOOL_JUMP, /* Pool for jump resource. */
 	MLX5_IPOOL_SAMPLE, /* Pool for sample resource. */
 	MLX5_IPOOL_DEST_ARRAY, /* Pool for destination array resource. */
+	MLX5_IPOOL_TUNNEL_OFFLOAD, /* Pool for tunnel offload context */
+	MLX5_IPOOL_TUNNEL_FLOW_TBL_ID, /* Pool for tunnel table ID. */
 #endif
 	MLX5_IPOOL_MTR, /* Pool for meter resource. */
 	MLX5_IPOOL_MCP, /* Pool for metadata resource. */
@@ -51,8 +53,6 @@ enum mlx5_ipool_index {
 	MLX5_IPOOL_MLX5_FLOW, /* Pool for mlx5 flow handle. */
 	MLX5_IPOOL_RTE_FLOW, /* Pool for rte_flow. */
 	MLX5_IPOOL_RSS_EXPANTION_FLOW_ID, /* Pool for Queue/RSS flow ID. */
-	MLX5_IPOOL_TUNNEL_ID, /* Pool for flow tunnel ID. */
-	MLX5_IPOOL_TNL_TBL_ID, /* Pool for tunnel table ID. */
 	MLX5_IPOOL_RSS_SHARED_ACTIONS, /* Pool for RSS shared actions. */
 	MLX5_IPOOL_MAX,
 };
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 92adfcacca..31c9d82b4a 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -6934,7 +6934,7 @@ mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,
 	struct mlx5_dev_ctx_shared *sh = list->ctx;
 	struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
 
-	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID],
 			tunnel_flow_tbl_to_id(tte->flow_table));
 	mlx5_free(tte);
 }
@@ -6952,12 +6952,12 @@ mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list,
 			  SOCKET_ID_ANY);
 	if (!tte)
 		goto err;
-	mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+	mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID],
 			  &tte->flow_table);
 	if (tte->flow_table >= MLX5_MAX_TABLES) {
 		DRV_LOG(ERR, "Tunnel TBL ID %d exceed max limit.",
 			tte->flow_table);
-		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID],
 				tte->flow_table);
 		goto err;
 	} else if (!tte->flow_table) {
@@ -7465,14 +7465,13 @@ mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
 		      struct mlx5_flow_tunnel *tunnel)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_indexed_pool *ipool;
 
 	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
 		dev->data->port_id, tunnel->tunnel_id);
-	RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));
-	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],
-			tunnel->tunnel_id);
 	mlx5_hlist_destroy(tunnel->groups);
-	mlx5_free(tunnel);
+	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_OFFLOAD];
+	mlx5_ipool_free(ipool, tunnel->tunnel_id);
 }
 
 static struct mlx5_flow_tunnel *
@@ -7494,39 +7493,25 @@ mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
 			  const struct rte_flow_tunnel *app_tunnel)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_indexed_pool *ipool;
 	struct mlx5_flow_tunnel *tunnel;
 	uint32_t id;
 
-	mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
-			  &id);
+	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_OFFLOAD];
+	tunnel = mlx5_ipool_zmalloc(ipool, &id);
+	if (!tunnel)
+		return NULL;
 	if (id >= MLX5_MAX_TUNNELS) {
-		mlx5_ipool_free(priv->sh->ipool
-				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
+		mlx5_ipool_free(ipool, id);
 		DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id);
 		return NULL;
-	} else if (!id) {
-		return NULL;
-	}
-	/**
-	 * mlx5 flow tunnel is an auxlilary data structure
-	 * It's not part of IO. No need to allocate it from
-	 * huge pages pools dedicated for IO
-	 */
-	tunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel),
-			     0, SOCKET_ID_ANY);
-	if (!tunnel) {
-		mlx5_ipool_free(priv->sh->ipool
-				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
-		return NULL;
 	}
 	tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
 					   mlx5_flow_tunnel_grp2tbl_create_cb,
 					   NULL,
 					   mlx5_flow_tunnel_grp2tbl_remove_cb);
 	if (!tunnel->groups) {
-		mlx5_ipool_free(priv->sh->ipool
-				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
-		mlx5_free(tunnel);
+		mlx5_ipool_free(ipool, id);
 		return NULL;
 	}
 	tunnel->groups->ctx = priv->sh;
-- 
2.29.2



* [dpdk-dev] [PATCH 2/4] net/mlx5: fix tunnel offload hub multi-thread protection
  2020-11-11  7:14 [dpdk-dev] [PATCH 0/4] restore tunnel offload functionality in mlx5 Gregory Etelson
  2020-11-11  7:14 ` [dpdk-dev] [PATCH 1/4] net/mlx5: fix offloaded tunnel allocation Gregory Etelson
@ 2020-11-11  7:14 ` Gregory Etelson
  2020-11-11  8:51   ` Slava Ovsiienko
  2020-11-11  7:14 ` [dpdk-dev] [PATCH 3/4] net/mlx5: fix PMD crash after tunnel offload match rule destruction Gregory Etelson
                   ` (4 subsequent siblings)
  6 siblings, 1 reply; 32+ messages in thread
From: Gregory Etelson @ 2020-11-11  7:14 UTC (permalink / raw)
  To: dev
  Cc: getelson, matan, rasland, Shahaf Shuler, Viacheslav Ovsiienko,
	Suanming Mou

The original patch removed active tunnel offload objects from the
tunnels DB list. That action led to a PMD crash.

The current patch isolates the tunnels DB list behind a separate API
that manages MT protection of the tunnel offload DB.
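
For illustration, a condensed caller-side sketch of that API, based on
mlx5_find_tunnel_id() in the diff below. The caller packs its arguments
into a context and supplies match/hit/miss callbacks; the accessor
walks the tunnels list under the hub spinlock on the caller's behalf:

    struct tunnel_db_find_tunnel_id_ctx ctx = {
        .tunnel_id = id,
    };

    /* list traversal and locking happen inside the accessor */
    mlx5_access_tunnel_offload_db(dev,
                                  find_tunnel_id_match, /* predicate */
                                  find_tunnel_id_hit,   /* on match */
                                  NULL,                 /* no miss action */
                                  &ctx,
                                  true); /* keep lock across callbacks */
    return ctx.tunnel;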

Fixes: e4f5880 ("net/mlx5: make tunnel hub list thread safe")

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c | 256 +++++++++++++++++++++++++----------
 drivers/net/mlx5/mlx5_flow.h |   6 +-
 2 files changed, 192 insertions(+), 70 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 31c9d82b4a..2f01e34033 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -33,6 +33,14 @@
 #include "mlx5_common_os.h"
 #include "rte_pmd_mlx5.h"
 
+static bool
+mlx5_access_tunnel_offload_db
+	(struct rte_eth_dev *dev,
+	 bool (*match)(struct rte_eth_dev *,
+		       struct mlx5_flow_tunnel *, const void *),
+	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
+	 void (*miss)(struct rte_eth_dev *, void *),
+	 void *ctx, bool lock_op);
 static struct mlx5_flow_tunnel *
 mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id);
 static void
@@ -661,29 +669,68 @@ mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
 	return 0;
 }
 
+struct tunnel_db_element_release_ctx {
+	struct rte_flow_item *items;
+	struct rte_flow_action *actions;
+	uint32_t num_elements;
+	struct rte_flow_error *error;
+	int ret;
+};
+
+static bool
+tunnel_element_release_match(struct rte_eth_dev *dev,
+			     struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+	const struct tunnel_db_element_release_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	if (ctx->num_elements != 1)
+		return false;
+	else if (ctx->items)
+		return ctx->items == &tunnel->item;
+	else if (ctx->actions)
+		return ctx->actions == &tunnel->action;
+
+	return false;
+}
+
+static void
+tunnel_element_release_hit(struct rte_eth_dev *dev,
+			   struct mlx5_flow_tunnel *tunnel, void *x)
+{
+	struct tunnel_db_element_release_ctx *ctx = x;
+	ctx->ret = 0;
+	if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
+		mlx5_flow_tunnel_free(dev, tunnel);
+}
+
+static void
+tunnel_element_release_miss(struct rte_eth_dev *dev, void *x)
+{
+	struct tunnel_db_element_release_ctx *ctx = x;
+	RTE_SET_USED(dev);
+	ctx->ret = rte_flow_error_set(ctx->error, EINVAL,
+				      RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				      "invalid argument");
+}
+
 static int
 mlx5_flow_item_release(struct rte_eth_dev *dev,
 		       struct rte_flow_item *pmd_items,
 		       uint32_t num_items, struct rte_flow_error *err)
 {
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
+	struct tunnel_db_element_release_ctx ctx = {
+		.items = pmd_items,
+		.actions = NULL,
+		.num_elements = num_items,
+		.error = err,
+	};
 
-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (&tun->item == pmd_items) {
-			LIST_REMOVE(tun, chain);
-			break;
-		}
-	}
-	rte_spinlock_unlock(&thub->sl);
-	if (!tun || num_items != 1)
-		return rte_flow_error_set(err, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  "invalid argument");
-	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
-		mlx5_flow_tunnel_free(dev, tun);
-	return 0;
+	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
+				      tunnel_element_release_hit,
+				      tunnel_element_release_miss, &ctx, false);
+
+	return ctx.ret;
 }
 
 static int
@@ -691,25 +738,18 @@ mlx5_flow_action_release(struct rte_eth_dev *dev,
 			 struct rte_flow_action *pmd_actions,
 			 uint32_t num_actions, struct rte_flow_error *err)
 {
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
+	struct tunnel_db_element_release_ctx ctx = {
+		.items = NULL,
+		.actions = pmd_actions,
+		.num_elements = num_actions,
+		.error = err,
+	};
 
-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (&tun->action == pmd_actions) {
-			LIST_REMOVE(tun, chain);
-			break;
-		}
-	}
-	rte_spinlock_unlock(&thub->sl);
-	if (!tun || num_actions != 1)
-		return rte_flow_error_set(err, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  "invalid argument");
-	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
-		mlx5_flow_tunnel_free(dev, tun);
+	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
+				      tunnel_element_release_hit,
+				      tunnel_element_release_miss, &ctx, false);
 
-	return 0;
+	return ctx.ret;
 }
 
 static int
@@ -5889,11 +5929,8 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
 	if (flow->tunnel) {
 		struct mlx5_flow_tunnel *tunnel;
 
-		rte_spinlock_lock(&mlx5_tunnel_hub(dev)->sl);
 		tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
 		RTE_VERIFY(tunnel);
-		LIST_REMOVE(tunnel, chain);
-		rte_spinlock_unlock(&mlx5_tunnel_hub(dev)->sl);
 		if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
 			mlx5_flow_tunnel_free(dev, tunnel);
 	}
@@ -7464,28 +7501,87 @@ static void
 mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
 		      struct mlx5_flow_tunnel *tunnel)
 {
+	/* no tunnel hub spinlock protection */
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
 	struct mlx5_indexed_pool *ipool;
 
 	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
 		dev->data->port_id, tunnel->tunnel_id);
+	rte_spinlock_lock(&thub->sl);
+	LIST_REMOVE(tunnel, chain);
+	rte_spinlock_unlock(&thub->sl);
 	mlx5_hlist_destroy(tunnel->groups);
 	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_OFFLOAD];
 	mlx5_ipool_free(ipool, tunnel->tunnel_id);
 }
 
-static struct mlx5_flow_tunnel *
-mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+static bool
+mlx5_access_tunnel_offload_db
+	(struct rte_eth_dev *dev,
+	 bool (*match)(struct rte_eth_dev *,
+		       struct mlx5_flow_tunnel *, const void *),
+	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
+	 void (*miss)(struct rte_eth_dev *, void *),
+	 void *ctx, bool lock_op)
 {
+	bool verdict = false;
 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
+	struct mlx5_flow_tunnel *tunnel;
 
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (tun->tunnel_id == id)
+	rte_spinlock_lock(&thub->sl);
+	LIST_FOREACH(tunnel, &thub->tunnels, chain) {
+		verdict = match(dev, tunnel, (const void *)ctx);
+		if (verdict)
 			break;
 	}
+	if (!lock_op)
+		rte_spinlock_unlock(&thub->sl);
+	if (verdict && hit)
+		hit(dev, tunnel, ctx);
+	if (!verdict && miss)
+		miss(dev, ctx);
+	if (lock_op)
+		rte_spinlock_unlock(&thub->sl);
 
-	return tun;
+	return verdict;
+}
+
+struct tunnel_db_find_tunnel_id_ctx {
+	uint32_t tunnel_id;
+	struct mlx5_flow_tunnel *tunnel;
+};
+
+static bool
+find_tunnel_id_match(struct rte_eth_dev *dev,
+		     struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+	const struct tunnel_db_find_tunnel_id_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	return tunnel->tunnel_id == ctx->tunnel_id;
+}
+
+static void
+find_tunnel_id_hit(struct rte_eth_dev *dev,
+		   struct mlx5_flow_tunnel *tunnel, void *x)
+{
+	struct tunnel_db_find_tunnel_id_ctx *ctx = x;
+	RTE_SET_USED(dev);
+	ctx->tunnel = tunnel;
+}
+
+static struct mlx5_flow_tunnel *
+mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+{
+	struct tunnel_db_find_tunnel_id_ctx ctx = {
+		.tunnel_id = id,
+	};
+
+	mlx5_access_tunnel_offload_db(dev, find_tunnel_id_match,
+				      find_tunnel_id_hit, NULL, &ctx, true);
+
+	return ctx.tunnel;
 }
 
 static struct mlx5_flow_tunnel *
@@ -7533,38 +7629,60 @@ mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
 	return tunnel;
 }
 
+struct tunnel_db_get_tunnel_ctx {
+	const struct rte_flow_tunnel *app_tunnel;
+	struct mlx5_flow_tunnel *tunnel;
+};
+
+static bool get_tunnel_match(struct rte_eth_dev *dev,
+			     struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+	const struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	return !memcmp(ctx->app_tunnel, &tunnel->app_tunnel,
+		       sizeof(*ctx->app_tunnel));
+}
+
+static void get_tunnel_hit(struct rte_eth_dev *dev,
+			   struct mlx5_flow_tunnel *tunnel, void *x)
+{
+	/* called under tunnel spinlock protection */
+	struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	tunnel->refctn++;
+	ctx->tunnel = tunnel;
+}
+
+static void get_tunnel_miss(struct rte_eth_dev *dev, void *x)
+{
+	/* called under tunnel spinlock protection */
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+	struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+	rte_spinlock_unlock(&thub->sl);
+	ctx->tunnel = mlx5_flow_tunnel_allocate(dev, ctx->app_tunnel);
+	ctx->tunnel->refctn = 1;
+	rte_spinlock_lock(&thub->sl);
+	if (ctx->tunnel)
+		LIST_INSERT_HEAD(&thub->tunnels, ctx->tunnel, chain);
+}
+
+
 static int
 mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
 		     const struct rte_flow_tunnel *app_tunnel,
 		     struct mlx5_flow_tunnel **tunnel)
 {
-	int ret;
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
-
-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (!memcmp(app_tunnel, &tun->app_tunnel,
-			    sizeof(*app_tunnel))) {
-			*tunnel = tun;
-			ret = 0;
-			break;
-		}
-	}
-	if (!tun) {
-		tun = mlx5_flow_tunnel_allocate(dev, app_tunnel);
-		if (tun) {
-			LIST_INSERT_HEAD(&thub->tunnels, tun, chain);
-			*tunnel = tun;
-		} else {
-			ret = -ENOMEM;
-		}
-	}
-	rte_spinlock_unlock(&thub->sl);
-	if (tun)
-		__atomic_add_fetch(&tun->refctn, 1, __ATOMIC_RELAXED);
+	struct tunnel_db_get_tunnel_ctx ctx = {
+		.app_tunnel = app_tunnel,
+	};
 
-	return ret;
+	mlx5_access_tunnel_offload_db(dev, get_tunnel_match, get_tunnel_hit,
+				      get_tunnel_miss, &ctx, true);
+	*tunnel = ctx.tunnel;
+	return ctx.tunnel ? 0 : -ENOMEM;
 }
 
 void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id)
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index e3a5030785..bdf2c50090 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -950,8 +950,12 @@ struct mlx5_flow_tunnel {
 
 /** PMD tunnel related context */
 struct mlx5_flow_tunnel_hub {
+	/* Tunnels list
+	 * Access to the list MUST be MT protected
+	 */
 	LIST_HEAD(, mlx5_flow_tunnel) tunnels;
-	rte_spinlock_t sl;			/* Tunnel list spinlock. */
+	 /* protect access to the tunnels list */
+	rte_spinlock_t sl;
 	struct mlx5_hlist *groups;		/** non tunnel groups */
 };
 
-- 
2.29.2



* [dpdk-dev] [PATCH 3/4] net/mlx5: fix PMD crash after tunnel offload match rule destruction
  2020-11-11  7:14 [dpdk-dev] [PATCH 0/4] restore tunnel offload functionality in mlx5 Gregory Etelson
  2020-11-11  7:14 ` [dpdk-dev] [PATCH 1/4] net/mlx5: fix offloaded tunnel allocation Gregory Etelson
  2020-11-11  7:14 ` [dpdk-dev] [PATCH 2/4] net/mlx5: fix tunnel offload hub multi-thread protection Gregory Etelson
@ 2020-11-11  7:14 ` Gregory Etelson
  2020-11-11  8:51   ` Slava Ovsiienko
  2020-11-11  7:14 ` [dpdk-dev] [PATCH 4/4] net/mlx5: fix tunnel offload callback names Gregory Etelson
                   ` (3 subsequent siblings)
  6 siblings, 1 reply; 32+ messages in thread
From: Gregory Etelson @ 2020-11-11  7:14 UTC (permalink / raw)
  To: dev
  Cc: getelson, matan, rasland, Shahaf Shuler, Viacheslav Ovsiienko,
	Xueming Li

The new flow table resource management API triggered a PMD crash in
tunnel offload mode when a tunnel match flow rule was inserted before
the tunnel set rule.

The reason for the crash was a double flow table registration. The
table was registered first by the tunnel offload code and once more by
the PMD code, as part of general table processing. The table counter
was decremented only once during rule destruction, causing a resource
leak that triggered the crash.

The patch updates the PMD registration with tunnel offload parameters
and removes the table registration from the tunnel related code.
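
The imbalance, sketched (a simplified illustration; "refcnt" stands for
the table's internal reference count, not a real variable name):

    /* before the fix, with a match rule inserted before the set rule:
     *
     *   tunnel offload code: flow_dv_tbl_resource_get()  -> refcnt 1
     *   generic PMD path:    flow_dv_tbl_resource_get()  -> refcnt 2
     *   rule destruction:    single release              -> refcnt 1
     *
     * the table is never freed and the leaked entry later triggers
     * the crash. After the fix there is a single registration, done
     * in flow_dv_matcher_register() with the tunnel parameters:
     */
    if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
                                 tunnel, attr->group, error))
        return -rte_errno;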

Fixes: 663ad57dabb2 ("net/mlx5: make flow table cache thread safe")

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c    | 16 ++++++++++----
 drivers/net/mlx5/mlx5_flow_dv.c | 39 +++++++++++++++++----------------
 2 files changed, 32 insertions(+), 23 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 2f01e34033..185b4ba51a 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -7024,7 +7024,15 @@ tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
 	struct mlx5_hlist *group_hash;
 
 	group_hash = tunnel ? tunnel->groups : thub->groups;
-	he = mlx5_hlist_register(group_hash, key.val, NULL);
+	he = mlx5_hlist_lookup(group_hash, key.val, NULL);
+	if (!he) {
+		DRV_LOG(DEBUG, "port %u tunnel %u group=%u - generate table id",
+		dev->data->port_id, key.tunnel_id, group);
+		he = mlx5_hlist_register(group_hash, key.val, NULL);
+	} else {
+		DRV_LOG(DEBUG, "port %u tunnel %u group=%u - skip table id",
+		dev->data->port_id, key.tunnel_id, group);
+	}
 	if (!he)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
@@ -7032,8 +7040,8 @@ tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
 					  "tunnel group index not supported");
 	tte = container_of(he, typeof(*tte), hash);
 	*table = tte->flow_table;
-	DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x",
-		dev->data->port_id, key.tunnel_id, group, *table);
+	DRV_LOG(DEBUG, "port %u tunnel %u group=%u table=%u",
+	dev->data->port_id, key.tunnel_id, group, *table);
 	return 0;
 }
 
@@ -7114,7 +7122,7 @@ mlx5_flow_group_to_table(struct rte_eth_dev *dev,
 		standard_translation = true;
 	}
 	DRV_LOG(DEBUG,
-		"port %u group=%#x transfer=%d external=%d fdb_def_rule=%d translate=%s",
+		"port %u group=%u transfer=%d external=%d fdb_def_rule=%d translate=%s",
 		dev->data->port_id, group, grp_info.transfer,
 		grp_info.external, grp_info.fdb_def_rule,
 		standard_translation ? "STANDARD" : "TUNNEL");
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 78c710fef9..95165980f4 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -8042,6 +8042,8 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
 				   "cannot get table");
 		return NULL;
 	}
+	DRV_LOG(DEBUG, "Table_id %u tunnel %u group %u registered.",
+		table_id, tunnel ? tunnel->tunnel_id : 0, group_id);
 	tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
 	return &tbl_data->tbl;
 }
@@ -8080,7 +8082,7 @@ flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
 		if (he)
 			mlx5_hlist_unregister(tunnel_grp_hash, he);
 		DRV_LOG(DEBUG,
-			"Table_id %#x tunnel %u group %u released.",
+			"Table_id %u tunnel %u group %u released.",
 			table_id,
 			tbl_data->tunnel ?
 			tbl_data->tunnel->tunnel_id : 0,
@@ -8192,6 +8194,8 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
 			 struct mlx5_flow_dv_matcher *ref,
 			 union mlx5_flow_tbl_key *key,
 			 struct mlx5_flow *dev_flow,
+			 const struct mlx5_flow_tunnel *tunnel,
+			 uint32_t group_id,
 			 struct rte_flow_error *error)
 {
 	struct mlx5_cache_entry *entry;
@@ -8203,8 +8207,14 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
 		.data = ref,
 	};
 
-	tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
-				       key->domain, false, NULL, 0, 0, error);
+	/**
+	 * tunnel offload API requires this registration for cases when
+	 * tunnel match rule was inserted before tunnel set rule.
+	 */
+	tbl = flow_dv_tbl_resource_get(dev, key->table_id,
+				       key->direction, key->domain,
+				       dev_flow->external, tunnel,
+				       group_id, 0, error);
 	if (!tbl)
 		return -rte_errno;	/* No need to refill the error info */
 	tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
@@ -9605,10 +9615,14 @@ flow_dv_translate(struct rte_eth_dev *dev,
 		/*
 		 * do not add decap action if match rule drops packet
 		 * HW rejects rules with decap & drop
+		 *
+		 * if tunnel match rule was inserted before matching tunnel set
+		 * rule flow table used in the match rule must be registered.
+		 * current implementation handles that in the
+		 * flow_dv_match_register() at the function end.
 		 */
 		bool add_decap = true;
 		const struct rte_flow_action *ptr = actions;
-		struct mlx5_flow_tbl_resource *tbl;
 
 		for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
 			if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
@@ -9625,20 +9639,6 @@ flow_dv_translate(struct rte_eth_dev *dev,
 					dev_flow->dv.encap_decap->action;
 			action_flags |= MLX5_FLOW_ACTION_DECAP;
 		}
-		/*
-		 * bind table_id with <group, table> for tunnel match rule.
-		 * Tunnel set rule establishes that bind in JUMP action handler.
-		 * Required for scenario when application creates tunnel match
-		 * rule before tunnel set rule.
-		 */
-		tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
-					       attr->transfer,
-					       !!dev_flow->external, tunnel,
-					       attr->group, 0, error);
-		if (!tbl)
-			return rte_flow_error_set
-			       (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
-			       actions, "cannot register tunnel group");
 	}
 	for (; !actions_end ; actions++) {
 		const struct rte_flow_action_queue *queue;
@@ -10468,7 +10468,8 @@ flow_dv_translate(struct rte_eth_dev *dev,
 	tbl_key.domain = attr->transfer;
 	tbl_key.direction = attr->egress;
 	tbl_key.table_id = dev_flow->dv.group;
-	if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
+	if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
+				     tunnel, attr->group, error))
 		return -rte_errno;
 	return 0;
 }
-- 
2.29.2



* [dpdk-dev] [PATCH 4/4] net/mlx5: fix tunnel offload callback names
  2020-11-11  7:14 [dpdk-dev] [PATCH 0/4] restore tunnel offload functionality in mlx5 Gregory Etelson
                   ` (2 preceding siblings ...)
  2020-11-11  7:14 ` [dpdk-dev] [PATCH 3/4] net/mlx5: fix PMD crash after tunnel offload match rule destruction Gregory Etelson
@ 2020-11-11  7:14 ` Gregory Etelson
  2020-11-11  8:51   ` Slava Ovsiienko
  2020-11-16  9:13 ` [dpdk-dev] [PATCH v3 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
                   ` (2 subsequent siblings)
  6 siblings, 1 reply; 32+ messages in thread
From: Gregory Etelson @ 2020-11-11  7:14 UTC (permalink / raw)
  To: dev; +Cc: getelson, matan, rasland, Shahaf Shuler, Viacheslav Ovsiienko

Fix the mlx5_flow_tunnel_action_release and
mlx5_flow_tunnel_item_release callback names to match the tunnel
offload naming pattern.

Fixes: 4ec6360de37d ("net/mlx5: implement tunnel offload")

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 185b4ba51a..358a5f4e72 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -715,9 +715,9 @@ tunnel_element_release_miss(struct rte_eth_dev *dev, void *x)
 }
 
 static int
-mlx5_flow_item_release(struct rte_eth_dev *dev,
-		       struct rte_flow_item *pmd_items,
-		       uint32_t num_items, struct rte_flow_error *err)
+mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
+			      struct rte_flow_item *pmd_items,
+			      uint32_t num_items, struct rte_flow_error *err)
 {
 	struct tunnel_db_element_release_ctx ctx = {
 		.items = pmd_items,
@@ -734,9 +734,10 @@ mlx5_flow_item_release(struct rte_eth_dev *dev,
 }
 
 static int
-mlx5_flow_action_release(struct rte_eth_dev *dev,
-			 struct rte_flow_action *pmd_actions,
-			 uint32_t num_actions, struct rte_flow_error *err)
+mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
+				struct rte_flow_action *pmd_actions,
+				uint32_t num_actions,
+				struct rte_flow_error *err)
 {
 	struct tunnel_db_element_release_ctx ctx = {
 		.items = NULL,
@@ -800,8 +801,8 @@ static const struct rte_flow_ops mlx5_flow_ops = {
 	.shared_action_query = mlx5_shared_action_query,
 	.tunnel_decap_set = mlx5_flow_tunnel_decap_set,
 	.tunnel_match = mlx5_flow_tunnel_match,
-	.tunnel_action_decap_release = mlx5_flow_action_release,
-	.tunnel_item_release = mlx5_flow_item_release,
+	.tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
+	.tunnel_item_release = mlx5_flow_tunnel_item_release,
 	.get_restore_info = mlx5_flow_tunnel_get_restore_info,
 };
 
-- 
2.29.2



* Re: [dpdk-dev] [PATCH 1/4] net/mlx5: fix offloaded tunnel allocation
  2020-11-11  7:14 ` [dpdk-dev] [PATCH 1/4] net/mlx5: fix offloaded tunnel allocation Gregory Etelson
@ 2020-11-11  8:51   ` Slava Ovsiienko
  0 siblings, 0 replies; 32+ messages in thread
From: Slava Ovsiienko @ 2020-11-11  8:51 UTC (permalink / raw)
  To: Gregory Etelson, dev
  Cc: Matan Azrad, Raslan Darawsheh, Shahaf Shuler, Xueming(Steven) Li

> -----Original Message-----
> From: Gregory Etelson <getelson@nvidia.com>
> Sent: Wednesday, November 11, 2020 9:14
> To: dev@dpdk.org
> Cc: Gregory Etelson <getelson@nvidia.com>; Matan Azrad
> <matan@nvidia.com>; Raslan Darawsheh <rasland@nvidia.com>; Shahaf
> Shuler <shahafs@nvidia.com>; Slava Ovsiienko <viacheslavo@nvidia.com>;
> Xueming(Steven) Li <xuemingl@nvidia.com>
> Subject: [PATCH 1/4] net/mlx5: fix offloaded tunnel allocation
> 
> The original patch allocated tunnel offload objects with invalid indexes. As the
> result, PMD tunnel object allocation failed.
> 
> In this patch indexed pool provides both an index and memory for a new
> tunnel offload object.
> Also tunnel offload ipool moved to dv enabled code only.
> 
> Fixes: f2e8093 ("net/mlx5: use indexed pool as id generator")
> 
> Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>



* Re: [dpdk-dev] [PATCH 2/4] net/mlx5: fix tunnel offload hub multi-thread protection
  2020-11-11  7:14 ` [dpdk-dev] [PATCH 2/4] net/mlx5: fix tunnel offload hub multi-thread protection Gregory Etelson
@ 2020-11-11  8:51   ` Slava Ovsiienko
  0 siblings, 0 replies; 32+ messages in thread
From: Slava Ovsiienko @ 2020-11-11  8:51 UTC (permalink / raw)
  To: Gregory Etelson, dev
  Cc: Matan Azrad, Raslan Darawsheh, Shahaf Shuler, Suanming Mou

> -----Original Message-----
> From: Gregory Etelson <getelson@nvidia.com>
> Sent: Wednesday, November 11, 2020 9:14
> To: dev@dpdk.org
> Cc: Gregory Etelson <getelson@nvidia.com>; Matan Azrad
> <matan@nvidia.com>; Raslan Darawsheh <rasland@nvidia.com>; Shahaf
> Shuler <shahafs@nvidia.com>; Slava Ovsiienko <viacheslavo@nvidia.com>;
> Suanming Mou <suanmingm@nvidia.com>
> Subject: [PATCH 2/4] net/mlx5: fix tunnel offload hub multi-thread protection
> 
> The original patch was removing active tunnel offload objects from a tunnels
> db list. That action was leading to a PMD crash.
> 
> Current patch isolates tunnels db list into a separate API. That API manages MT
> protection of the tunnel offload db.
> 
> Fixes: e4f5880 ("net/mlx5: make tunnel hub list thread safe")
> 
> Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>


* Re: [dpdk-dev] [PATCH 3/4] net/mlx5: fix PMD crash after tunnel offload match rule destruction
  2020-11-11  7:14 ` [dpdk-dev] [PATCH 3/4] net/mlx5: fix PMD crash after tunnel offload match rule destruction Gregory Etelson
@ 2020-11-11  8:51   ` Slava Ovsiienko
  0 siblings, 0 replies; 32+ messages in thread
From: Slava Ovsiienko @ 2020-11-11  8:51 UTC (permalink / raw)
  To: Gregory Etelson, dev
  Cc: Matan Azrad, Raslan Darawsheh, Shahaf Shuler, Xueming(Steven) Li

> -----Original Message-----
> From: Gregory Etelson <getelson@nvidia.com>
> Sent: Wednesday, November 11, 2020 9:14
> To: dev@dpdk.org
> Cc: Gregory Etelson <getelson@nvidia.com>; Matan Azrad
> <matan@nvidia.com>; Raslan Darawsheh <rasland@nvidia.com>; Shahaf
> Shuler <shahafs@nvidia.com>; Slava Ovsiienko <viacheslavo@nvidia.com>;
> Xueming(Steven) Li <xuemingl@nvidia.com>
> Subject: [PATCH 3/4] net/mlx5: fix PMD crash after tunnel offload match rule
> destruction
> 
> The new flow table resource management API triggered a PMD crash in tunnel
> offload mode, when tunnel match flow rule was inserted before tunnel set
> rule.
> 
> Reason for the crash was double flow table registration. The table was
> registered by the tunnel offload code for the first time and once more by PMD
> code, as part of general table processing. The table counter was decremented
> only once during the rule destruction and caused a resource leak that triggered
> the crash.
> 
> The patch updates PMD registration with tunnel offload parameters and
> removes table registration in tunnel related code.
> 
> Fixes: 663ad57dabb2 ("net/mlx5: make flow table cache thread safe")
> 
> Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>


* Re: [dpdk-dev] [PATCH 4/4] net/mlx5: fix tunnel offload callback names
  2020-11-11  7:14 ` [dpdk-dev] [PATCH 4/4] net/mlx5: fix tunnel offload callback names Gregory Etelson
@ 2020-11-11  8:51   ` Slava Ovsiienko
  0 siblings, 0 replies; 32+ messages in thread
From: Slava Ovsiienko @ 2020-11-11  8:51 UTC (permalink / raw)
  To: Gregory Etelson, dev; +Cc: Matan Azrad, Raslan Darawsheh, Shahaf Shuler

> -----Original Message-----
> From: Gregory Etelson <getelson@nvidia.com>
> Sent: Wednesday, November 11, 2020 9:14
> To: dev@dpdk.org
> Cc: Gregory Etelson <getelson@nvidia.com>; Matan Azrad
> <matan@nvidia.com>; Raslan Darawsheh <rasland@nvidia.com>; Shahaf
> Shuler <shahafs@nvidia.com>; Slava Ovsiienko <viacheslavo@nvidia.com>
> Subject: [PATCH 4/4] net/mlx5: fix tunnel offload callback names
> 
> Fix mlx5_flow_tunnel_action_release and mlx5_flow_tunnel_item_release
> callback names to match tunnel offload names pattern.
> 
> Fixes: 4ec6360de37d ("net/mlx5: implement tunnel offload")
> 
> Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>


* [dpdk-dev] [PATCH v3 0/6] restore tunnel offload functionality in mlx5
  2020-11-11  7:14 [dpdk-dev] [PATCH 0/4] restore tunnel offload functionality in mlx5 Gregory Etelson
                   ` (3 preceding siblings ...)
  2020-11-11  7:14 ` [dpdk-dev] [PATCH 4/4] net/mlx5: fix tunnel offload callback names Gregory Etelson
@ 2020-11-16  9:13 ` Gregory Etelson
  2020-11-16  9:13   ` [dpdk-dev] [PATCH v3 1/6] net/mlx5: fix tunnel offload callback names Gregory Etelson
                     ` (5 more replies)
  2020-11-16  9:48 ` [dpdk-dev] [PATCH v4 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
  2020-11-16 14:02 ` [dpdk-dev] [PATCH v5 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
  6 siblings, 6 replies; 32+ messages in thread
From: Gregory Etelson @ 2020-11-16  9:13 UTC (permalink / raw)
  To: dev; +Cc: getelson, matan, rasland

Post-merge fixes that restore tunnel offload functionality in mlx5.

v2: resolve a compilation error in the Ubuntu inbox build.

v3: re-arrange patches to prevent the Ubuntu compilation error.
 
Gregory Etelson (6):
  net/mlx5: fix tunnel offload callback names
  net/mlx5: fix build with Direct Verbs disabled
  net/mlx5: fix structure passing method in function call
  net/mlx5: fix tunnel offload object allocation
  net/mlx5: fix tunnel offload hub multi-thread protection
  net/mlx5: fix crash in tunnel offload setup

 drivers/net/mlx5/mlx5.c         |   50 +-
 drivers/net/mlx5/mlx5.h         |    4 +-
 drivers/net/mlx5/mlx5_flow.c    | 1169 ++++++++++++++++++-------------
 drivers/net/mlx5/mlx5_flow.h    |   15 +-
 drivers/net/mlx5/mlx5_flow_dv.c |   49 +-
 5 files changed, 762 insertions(+), 525 deletions(-)

-- 
2.29.2



* [dpdk-dev] [PATCH v3 1/6] net/mlx5: fix tunnel offload callback names
  2020-11-16  9:13 ` [dpdk-dev] [PATCH v3 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
@ 2020-11-16  9:13   ` Gregory Etelson
  2020-11-16  9:13   ` [dpdk-dev] [PATCH v3 2/6] net/mlx5: fix build with Direct Verbs disabled Gregory Etelson
                     ` (4 subsequent siblings)
  5 siblings, 0 replies; 32+ messages in thread
From: Gregory Etelson @ 2020-11-16  9:13 UTC (permalink / raw)
  To: dev; +Cc: getelson, matan, rasland, Viacheslav Ovsiienko, Shahaf Shuler

Fix the mlx5_flow_tunnel_action_release and
mlx5_flow_tunnel_item_release callback names to match the tunnel
offload naming pattern.

Fixes: 4ec6360de37d ("net/mlx5: implement tunnel offload")

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 324349ed19..98559ece2b 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -662,9 +662,9 @@ mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
 }
 
 static int
-mlx5_flow_item_release(struct rte_eth_dev *dev,
-		       struct rte_flow_item *pmd_items,
-		       uint32_t num_items, struct rte_flow_error *err)
+mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
+			      struct rte_flow_item *pmd_items,
+			      uint32_t num_items, struct rte_flow_error *err)
 {
 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
 	struct mlx5_flow_tunnel *tun;
@@ -687,9 +687,10 @@ mlx5_flow_item_release(struct rte_eth_dev *dev,
 }
 
 static int
-mlx5_flow_action_release(struct rte_eth_dev *dev,
-			 struct rte_flow_action *pmd_actions,
-			 uint32_t num_actions, struct rte_flow_error *err)
+mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
+				struct rte_flow_action *pmd_actions,
+				uint32_t num_actions,
+				struct rte_flow_error *err)
 {
 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
 	struct mlx5_flow_tunnel *tun;
@@ -760,8 +761,8 @@ static const struct rte_flow_ops mlx5_flow_ops = {
 	.shared_action_query = mlx5_shared_action_query,
 	.tunnel_decap_set = mlx5_flow_tunnel_decap_set,
 	.tunnel_match = mlx5_flow_tunnel_match,
-	.tunnel_action_decap_release = mlx5_flow_action_release,
-	.tunnel_item_release = mlx5_flow_item_release,
+	.tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
+	.tunnel_item_release = mlx5_flow_tunnel_item_release,
 	.get_restore_info = mlx5_flow_tunnel_get_restore_info,
 };
 
-- 
2.29.2



* [dpdk-dev] [PATCH v3 2/6] net/mlx5: fix build with Direct Verbs disabled
  2020-11-16  9:13 ` [dpdk-dev] [PATCH v3 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
  2020-11-16  9:13   ` [dpdk-dev] [PATCH v3 1/6] net/mlx5: fix tunnel offload callback names Gregory Etelson
@ 2020-11-16  9:13   ` Gregory Etelson
  2020-11-16  9:13   ` [dpdk-dev] [PATCH v3 3/6] net/mlx5: fix structure passing method in function call Gregory Etelson
                     ` (3 subsequent siblings)
  5 siblings, 0 replies; 32+ messages in thread
From: Gregory Etelson @ 2020-11-16  9:13 UTC (permalink / raw)
  To: dev; +Cc: getelson, matan, rasland, Viacheslav Ovsiienko, Shahaf Shuler

The tunnel offload API is implemented for the Direct Verbs environment
only. The current patch re-arranges tunnel-related functions so they
compile in non-Direct-Verbs setups, preventing compilation failures.
The patch does not introduce new functions.
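
For illustration, the re-arrangement in condensed form, a sketch
assuming the HAVE_IBV_FLOW_DV_SUPPORT guard already used elsewhere in
the driver (the diff below shows the actual moves):

    /* forward declarations stay visible to every build */
    static struct mlx5_flow_tunnel *
    mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id);

    #ifdef HAVE_IBV_FLOW_DV_SUPPORT
    /* full tunnel offload implementations, compiled for DV only */
    #else
    /* non-DV builds get stubs, so the references still link */
    #endif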

Fixes: 4ec6360de37d ("net/mlx5: implement tunnel offload")

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c | 935 ++++++++++++++++++++---------------
 drivers/net/mlx5/mlx5_flow.h |   5 +
 2 files changed, 535 insertions(+), 405 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 98559ece2b..e4fe78df4c 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -33,16 +33,35 @@
 #include "mlx5_common_os.h"
 #include "rte_pmd_mlx5.h"
 
+struct tunnel_default_miss_ctx {
+	uint16_t *queue;
+	__extension__
+	union {
+		struct rte_flow_action_rss action_rss;
+		struct rte_flow_action_queue miss_queue;
+		struct rte_flow_action_jump miss_jump;
+		uint8_t raw[0];
+	};
+};
+
+static int
+flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
+			     struct rte_flow *flow,
+			     const struct rte_flow_attr *attr,
+			     const struct rte_flow_action *app_actions,
+			     uint32_t flow_idx,
+			     struct tunnel_default_miss_ctx *ctx,
+			     struct rte_flow_error *error);
 static struct mlx5_flow_tunnel *
 mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id);
 static void
 mlx5_flow_tunnel_free(struct rte_eth_dev *dev, struct mlx5_flow_tunnel *tunnel);
-static const struct mlx5_flow_tbl_data_entry  *
-tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark);
-static int
-mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
-		     const struct rte_flow_tunnel *app_tunnel,
-		     struct mlx5_flow_tunnel **tunnel);
+static uint32_t
+tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
+				const struct mlx5_flow_tunnel *tunnel,
+				uint32_t group, uint32_t *table,
+				struct rte_flow_error *error);
+
 static struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);
 static void mlx5_flow_pop_thread_workspace(void);
 
@@ -606,145 +625,32 @@ mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
 	return !err_msg;
 }
 
-
 static int
 mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
 		    struct rte_flow_tunnel *app_tunnel,
 		    struct rte_flow_action **actions,
 		    uint32_t *num_of_actions,
-		    struct rte_flow_error *error)
-{
-	int ret;
-	struct mlx5_flow_tunnel *tunnel;
-	const char *err_msg = NULL;
-	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
-
-	if (!verdict)
-		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
-					  err_msg);
-	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
-	if (ret < 0) {
-		return rte_flow_error_set(error, ret,
-					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
-					  "failed to initialize pmd tunnel");
-	}
-	*actions = &tunnel->action;
-	*num_of_actions = 1;
-	return 0;
-}
-
+		    struct rte_flow_error *error);
 static int
 mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
 		       struct rte_flow_tunnel *app_tunnel,
 		       struct rte_flow_item **items,
 		       uint32_t *num_of_items,
-		       struct rte_flow_error *error)
-{
-	int ret;
-	struct mlx5_flow_tunnel *tunnel;
-	const char *err_msg = NULL;
-	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
-
-	if (!verdict)
-		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  err_msg);
-	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
-	if (ret < 0) {
-		return rte_flow_error_set(error, ret,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  "failed to initialize pmd tunnel");
-	}
-	*items = &tunnel->item;
-	*num_of_items = 1;
-	return 0;
-}
-
+		       struct rte_flow_error *error);
 static int
 mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
 			      struct rte_flow_item *pmd_items,
-			      uint32_t num_items, struct rte_flow_error *err)
-{
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
-
-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (&tun->item == pmd_items) {
-			LIST_REMOVE(tun, chain);
-			break;
-		}
-	}
-	rte_spinlock_unlock(&thub->sl);
-	if (!tun || num_items != 1)
-		return rte_flow_error_set(err, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  "invalid argument");
-	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
-		mlx5_flow_tunnel_free(dev, tun);
-	return 0;
-}
-
+			      uint32_t num_items, struct rte_flow_error *err);
 static int
 mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
 				struct rte_flow_action *pmd_actions,
 				uint32_t num_actions,
-				struct rte_flow_error *err)
-{
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
-
-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (&tun->action == pmd_actions) {
-			LIST_REMOVE(tun, chain);
-			break;
-		}
-	}
-	rte_spinlock_unlock(&thub->sl);
-	if (!tun || num_actions != 1)
-		return rte_flow_error_set(err, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  "invalid argument");
-	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
-		mlx5_flow_tunnel_free(dev, tun);
-
-	return 0;
-}
-
+				struct rte_flow_error *err);
 static int
 mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
 				  struct rte_mbuf *m,
 				  struct rte_flow_restore_info *info,
-				  struct rte_flow_error *err)
-{
-	uint64_t ol_flags = m->ol_flags;
-	const struct mlx5_flow_tbl_data_entry *tble;
-	const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;
-
-	if ((ol_flags & mask) != mask)
-		goto err;
-	tble = tunnel_mark_decode(dev, m->hash.fdir.hi);
-	if (!tble) {
-		DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x",
-			dev->data->port_id, m->hash.fdir.hi);
-		goto err;
-	}
-	MLX5_ASSERT(tble->tunnel);
-	memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));
-	info->group_id = tble->group_id;
-	info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |
-		      RTE_FLOW_RESTORE_INFO_GROUP_ID |
-		      RTE_FLOW_RESTORE_INFO_ENCAPSULATED;
-
-	return 0;
-
-err:
-	return rte_flow_error_set(err, EINVAL,
-				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-				  "failed to get restore info");
-}
+				  struct rte_flow_error *err);
 
 static const struct rte_flow_ops mlx5_flow_ops = {
 	.validate = mlx5_flow_validate,
@@ -4160,174 +4066,38 @@ flow_hairpin_split(struct rte_eth_dev *dev,
 	return 0;
 }
 
-__extension__
-union tunnel_offload_mark {
-	uint32_t val;
-	struct {
-		uint32_t app_reserve:8;
-		uint32_t table_id:15;
-		uint32_t transfer:1;
-		uint32_t _unused_:8;
-	};
-};
-
-struct tunnel_default_miss_ctx {
-	uint16_t *queue;
-	__extension__
-	union {
-		struct rte_flow_action_rss action_rss;
-		struct rte_flow_action_queue miss_queue;
-		struct rte_flow_action_jump miss_jump;
-		uint8_t raw[0];
-	};
-};
-
+/**
+ * The last stage of splitting chain, just creates the subflow
+ * without any modification.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] flow
+ *   Parent flow structure pointer.
+ * @param[in, out] sub_flow
+ *   Pointer to return the created subflow, may be NULL.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] items
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[in] flow_split_info
+ *   Pointer to flow split info structure.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ * @return
+ *   0 on success, negative value otherwise
+ */
 static int
-flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
-			     struct rte_flow *flow,
-			     const struct rte_flow_attr *attr,
-			     const struct rte_flow_action *app_actions,
-			     uint32_t flow_idx,
-			     struct tunnel_default_miss_ctx *ctx,
-			     struct rte_flow_error *error)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow *dev_flow;
-	struct rte_flow_attr miss_attr = *attr;
-	const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;
-	const struct rte_flow_item miss_items[2] = {
-		{
-			.type = RTE_FLOW_ITEM_TYPE_ETH,
-			.spec = NULL,
-			.last = NULL,
-			.mask = NULL
-		},
-		{
-			.type = RTE_FLOW_ITEM_TYPE_END,
-			.spec = NULL,
-			.last = NULL,
-			.mask = NULL
-		}
-	};
-	union tunnel_offload_mark mark_id;
-	struct rte_flow_action_mark miss_mark;
-	struct rte_flow_action miss_actions[3] = {
-		[0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },
-		[2] = { .type = RTE_FLOW_ACTION_TYPE_END,  .conf = NULL }
-	};
-	const struct rte_flow_action_jump *jump_data;
-	uint32_t i, flow_table = 0; /* prevent compilation warning */
-	struct flow_grp_info grp_info = {
-		.external = 1,
-		.transfer = attr->transfer,
-		.fdb_def_rule = !!priv->fdb_def_rule,
-		.std_tbl_fix = 0,
-	};
-	int ret;
-
-	if (!attr->transfer) {
-		uint32_t q_size;
-
-		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;
-		q_size = priv->reta_idx_n * sizeof(ctx->queue[0]);
-		ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,
-					 0, SOCKET_ID_ANY);
-		if (!ctx->queue)
-			return rte_flow_error_set
-				(error, ENOMEM,
-				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-				NULL, "invalid default miss RSS");
-		ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
-		ctx->action_rss.level = 0,
-		ctx->action_rss.types = priv->rss_conf.rss_hf,
-		ctx->action_rss.key_len = priv->rss_conf.rss_key_len,
-		ctx->action_rss.queue_num = priv->reta_idx_n,
-		ctx->action_rss.key = priv->rss_conf.rss_key,
-		ctx->action_rss.queue = ctx->queue;
-		if (!priv->reta_idx_n || !priv->rxqs_n)
-			return rte_flow_error_set
-				(error, EINVAL,
-				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-				NULL, "invalid port configuration");
-		if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
-			ctx->action_rss.types = 0;
-		for (i = 0; i != priv->reta_idx_n; ++i)
-			ctx->queue[i] = (*priv->reta_idx)[i];
-	} else {
-		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;
-		ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;
-	}
-	miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;
-	for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);
-	jump_data = app_actions->conf;
-	miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
-	miss_attr.group = jump_data->group;
-	ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
-				       &flow_table, grp_info, error);
-	if (ret)
-		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-					  NULL, "invalid tunnel id");
-	mark_id.app_reserve = 0;
-	mark_id.table_id = tunnel_flow_tbl_to_id(flow_table);
-	mark_id.transfer = !!attr->transfer;
-	mark_id._unused_ = 0;
-	miss_mark.id = mark_id.val;
-	dev_flow = flow_drv_prepare(dev, flow, &miss_attr,
-				    miss_items, miss_actions, flow_idx, error);
-	if (!dev_flow)
-		return -rte_errno;
-	dev_flow->flow = flow;
-	dev_flow->external = true;
-	dev_flow->tunnel = tunnel;
-	/* Subflow object was created, we must include one in the list. */
-	SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
-		      dev_flow->handle, next);
-	DRV_LOG(DEBUG,
-		"port %u tunnel type=%d id=%u miss rule priority=%u group=%u",
-		dev->data->port_id, tunnel->app_tunnel.type,
-		tunnel->tunnel_id, miss_attr.priority, miss_attr.group);
-	ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,
-				  miss_actions, error);
-	if (!ret)
-		ret = flow_mreg_update_copy_table(dev, flow, miss_actions,
-						  error);
-
-	return ret;
-}
-
-/**
- * The last stage of splitting chain, just creates the subflow
- * without any modification.
- *
- * @param[in] dev
- *   Pointer to Ethernet device.
- * @param[in] flow
- *   Parent flow structure pointer.
- * @param[in, out] sub_flow
- *   Pointer to return the created subflow, may be NULL.
- * @param[in] attr
- *   Flow rule attributes.
- * @param[in] items
- *   Pattern specification (list terminated by the END pattern item).
- * @param[in] actions
- *   Associated actions (list terminated by the END action).
- * @param[in] flow_split_info
- *   Pointer to flow split info structure.
- * @param[out] error
- *   Perform verbose error reporting if not NULL.
- * @return
- *   0 on success, negative value otherwise
- */
-static int
-flow_create_split_inner(struct rte_eth_dev *dev,
-			struct rte_flow *flow,
-			struct mlx5_flow **sub_flow,
-			const struct rte_flow_attr *attr,
-			const struct rte_flow_item items[],
-			const struct rte_flow_action actions[],
-			struct mlx5_flow_split_info *flow_split_info,
-			struct rte_flow_error *error)
+flow_create_split_inner(struct rte_eth_dev *dev,
+			struct rte_flow *flow,
+			struct mlx5_flow **sub_flow,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item items[],
+			const struct rte_flow_action actions[],
+			struct mlx5_flow_split_info *flow_split_info,
+			struct rte_flow_error *error)
 {
 	struct mlx5_flow *dev_flow;
 
@@ -6953,26 +6723,6 @@ mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
 	sh->cmng.pending_queries--;
 }
 
-static const struct mlx5_flow_tbl_data_entry  *
-tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_ctx_shared *sh = priv->sh;
-	struct mlx5_hlist_entry *he;
-	union tunnel_offload_mark mbits = { .val = mark };
-	union mlx5_flow_tbl_key table_key = {
-		{
-			.table_id = tunnel_id_to_flow_tbl(mbits.table_id),
-			.dummy = 0,
-			.domain = !!mbits.transfer,
-			.direction = 0,
-		}
-	};
-	he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
-	return he ?
-	       container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
-}
-
 static void
 mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,
 				   struct mlx5_hlist_entry *entry)
@@ -7017,35 +6767,6 @@ mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list,
 	return NULL;
 }
 
-static uint32_t
-tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
-				const struct mlx5_flow_tunnel *tunnel,
-				uint32_t group, uint32_t *table,
-				struct rte_flow_error *error)
-{
-	struct mlx5_hlist_entry *he;
-	struct tunnel_tbl_entry *tte;
-	union tunnel_tbl_key key = {
-		.tunnel_id = tunnel ? tunnel->tunnel_id : 0,
-		.group = group
-	};
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_hlist *group_hash;
-
-	group_hash = tunnel ? tunnel->groups : thub->groups;
-	he = mlx5_hlist_register(group_hash, key.val, NULL);
-	if (!he)
-		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
-					  NULL,
-					  "tunnel group index not supported");
-	tte = container_of(he, typeof(*tte), hash);
-	*table = tte->flow_table;
-	DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x",
-		dev->data->port_id, key.tunnel_id, group, *table);
-	return 0;
-}
-
 static int
 flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table,
 		    struct flow_grp_info grp_info, struct rte_flow_error *error)
@@ -7505,64 +7226,263 @@ mlx5_shared_action_flush(struct rte_eth_dev *dev)
 	return ret;
 }
 
-static void
-mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
-		      struct mlx5_flow_tunnel *tunnel)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-
-	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
-		dev->data->port_id, tunnel->tunnel_id);
-	RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));
-	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],
-			tunnel->tunnel_id);
-	mlx5_hlist_destroy(tunnel->groups);
-	mlx5_free(tunnel);
-}
+#ifndef HAVE_MLX5DV_DR
+#define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
+#else
+#define MLX5_DOMAIN_SYNC_FLOW \
+	(MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
+#endif
 
-static struct mlx5_flow_tunnel *
-mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
 {
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
-
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (tun->tunnel_id == id)
-			break;
-	}
+	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+	const struct mlx5_flow_driver_ops *fops;
+	int ret;
+	struct rte_flow_attr attr = { .transfer = 0 };
 
-	return tun;
+	fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+	ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
+	if (ret > 0)
+		ret = -ret;
+	return ret;
 }
 
-static struct mlx5_flow_tunnel *
-mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
-			  const struct rte_flow_tunnel *app_tunnel)
+/**
+ * tunnel offload functionality is defined for DV environment only
+ */
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+__extension__
+union tunnel_offload_mark {
+	uint32_t val;
+	struct {
+		uint32_t app_reserve:8;
+		uint32_t table_id:15;
+		uint32_t transfer:1;
+		uint32_t _unused_:8;
+	};
+};
+
+static int
+flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
+			     struct rte_flow *flow,
+			     const struct rte_flow_attr *attr,
+			     const struct rte_flow_action *app_actions,
+			     uint32_t flow_idx,
+			     struct tunnel_default_miss_ctx *ctx,
+			     struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_tunnel *tunnel;
-	uint32_t id;
+	struct mlx5_flow *dev_flow;
+	struct rte_flow_attr miss_attr = *attr;
+	const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;
+	const struct rte_flow_item miss_items[2] = {
+		{
+			.type = RTE_FLOW_ITEM_TYPE_ETH,
+			.spec = NULL,
+			.last = NULL,
+			.mask = NULL
+		},
+		{
+			.type = RTE_FLOW_ITEM_TYPE_END,
+			.spec = NULL,
+			.last = NULL,
+			.mask = NULL
+		}
+	};
+	union tunnel_offload_mark mark_id;
+	struct rte_flow_action_mark miss_mark;
+	struct rte_flow_action miss_actions[3] = {
+		[0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },
+		[2] = { .type = RTE_FLOW_ACTION_TYPE_END,  .conf = NULL }
+	};
+	const struct rte_flow_action_jump *jump_data;
+	uint32_t i, flow_table = 0; /* prevent compilation warning */
+	struct flow_grp_info grp_info = {
+		.external = 1,
+		.transfer = attr->transfer,
+		.fdb_def_rule = !!priv->fdb_def_rule,
+		.std_tbl_fix = 0,
+	};
+	int ret;
 
-	mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
-			  &id);
-	if (id >= MLX5_MAX_TUNNELS) {
-		mlx5_ipool_free(priv->sh->ipool
-				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
-		DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id);
-		return NULL;
-	} else if (!id) {
-		return NULL;
-	}
-	/**
-	 * mlx5 flow tunnel is an auxiliary data structure
-	 * It's not part of IO. No need to allocate it from
-	 * huge pages pools dedicated for IO
-	 */
-	tunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel),
-			     0, SOCKET_ID_ANY);
-	if (!tunnel) {
-		mlx5_ipool_free(priv->sh->ipool
-				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
-		return NULL;
+	if (!attr->transfer) {
+		uint32_t q_size;
+
+		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;
+		q_size = priv->reta_idx_n * sizeof(ctx->queue[0]);
+		ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,
+					 0, SOCKET_ID_ANY);
+		if (!ctx->queue)
+			return rte_flow_error_set
+				(error, ENOMEM,
+				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+				NULL, "invalid default miss RSS");
+		ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
+		ctx->action_rss.level = 0,
+		ctx->action_rss.types = priv->rss_conf.rss_hf,
+		ctx->action_rss.key_len = priv->rss_conf.rss_key_len,
+		ctx->action_rss.queue_num = priv->reta_idx_n,
+		ctx->action_rss.key = priv->rss_conf.rss_key,
+		ctx->action_rss.queue = ctx->queue;
+		if (!priv->reta_idx_n || !priv->rxqs_n)
+			return rte_flow_error_set
+				(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+				NULL, "invalid port configuration");
+		if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+			ctx->action_rss.types = 0;
+		for (i = 0; i != priv->reta_idx_n; ++i)
+			ctx->queue[i] = (*priv->reta_idx)[i];
+	} else {
+		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;
+		ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;
+	}
+	miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;
+	for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);
+	jump_data = app_actions->conf;
+	miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
+	miss_attr.group = jump_data->group;
+	ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
+				       &flow_table, grp_info, error);
+	if (ret)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  NULL, "invalid tunnel id");
+	mark_id.app_reserve = 0;
+	mark_id.table_id = tunnel_flow_tbl_to_id(flow_table);
+	mark_id.transfer = !!attr->transfer;
+	mark_id._unused_ = 0;
+	miss_mark.id = mark_id.val;
+	dev_flow = flow_drv_prepare(dev, flow, &miss_attr,
+				    miss_items, miss_actions, flow_idx, error);
+	if (!dev_flow)
+		return -rte_errno;
+	dev_flow->flow = flow;
+	dev_flow->external = true;
+	dev_flow->tunnel = tunnel;
+	/* Subflow object was created, we must include one in the list. */
+	SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
+		      dev_flow->handle, next);
+	DRV_LOG(DEBUG,
+		"port %u tunnel type=%d id=%u miss rule priority=%u group=%u",
+		dev->data->port_id, tunnel->app_tunnel.type,
+		tunnel->tunnel_id, miss_attr.priority, miss_attr.group);
+	ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,
+				  miss_actions, error);
+	if (!ret)
+		ret = flow_mreg_update_copy_table(dev, flow, miss_actions,
+						  error);
+
+	return ret;
+}
+
+static const struct mlx5_flow_tbl_data_entry  *
+tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
+	struct mlx5_hlist_entry *he;
+	union tunnel_offload_mark mbits = { .val = mark };
+	union mlx5_flow_tbl_key table_key = {
+		{
+			.table_id = tunnel_id_to_flow_tbl(mbits.table_id),
+			.dummy = 0,
+			.domain = !!mbits.transfer,
+			.direction = 0,
+		}
+	};
+	he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
+	return he ?
+	       container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
+}
+
+static uint32_t
+tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
+				const struct mlx5_flow_tunnel *tunnel,
+				uint32_t group, uint32_t *table,
+				struct rte_flow_error *error)
+{
+	struct mlx5_hlist_entry *he;
+	struct tunnel_tbl_entry *tte;
+	union tunnel_tbl_key key = {
+		.tunnel_id = tunnel ? tunnel->tunnel_id : 0,
+		.group = group
+	};
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+	struct mlx5_hlist *group_hash;
+
+	group_hash = tunnel ? tunnel->groups : thub->groups;
+	he = mlx5_hlist_register(group_hash, key.val, NULL);
+	if (!he)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+					  NULL,
+					  "tunnel group index not supported");
+	tte = container_of(he, typeof(*tte), hash);
+	*table = tte->flow_table;
+	DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x",
+		dev->data->port_id, key.tunnel_id, group, *table);
+	return 0;
+}
+
+static void
+mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
+		      struct mlx5_flow_tunnel *tunnel)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
+		dev->data->port_id, tunnel->tunnel_id);
+	RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));
+	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],
+			tunnel->tunnel_id);
+	mlx5_hlist_destroy(tunnel->groups);
+	mlx5_free(tunnel);
+}
+
+static struct mlx5_flow_tunnel *
+mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+{
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+	struct mlx5_flow_tunnel *tun;
+
+	LIST_FOREACH(tun, &thub->tunnels, chain) {
+		if (tun->tunnel_id == id)
+			break;
+	}
+
+	return tun;
+}
+
+static struct mlx5_flow_tunnel *
+mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
+			  const struct rte_flow_tunnel *app_tunnel)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_tunnel *tunnel;
+	uint32_t id;
+
+	mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
+			  &id);
+	if (id >= MLX5_MAX_TUNNELS) {
+		mlx5_ipool_free(priv->sh->ipool
+				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
+		DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id);
+		return NULL;
+	} else if (!id) {
+		return NULL;
+	}
+	/**
+	 * mlx5 flow tunnel is an auxiliary data structure
+	 * It's not part of IO. No need to allocate it from
+	 * huge pages pools dedicated for IO
+	 */
+	tunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel),
+			     0, SOCKET_ID_ANY);
+	if (!tunnel) {
+		mlx5_ipool_free(priv->sh->ipool
+				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
+		return NULL;
 	}
 	tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
 					   mlx5_flow_tunnel_grp2tbl_create_cb,
@@ -7671,23 +7591,228 @@ int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)
 	return err;
 }
 
-#ifndef HAVE_MLX5DV_DR
-#define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
-#else
-#define MLX5_DOMAIN_SYNC_FLOW \
-	(MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
-#endif
+static int
+mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
+		    struct rte_flow_tunnel *app_tunnel,
+		    struct rte_flow_action **actions,
+		    uint32_t *num_of_actions,
+		    struct rte_flow_error *error)
+{
+	int ret;
+	struct mlx5_flow_tunnel *tunnel;
+	const char *err_msg = NULL;
+	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
 
-int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
+	if (!verdict)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+					  err_msg);
+	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
+	if (ret < 0) {
+		return rte_flow_error_set(error, ret,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+					  "failed to initialize pmd tunnel");
+	}
+	*actions = &tunnel->action;
+	*num_of_actions = 1;
+	return 0;
+}
+
+static int
+mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
+		       struct rte_flow_tunnel *app_tunnel,
+		       struct rte_flow_item **items,
+		       uint32_t *num_of_items,
+		       struct rte_flow_error *error)
 {
-	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
-	const struct mlx5_flow_driver_ops *fops;
 	int ret;
-	struct rte_flow_attr attr = { .transfer = 0 };
+	struct mlx5_flow_tunnel *tunnel;
+	const char *err_msg = NULL;
+	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
 
-	fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
-	ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
-	if (ret > 0)
-		ret = -ret;
-	return ret;
+	if (!verdict)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					  err_msg);
+	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
+	if (ret < 0) {
+		return rte_flow_error_set(error, ret,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					  "failed to initialize pmd tunnel");
+	}
+	*items = &tunnel->item;
+	*num_of_items = 1;
+	return 0;
+}
+static int
+mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
+			      struct rte_flow_item *pmd_items,
+			      uint32_t num_items, struct rte_flow_error *err)
+{
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+	struct mlx5_flow_tunnel *tun;
+
+	rte_spinlock_lock(&thub->sl);
+	LIST_FOREACH(tun, &thub->tunnels, chain) {
+		if (&tun->item == pmd_items) {
+			LIST_REMOVE(tun, chain);
+			break;
+		}
+	}
+	rte_spinlock_unlock(&thub->sl);
+	if (!tun || num_items != 1)
+		return rte_flow_error_set(err, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					  "invalid argument");
+	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
+		mlx5_flow_tunnel_free(dev, tun);
+	return 0;
+}
+
+static int
+mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
+				struct rte_flow_action *pmd_actions,
+				uint32_t num_actions,
+				struct rte_flow_error *err)
+{
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+	struct mlx5_flow_tunnel *tun;
+
+	rte_spinlock_lock(&thub->sl);
+	LIST_FOREACH(tun, &thub->tunnels, chain) {
+		if (&tun->action == pmd_actions) {
+			LIST_REMOVE(tun, chain);
+			break;
+		}
+	}
+	rte_spinlock_unlock(&thub->sl);
+	if (!tun || num_actions != 1)
+		return rte_flow_error_set(err, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					  "invalid argument");
+	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
+		mlx5_flow_tunnel_free(dev, tun);
+
+	return 0;
 }
+
+static int
+mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
+				  struct rte_mbuf *m,
+				  struct rte_flow_restore_info *info,
+				  struct rte_flow_error *err)
+{
+	uint64_t ol_flags = m->ol_flags;
+	const struct mlx5_flow_tbl_data_entry *tble;
+	const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;
+
+	if ((ol_flags & mask) != mask)
+		goto err;
+	tble = tunnel_mark_decode(dev, m->hash.fdir.hi);
+	if (!tble) {
+		DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x",
+			dev->data->port_id, m->hash.fdir.hi);
+		goto err;
+	}
+	MLX5_ASSERT(tble->tunnel);
+	memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));
+	info->group_id = tble->group_id;
+	info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |
+		      RTE_FLOW_RESTORE_INFO_GROUP_ID |
+		      RTE_FLOW_RESTORE_INFO_ENCAPSULATED;
+
+	return 0;
+
+err:
+	return rte_flow_error_set(err, EINVAL,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				  "failed to get restore info");
+}
+
+#else /* HAVE_IBV_FLOW_DV_SUPPORT */
+static int
+mlx5_flow_tunnel_decap_set(__rte_unused struct rte_eth_dev *dev,
+			   __rte_unused struct rte_flow_tunnel *app_tunnel,
+			   __rte_unused struct rte_flow_action **actions,
+			   __rte_unused uint32_t *num_of_actions,
+			   __rte_unused struct rte_flow_error *error)
+{
+	return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_match(__rte_unused struct rte_eth_dev *dev,
+		       __rte_unused struct rte_flow_tunnel *app_tunnel,
+		       __rte_unused struct rte_flow_item **items,
+		       __rte_unused uint32_t *num_of_items,
+		       __rte_unused struct rte_flow_error *error)
+{
+	return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_item_release(__rte_unused struct rte_eth_dev *dev,
+			      __rte_unused struct rte_flow_item *pmd_items,
+			      __rte_unused uint32_t num_items,
+			      __rte_unused struct rte_flow_error *err)
+{
+	return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_action_release(__rte_unused struct rte_eth_dev *dev,
+				__rte_unused struct rte_flow_action *pmd_action,
+				__rte_unused uint32_t num_actions,
+				__rte_unused struct rte_flow_error *err)
+{
+	return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_get_restore_info(__rte_unused struct rte_eth_dev *dev,
+				  __rte_unused struct rte_mbuf *m,
+				  __rte_unused struct rte_flow_restore_info *i,
+				  __rte_unused struct rte_flow_error *err)
+{
+	return -ENOTSUP;
+}
+
+static int
+flow_tunnel_add_default_miss(__rte_unused struct rte_eth_dev *dev,
+			     __rte_unused struct rte_flow *flow,
+			     __rte_unused const struct rte_flow_attr *attr,
+			     __rte_unused const struct rte_flow_action *actions,
+			     __rte_unused uint32_t flow_idx,
+			     __rte_unused struct tunnel_default_miss_ctx *ctx,
+			     __rte_unused struct rte_flow_error *error)
+{
+	return -ENOTSUP;
+}
+
+static struct mlx5_flow_tunnel *
+mlx5_find_tunnel_id(__rte_unused struct rte_eth_dev *dev,
+		    __rte_unused uint32_t id)
+{
+	return NULL;
+}
+
+static void
+mlx5_flow_tunnel_free(__rte_unused struct rte_eth_dev *dev,
+		      __rte_unused struct mlx5_flow_tunnel *tunnel)
+{
+}
+
+static uint32_t
+tunnel_flow_group_to_flow_table(__rte_unused struct rte_eth_dev *dev,
+				__rte_unused const struct mlx5_flow_tunnel *t,
+				__rte_unused uint32_t group,
+				__rte_unused uint32_t *table,
+				struct rte_flow_error *error)
+{
+	return rte_flow_error_set(error, ENOTSUP,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				  "tunnel offload requires DV support");
+}
+
+#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
+
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 5fac8672fc..fbc6173fcb 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -991,8 +991,13 @@ mlx5_tunnel_hub(struct rte_eth_dev *dev)
 static inline bool
 is_tunnel_offload_active(struct rte_eth_dev *dev)
 {
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	struct mlx5_priv *priv = dev->data->dev_private;
 	return !!priv->config.dv_miss_info;
+#else
+	RTE_SET_USED(dev);
+	return false;
+#endif
 }
 
 static inline bool
-- 
2.29.2


^ permalink raw reply	[flat|nested] 32+ messages in thread

* [dpdk-dev] [PATCH v3 3/6] net/mlx5: fix structure passing method in function call
  2020-11-16  9:13 ` [dpdk-dev] [PATCH v3 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
  2020-11-16  9:13   ` [dpdk-dev] [PATCH v3 1/6] net/mlx5: fix tunnel offload callback names Gregory Etelson
  2020-11-16  9:13   ` [dpdk-dev] [PATCH v3 2/6] net/mlx5: fix build with Direct Verbs disabled Gregory Etelson
@ 2020-11-16  9:13   ` Gregory Etelson
  2020-11-16  9:13   ` [dpdk-dev] [PATCH v3 4/6] net/mlx5: fix tunnel offload object allocation Gregory Etelson
                     ` (2 subsequent siblings)
  5 siblings, 0 replies; 32+ messages in thread
From: Gregory Etelson @ 2020-11-16  9:13 UTC (permalink / raw)
  To: dev; +Cc: getelson, matan, rasland, Viacheslav Ovsiienko, Shahaf Shuler

The tunnel offload implementation introduced the flow_grp_info
bit-field structure. Since the structure size is only 64 bits, the
code passed it by value in function calls.

This patch changes the passing method: the structure is now passed by
reference.
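
A minimal standalone sketch of the two calling conventions, using an
illustrative structure and function names (not the PMD's; the fields
mirror the flow_grp_info fields visible in the diff below):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative 64-bit bit-field structure. */
struct grp_info {
	uint64_t external:1;
	uint64_t transfer:1;
	uint64_t fdb_def_rule:1;
	uint64_t std_tbl_fix:1;
	uint64_t skip_scale:1;
};

/* Before: the whole 64-bit structure is copied on every call. */
static bool
fdb_rule_by_value(struct grp_info info)
{
	return info.transfer && info.external && info.fdb_def_rule;
}

/* After: only a pointer is passed, so one instance can be shared
 * down a chain of helpers without copying. */
static bool
fdb_rule_by_ref(const struct grp_info *info)
{
	return info->transfer && info->external && info->fdb_def_rule;
}

int
main(void)
{
	struct grp_info info = {
		.external = 1, .transfer = 1, .fdb_def_rule = 1,
	};

	printf("by value: %d\n", fdb_rule_by_value(info));
	printf("by ref:   %d\n", fdb_rule_by_ref(&info));
	return 0;
}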

Fixes: 4ec6360de37d ("net/mlx5: implement tunnel offload")

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c    | 20 +++++++++++---------
 drivers/net/mlx5/mlx5_flow.h    |  4 ++--
 drivers/net/mlx5/mlx5_flow_dv.c | 10 +++++-----
 3 files changed, 18 insertions(+), 16 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index e4fe78df4c..4216d3d18d 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -6769,9 +6769,11 @@ mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list,
 
 static int
 flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table,
-		    struct flow_grp_info grp_info, struct rte_flow_error *error)
+		    const struct flow_grp_info *grp_info,
+		    struct rte_flow_error *error)
 {
-	if (grp_info.transfer && grp_info.external && grp_info.fdb_def_rule) {
+	if (grp_info->transfer && grp_info->external &&
+	    grp_info->fdb_def_rule) {
 		if (group == UINT32_MAX)
 			return rte_flow_error_set
 						(error, EINVAL,
@@ -6828,25 +6830,25 @@ int
 mlx5_flow_group_to_table(struct rte_eth_dev *dev,
 			 const struct mlx5_flow_tunnel *tunnel,
 			 uint32_t group, uint32_t *table,
-			 struct flow_grp_info grp_info,
+			 const struct flow_grp_info *grp_info,
 			 struct rte_flow_error *error)
 {
 	int ret;
 	bool standard_translation;
 
-	if (!grp_info.skip_scale && grp_info.external &&
+	if (!grp_info->skip_scale && grp_info->external &&
 	    group < MLX5_MAX_TABLES_EXTERNAL)
 		group *= MLX5_FLOW_TABLE_FACTOR;
 	if (is_tunnel_offload_active(dev)) {
-		standard_translation = !grp_info.external ||
-					grp_info.std_tbl_fix;
+		standard_translation = !grp_info->external ||
+					grp_info->std_tbl_fix;
 	} else {
 		standard_translation = true;
 	}
 	DRV_LOG(DEBUG,
 		"port %u group=%#x transfer=%d external=%d fdb_def_rule=%d translate=%s",
-		dev->data->port_id, group, grp_info.transfer,
-		grp_info.external, grp_info.fdb_def_rule,
+		dev->data->port_id, group, grp_info->transfer,
+		grp_info->external, grp_info->fdb_def_rule,
 		standard_translation ? "STANDARD" : "TUNNEL");
 	if (standard_translation)
 		ret = flow_group_to_table(dev->data->port_id, group, table,
@@ -7343,7 +7345,7 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
 	miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
 	miss_attr.group = jump_data->group;
 	ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
-				       &flow_table, grp_info, error);
+				       &flow_table, &grp_info, error);
 	if (ret)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index fbc6173fcb..c33c0fee7c 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1273,8 +1273,8 @@ tunnel_use_standard_attr_group_translate
 int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
 			     const struct mlx5_flow_tunnel *tunnel,
 			     uint32_t group, uint32_t *table,
-			     struct flow_grp_info flags,
-				 struct rte_flow_error *error);
+			     const struct flow_grp_info *flags,
+			     struct rte_flow_error *error);
 uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
 				     int tunnel, uint64_t layer_types,
 				     uint64_t hash_fields);
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 62d9ca9ffb..25ab9adee6 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -3935,7 +3935,7 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev,
 	target_group =
 		((const struct rte_flow_action_jump *)action->conf)->group;
 	ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
-				       grp_info, error);
+				       &grp_info, error);
 	if (ret)
 		return ret;
 	if (attributes->group == target_group &&
@@ -5103,7 +5103,7 @@ static int
 flow_dv_validate_attributes(struct rte_eth_dev *dev,
 			    const struct mlx5_flow_tunnel *tunnel,
 			    const struct rte_flow_attr *attributes,
-			    struct flow_grp_info grp_info,
+			    const struct flow_grp_info *grp_info,
 			    struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
@@ -5258,7 +5258,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 	}
 	grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
 				(dev, tunnel, attr, items, actions);
-	ret = flow_dv_validate_attributes(dev, tunnel, attr, grp_info, error);
+	ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
 	if (ret < 0)
 		return ret;
 	is_root = (uint64_t)ret;
@@ -9597,7 +9597,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
 	grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
 				(dev, tunnel, attr, items, actions);
 	ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
-				       grp_info, error);
+				       &grp_info, error);
 	if (ret)
 		return ret;
 	dev_flow->dv.group = table;
@@ -9944,7 +9944,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
 			ret = mlx5_flow_group_to_table(dev, tunnel,
 						       jump_group,
 						       &table,
-						       grp_info, error);
+						       &grp_info, error);
 			if (ret)
 				return ret;
 			tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
-- 
2.29.2


^ permalink raw reply	[flat|nested] 32+ messages in thread

* [dpdk-dev] [PATCH v3 4/6] net/mlx5: fix tunnel offload object allocation
  2020-11-16  9:13 ` [dpdk-dev] [PATCH v3 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
                     ` (2 preceding siblings ...)
  2020-11-16  9:13   ` [dpdk-dev] [PATCH v3 3/6] net/mlx5: fix structure passing method in function call Gregory Etelson
@ 2020-11-16  9:13   ` Gregory Etelson
  2020-11-16  9:13   ` [dpdk-dev] [PATCH v3 5/6] net/mlx5: fix tunnel offload hub multi-thread protection Gregory Etelson
  2020-11-16  9:13   ` [dpdk-dev] [PATCH v3 6/6] net/mlx5: fix crash in tunnel offload setup Gregory Etelson
  5 siblings, 0 replies; 32+ messages in thread
From: Gregory Etelson @ 2020-11-16  9:13 UTC (permalink / raw)
  To: dev
  Cc: getelson, matan, rasland, Viacheslav Ovsiienko, Shahaf Shuler,
	Xueming Li

The original patch allocated tunnel offload objects with invalid
indexes. As a result, PMD tunnel object allocation failed.

In this patch, the indexed pool provides both an index and memory for
a new tunnel offload object. The tunnel offload ipool is also moved
under DV-enabled code only.
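
For illustration, a toy allocator showing the pattern the fix relies
on: a single zmalloc-style call returns both zeroed object memory and
its pool index, mirroring the mlx5_ipool_zmalloc() usage in the diff
below. This is a simplified sketch, not the real mlx5 ipool code:

#include <stdint.h>
#include <stdlib.h>

#define TOY_POOL_SIZE 16

struct toy_ipool {
	void *slots[TOY_POOL_SIZE];
	size_t obj_size;
};

/* Allocate a zeroed object and report its 1-based index in one call;
 * index 0 is reserved as "invalid". */
static void *
toy_ipool_zmalloc(struct toy_ipool *pool, uint32_t *idx)
{
	uint32_t i;

	for (i = 0; i < TOY_POOL_SIZE; i++) {
		if (pool->slots[i] == NULL) {
			pool->slots[i] = calloc(1, pool->obj_size);
			if (pool->slots[i] == NULL)
				return NULL;
			*idx = i + 1;
			return pool->slots[i];
		}
	}
	return NULL; /* pool exhausted */
}

/* Release both the memory and the index. */
static void
toy_ipool_free(struct toy_ipool *pool, uint32_t idx)
{
	if (idx == 0 || idx > TOY_POOL_SIZE)
		return;
	free(pool->slots[idx - 1]);
	pool->slots[idx - 1] = NULL;
}

int
main(void)
{
	struct toy_ipool pool = { .obj_size = 64 };
	uint32_t id = 0;
	void *obj = toy_ipool_zmalloc(&pool, &id);

	if (obj != NULL)
		toy_ipool_free(&pool, id);
	return 0;
}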

Fixes: f2e8093 ("net/mlx5: use indexed pool as id generator")

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5.c      | 50 ++++++++++++++++++------------------
 drivers/net/mlx5/mlx5.h      |  4 +--
 drivers/net/mlx5/mlx5_flow.c | 35 ++++++++-----------------
 3 files changed, 37 insertions(+), 52 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 43344391df..31011c3a72 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -186,7 +186,7 @@ static pthread_mutex_t mlx5_dev_ctx_list_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-	{
+	[MLX5_IPOOL_DECAP_ENCAP] = {
 		.size = sizeof(struct mlx5_flow_dv_encap_decap_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -197,7 +197,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_encap_decap_ipool",
 	},
-	{
+	[MLX5_IPOOL_PUSH_VLAN] = {
 		.size = sizeof(struct mlx5_flow_dv_push_vlan_action_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -208,7 +208,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_push_vlan_ipool",
 	},
-	{
+	[MLX5_IPOOL_TAG] = {
 		.size = sizeof(struct mlx5_flow_dv_tag_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -219,7 +219,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_tag_ipool",
 	},
-	{
+	[MLX5_IPOOL_PORT_ID] = {
 		.size = sizeof(struct mlx5_flow_dv_port_id_action_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -230,7 +230,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_port_id_ipool",
 	},
-	{
+	[MLX5_IPOOL_JUMP] = {
 		.size = sizeof(struct mlx5_flow_tbl_data_entry),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -241,7 +241,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_jump_ipool",
 	},
-	{
+	[MLX5_IPOOL_SAMPLE] = {
 		.size = sizeof(struct mlx5_flow_dv_sample_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -252,7 +252,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_sample_ipool",
 	},
-	{
+	[MLX5_IPOOL_DEST_ARRAY] = {
 		.size = sizeof(struct mlx5_flow_dv_dest_array_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -263,8 +263,19 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_dest_array_ipool",
 	},
+	[MLX5_IPOOL_TUNNEL_ID] = {
+		.size = sizeof(struct mlx5_flow_tunnel),
+		.need_lock = 1,
+		.release_mem_en = 1,
+		.type = "mlx5_tunnel_offload",
+	},
+	[MLX5_IPOOL_TNL_TBL_ID] = {
+		.size = 0,
+		.need_lock = 1,
+		.type = "mlx5_flow_tnl_tbl_ipool",
+	},
 #endif
-	{
+	[MLX5_IPOOL_MTR] = {
 		.size = sizeof(struct mlx5_flow_meter),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -275,7 +286,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_meter_ipool",
 	},
-	{
+	[MLX5_IPOOL_MCP] = {
 		.size = sizeof(struct mlx5_flow_mreg_copy_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -286,7 +297,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_mcp_ipool",
 	},
-	{
+	[MLX5_IPOOL_HRXQ] = {
 		.size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -297,7 +308,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_hrxq_ipool",
 	},
-	{
+	[MLX5_IPOOL_MLX5_FLOW] = {
 		/*
 		 * MLX5_IPOOL_MLX5_FLOW size varies for DV and VERBS flows.
 		 * It set in run time according to PCI function configuration.
@@ -312,7 +323,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_flow_handle_ipool",
 	},
-	{
+	[MLX5_IPOOL_RTE_FLOW] = {
 		.size = sizeof(struct rte_flow),
 		.trunk_size = 4096,
 		.need_lock = 1,
@@ -321,22 +332,12 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "rte_flow_ipool",
 	},
-	{
+	[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID] = {
 		.size = 0,
 		.need_lock = 1,
 		.type = "mlx5_flow_rss_id_ipool",
 	},
-	{
-		.size = 0,
-		.need_lock = 1,
-		.type = "mlx5_flow_tnl_flow_ipool",
-	},
-	{
-		.size = 0,
-		.need_lock = 1,
-		.type = "mlx5_flow_tnl_tbl_ipool",
-	},
-	{
+	[MLX5_IPOOL_RSS_SHARED_ACTIONS] = {
 		.size = sizeof(struct mlx5_shared_action_rss),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -347,7 +348,6 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_shared_action_rss",
 	},
-
 };
 
 
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 7ee63a7a14..1f2b873942 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -44,6 +44,8 @@ enum mlx5_ipool_index {
 	MLX5_IPOOL_JUMP, /* Pool for jump resource. */
 	MLX5_IPOOL_SAMPLE, /* Pool for sample resource. */
 	MLX5_IPOOL_DEST_ARRAY, /* Pool for destination array resource. */
+	MLX5_IPOOL_TUNNEL_ID, /* Pool for tunnel offload context */
+	MLX5_IPOOL_TNL_TBL_ID, /* Pool for tunnel table ID. */
 #endif
 	MLX5_IPOOL_MTR, /* Pool for meter resource. */
 	MLX5_IPOOL_MCP, /* Pool for metadata resource. */
@@ -51,8 +53,6 @@ enum mlx5_ipool_index {
 	MLX5_IPOOL_MLX5_FLOW, /* Pool for mlx5 flow handle. */
 	MLX5_IPOOL_RTE_FLOW, /* Pool for rte_flow. */
 	MLX5_IPOOL_RSS_EXPANTION_FLOW_ID, /* Pool for Queue/RSS flow ID. */
-	MLX5_IPOOL_TUNNEL_ID, /* Pool for flow tunnel ID. */
-	MLX5_IPOOL_TNL_TBL_ID, /* Pool for tunnel table ID. */
 	MLX5_IPOOL_RSS_SHARED_ACTIONS, /* Pool for RSS shared actions. */
 	MLX5_IPOOL_MAX,
 };
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 4216d3d18d..a9ece25e65 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -7432,14 +7432,13 @@ mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
 		      struct mlx5_flow_tunnel *tunnel)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_indexed_pool *ipool;
 
 	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
 		dev->data->port_id, tunnel->tunnel_id);
-	RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));
-	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],
-			tunnel->tunnel_id);
 	mlx5_hlist_destroy(tunnel->groups);
-	mlx5_free(tunnel);
+	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
+	mlx5_ipool_free(ipool, tunnel->tunnel_id);
 }
 
 static struct mlx5_flow_tunnel *
@@ -7461,39 +7460,25 @@ mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
 			  const struct rte_flow_tunnel *app_tunnel)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_indexed_pool *ipool;
 	struct mlx5_flow_tunnel *tunnel;
 	uint32_t id;
 
-	mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
-			  &id);
+	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
+	tunnel = mlx5_ipool_zmalloc(ipool, &id);
+	if (!tunnel)
+		return NULL;
 	if (id >= MLX5_MAX_TUNNELS) {
-		mlx5_ipool_free(priv->sh->ipool
-				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
+		mlx5_ipool_free(ipool, id);
 		DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id);
 		return NULL;
-	} else if (!id) {
-		return NULL;
-	}
-	/**
-	 * mlx5 flow tunnel is an auxiliary data structure
-	 * It's not part of IO. No need to allocate it from
-	 * huge pages pools dedicated for IO
-	 */
-	tunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel),
-			     0, SOCKET_ID_ANY);
-	if (!tunnel) {
-		mlx5_ipool_free(priv->sh->ipool
-				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
-		return NULL;
 	}
 	tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
 					   mlx5_flow_tunnel_grp2tbl_create_cb,
 					   NULL,
 					   mlx5_flow_tunnel_grp2tbl_remove_cb);
 	if (!tunnel->groups) {
-		mlx5_ipool_free(priv->sh->ipool
-				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
-		mlx5_free(tunnel);
+		mlx5_ipool_free(ipool, id);
 		return NULL;
 	}
 	tunnel->groups->ctx = priv->sh;
-- 
2.29.2


^ permalink raw reply	[flat|nested] 32+ messages in thread

* [dpdk-dev] [PATCH v3 5/6] net/mlx5: fix tunnel offload hub multi-thread protection
  2020-11-16  9:13 ` [dpdk-dev] [PATCH v3 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
                     ` (3 preceding siblings ...)
  2020-11-16  9:13   ` [dpdk-dev] [PATCH v3 4/6] net/mlx5: fix tunnel offload object allocation Gregory Etelson
@ 2020-11-16  9:13   ` Gregory Etelson
  2020-11-16  9:13   ` [dpdk-dev] [PATCH v3 6/6] net/mlx5: fix crash in tunnel offload setup Gregory Etelson
  5 siblings, 0 replies; 32+ messages in thread
From: Gregory Etelson @ 2020-11-16  9:13 UTC (permalink / raw)
  To: dev
  Cc: getelson, matan, rasland, Viacheslav Ovsiienko, Shahaf Shuler,
	Suanming Mou

The original patch removed active tunnel offload objects from the
tunnels database list without checking their reference counter
values. That led to a PMD crash.

This patch isolates the tunnels database list behind a separate API
that manages multi-thread (MT) protection of the tunnel offload
database.
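
The shape of that API is sketched below with toy types: a single
helper owns the lock and walks the list, while callers supply
match/hit/miss callbacks. Names are illustrative, and a pthread mutex
stands in for the DPDK spinlock:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct tunnel {
	struct tunnel *next;
	uint32_t id;
};

struct tunnel_db {
	pthread_mutex_t lock;
	struct tunnel *head;
};

typedef bool (*match_cb)(struct tunnel *tun, const void *ctx);
typedef void (*hit_cb)(struct tunnel *tun, void *ctx);
typedef void (*miss_cb)(void *ctx);

/* Single entry point for all list walks: the lock is taken and
 * released here, so no caller touches the list unprotected and the
 * hit/miss handlers run while the list is still consistent. */
static bool
db_access(struct tunnel_db *db, match_cb match, hit_cb hit,
	  miss_cb miss, void *ctx)
{
	struct tunnel *tun;
	bool found = false;

	pthread_mutex_lock(&db->lock);
	for (tun = db->head; tun != NULL; tun = tun->next) {
		if (match(tun, ctx)) {
			found = true;
			break;
		}
	}
	if (found && hit != NULL)
		hit(tun, ctx);
	else if (!found && miss != NULL)
		miss(ctx);
	pthread_mutex_unlock(&db->lock);
	return found;
}

static bool
match_id(struct tunnel *tun, const void *ctx)
{
	return tun->id == *(const uint32_t *)ctx;
}

int
main(void)
{
	struct tunnel t1 = { .next = NULL, .id = 7 };
	struct tunnel_db db = { PTHREAD_MUTEX_INITIALIZER, &t1 };
	uint32_t wanted = 7;

	return db_access(&db, match_id, NULL, NULL, &wanted) ? 0 : 1;
}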

Fixes: e4f5880 ("net/mlx5: make tunnel hub list thread safe")

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c | 266 +++++++++++++++++++++++++----------
 drivers/net/mlx5/mlx5_flow.h |   6 +-
 2 files changed, 195 insertions(+), 77 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index a9ece25e65..6efe799a2d 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -5639,11 +5639,8 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
 	if (flow->tunnel) {
 		struct mlx5_flow_tunnel *tunnel;
 
-		rte_spinlock_lock(&mlx5_tunnel_hub(dev)->sl);
 		tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
 		RTE_VERIFY(tunnel);
-		LIST_REMOVE(tunnel, chain);
-		rte_spinlock_unlock(&mlx5_tunnel_hub(dev)->sl);
 		if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
 			mlx5_flow_tunnel_free(dev, tunnel);
 	}
@@ -7264,6 +7261,15 @@ union tunnel_offload_mark {
 	};
 };
 
+static bool
+mlx5_access_tunnel_offload_db
+	(struct rte_eth_dev *dev,
+	 bool (*match)(struct rte_eth_dev *,
+		       struct mlx5_flow_tunnel *, const void *),
+	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
+	 void (*miss)(struct rte_eth_dev *, void *),
+	 void *ctx, bool lock_op);
+
 static int
 flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
 			     struct rte_flow *flow,
@@ -7441,18 +7447,72 @@ mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
 	mlx5_ipool_free(ipool, tunnel->tunnel_id);
 }
 
-static struct mlx5_flow_tunnel *
-mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+static bool
+mlx5_access_tunnel_offload_db
+	(struct rte_eth_dev *dev,
+	 bool (*match)(struct rte_eth_dev *,
+		       struct mlx5_flow_tunnel *, const void *),
+	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
+	 void (*miss)(struct rte_eth_dev *, void *),
+	 void *ctx, bool lock_op)
 {
+	bool verdict = false;
 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
+	struct mlx5_flow_tunnel *tunnel;
 
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (tun->tunnel_id == id)
+	rte_spinlock_lock(&thub->sl);
+	LIST_FOREACH(tunnel, &thub->tunnels, chain) {
+		verdict = match(dev, tunnel, (const void *)ctx);
+		if (verdict)
 			break;
 	}
+	if (!lock_op)
+		rte_spinlock_unlock(&thub->sl);
+	if (verdict && hit)
+		hit(dev, tunnel, ctx);
+	if (!verdict && miss)
+		miss(dev, ctx);
+	if (lock_op)
+		rte_spinlock_unlock(&thub->sl);
 
-	return tun;
+	return verdict;
+}
+
+struct tunnel_db_find_tunnel_id_ctx {
+	uint32_t tunnel_id;
+	struct mlx5_flow_tunnel *tunnel;
+};
+
+static bool
+find_tunnel_id_match(struct rte_eth_dev *dev,
+		     struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+	const struct tunnel_db_find_tunnel_id_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	return tunnel->tunnel_id == ctx->tunnel_id;
+}
+
+static void
+find_tunnel_id_hit(struct rte_eth_dev *dev,
+		   struct mlx5_flow_tunnel *tunnel, void *x)
+{
+	struct tunnel_db_find_tunnel_id_ctx *ctx = x;
+	RTE_SET_USED(dev);
+	ctx->tunnel = tunnel;
+}
+
+static struct mlx5_flow_tunnel *
+mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+{
+	struct tunnel_db_find_tunnel_id_ctx ctx = {
+		.tunnel_id = id,
+	};
+
+	mlx5_access_tunnel_offload_db(dev, find_tunnel_id_match,
+				      find_tunnel_id_hit, NULL, &ctx, true);
+
+	return ctx.tunnel;
 }
 
 static struct mlx5_flow_tunnel *
@@ -7500,38 +7560,60 @@ mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
 	return tunnel;
 }
 
+struct tunnel_db_get_tunnel_ctx {
+	const struct rte_flow_tunnel *app_tunnel;
+	struct mlx5_flow_tunnel *tunnel;
+};
+
+static bool get_tunnel_match(struct rte_eth_dev *dev,
+			     struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+	const struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	return !memcmp(ctx->app_tunnel, &tunnel->app_tunnel,
+		       sizeof(*ctx->app_tunnel));
+}
+
+static void get_tunnel_hit(struct rte_eth_dev *dev,
+			   struct mlx5_flow_tunnel *tunnel, void *x)
+{
+	/* called under tunnel spinlock protection */
+	struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	tunnel->refctn++;
+	ctx->tunnel = tunnel;
+}
+
+static void get_tunnel_miss(struct rte_eth_dev *dev, void *x)
+{
+	/* called under tunnel spinlock protection */
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+	struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+	rte_spinlock_unlock(&thub->sl);
+	ctx->tunnel = mlx5_flow_tunnel_allocate(dev, ctx->app_tunnel);
+	ctx->tunnel->refctn = 1;
+	rte_spinlock_lock(&thub->sl);
+	if (ctx->tunnel)
+		LIST_INSERT_HEAD(&thub->tunnels, ctx->tunnel, chain);
+}
+
+
 static int
 mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
 		     const struct rte_flow_tunnel *app_tunnel,
 		     struct mlx5_flow_tunnel **tunnel)
 {
-	int ret;
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
-
-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (!memcmp(app_tunnel, &tun->app_tunnel,
-			    sizeof(*app_tunnel))) {
-			*tunnel = tun;
-			ret = 0;
-			break;
-		}
-	}
-	if (!tun) {
-		tun = mlx5_flow_tunnel_allocate(dev, app_tunnel);
-		if (tun) {
-			LIST_INSERT_HEAD(&thub->tunnels, tun, chain);
-			*tunnel = tun;
-		} else {
-			ret = -ENOMEM;
-		}
-	}
-	rte_spinlock_unlock(&thub->sl);
-	if (tun)
-		__atomic_add_fetch(&tun->refctn, 1, __ATOMIC_RELAXED);
+	struct tunnel_db_get_tunnel_ctx ctx = {
+		.app_tunnel = app_tunnel,
+	};
 
-	return ret;
+	mlx5_access_tunnel_offload_db(dev, get_tunnel_match, get_tunnel_hit,
+				      get_tunnel_miss, &ctx, true);
+	*tunnel = ctx.tunnel;
+	return ctx.tunnel ? 0 : -ENOMEM;
 }
 
 void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id)
@@ -7631,56 +7713,88 @@ mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
 	*num_of_items = 1;
 	return 0;
 }
+
+struct tunnel_db_element_release_ctx {
+	struct rte_flow_item *items;
+	struct rte_flow_action *actions;
+	uint32_t num_elements;
+	struct rte_flow_error *error;
+	int ret;
+};
+
+static bool
+tunnel_element_release_match(struct rte_eth_dev *dev,
+			     struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+	const struct tunnel_db_element_release_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	if (ctx->num_elements != 1)
+		return false;
+	else if (ctx->items)
+		return ctx->items == &tunnel->item;
+	else if (ctx->actions)
+		return ctx->actions == &tunnel->action;
+
+	return false;
+}
+
+static void
+tunnel_element_release_hit(struct rte_eth_dev *dev,
+			   struct mlx5_flow_tunnel *tunnel, void *x)
+{
+	struct tunnel_db_element_release_ctx *ctx = x;
+	ctx->ret = 0;
+	if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
+		mlx5_flow_tunnel_free(dev, tunnel);
+}
+
+static void
+tunnel_element_release_miss(struct rte_eth_dev *dev, void *x)
+{
+	struct tunnel_db_element_release_ctx *ctx = x;
+	RTE_SET_USED(dev);
+	ctx->ret = rte_flow_error_set(ctx->error, EINVAL,
+				      RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				      "invalid argument");
+}
+
 static int
 mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
-			      struct rte_flow_item *pmd_items,
-			      uint32_t num_items, struct rte_flow_error *err)
-{
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
+		       struct rte_flow_item *pmd_items,
+		       uint32_t num_items, struct rte_flow_error *err)
+{
+	struct tunnel_db_element_release_ctx ctx = {
+		.items = pmd_items,
+		.actions = NULL,
+		.num_elements = num_items,
+		.error = err,
+	};
 
-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (&tun->item == pmd_items) {
-			LIST_REMOVE(tun, chain);
-			break;
-		}
-	}
-	rte_spinlock_unlock(&thub->sl);
-	if (!tun || num_items != 1)
-		return rte_flow_error_set(err, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  "invalid argument");
-	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
-		mlx5_flow_tunnel_free(dev, tun);
-	return 0;
+	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
+				      tunnel_element_release_hit,
+				      tunnel_element_release_miss, &ctx, false);
+
+	return ctx.ret;
 }
 
 static int
 mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
-				struct rte_flow_action *pmd_actions,
-				uint32_t num_actions,
-				struct rte_flow_error *err)
-{
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
+			 struct rte_flow_action *pmd_actions,
+			 uint32_t num_actions, struct rte_flow_error *err)
+{
+	struct tunnel_db_element_release_ctx ctx = {
+		.items = NULL,
+		.actions = pmd_actions,
+		.num_elements = num_actions,
+		.error = err,
+	};
 
-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (&tun->action == pmd_actions) {
-			LIST_REMOVE(tun, chain);
-			break;
-		}
-	}
-	rte_spinlock_unlock(&thub->sl);
-	if (!tun || num_actions != 1)
-		return rte_flow_error_set(err, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  "invalid argument");
-	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
-		mlx5_flow_tunnel_free(dev, tun);
+	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
+				      tunnel_element_release_hit,
+				      tunnel_element_release_miss, &ctx, false);
 
-	return 0;
+	return ctx.ret;
 }
 
 static int
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index c33c0fee7c..f64384217f 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -950,8 +950,12 @@ struct mlx5_flow_tunnel {
 
 /** PMD tunnel related context */
 struct mlx5_flow_tunnel_hub {
+	/* Tunnels list
+	 * Access to the list MUST be MT protected
+	 */
 	LIST_HEAD(, mlx5_flow_tunnel) tunnels;
-	rte_spinlock_t sl;			/* Tunnel list spinlock. */
+	 /* protect access to the tunnels list */
+	rte_spinlock_t sl;
 	struct mlx5_hlist *groups;		/** non tunnel groups */
 };
 
-- 
2.29.2


^ permalink raw reply	[flat|nested] 32+ messages in thread

* [dpdk-dev] [PATCH v3 6/6] net/mlx5: fix crash in tunnel offload setup
  2020-11-16  9:13 ` [dpdk-dev] [PATCH v3 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
                     ` (4 preceding siblings ...)
  2020-11-16  9:13   ` [dpdk-dev] [PATCH v3 5/6] net/mlx5: fix tunnel offload hub multi-thread protection Gregory Etelson
@ 2020-11-16  9:13   ` Gregory Etelson
  5 siblings, 0 replies; 32+ messages in thread
From: Gregory Etelson @ 2020-11-16  9:13 UTC (permalink / raw)
  To: dev
  Cc: getelson, matan, rasland, Viacheslav Ovsiienko, Shahaf Shuler,
	Xueming Li

The new flow table resource management API triggered a PMD crash in
tunnel offload mode when a tunnel match flow rule was inserted before
the tunnel set rule.

The reason for the crash was a double flow table registration: the
table was registered first by the tunnel offload code and once more
by the PMD code, as part of general table processing. The table
reference counter was decremented only once during rule destruction,
which caused a resource leak that triggered the crash.

The patch updates the PMD registration with tunnel offload parameters
and removes the table registration from the tunnel-related code.
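
The bug class is easy to reproduce with a toy refcounted resource
(names are illustrative, not the mlx5 table cache):

#include <assert.h>
#include <stdint.h>

struct table {
	uint32_t refcnt;
};

static void
table_register(struct table *tbl)
{
	tbl->refcnt++;
}

static void
table_release(struct table *tbl)
{
	assert(tbl->refcnt > 0);
	tbl->refcnt--;
}

int
main(void)
{
	struct table tbl = { .refcnt = 0 };

	/* Buggy sequence: tunnel offload code registers the table... */
	table_register(&tbl);
	/* ...and generic PMD table processing registers it again. */
	table_register(&tbl);
	/* Rule destruction releases only one reference... */
	table_release(&tbl);
	/* ...so the counter never reaches zero: a leaked reference
	 * that keeps the table alive forever. */
	assert(tbl.refcnt == 1);
	return 0;
}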

Fixes: 663ad57dabb2 ("net/mlx5: make flow table cache thread safe")

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c    |  2 +-
 drivers/net/mlx5/mlx5_flow_dv.c | 39 +++++++++++++++++----------------
 2 files changed, 21 insertions(+), 20 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 6efe799a2d..b0187b68e6 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -6843,7 +6843,7 @@ mlx5_flow_group_to_table(struct rte_eth_dev *dev,
 		standard_translation = true;
 	}
 	DRV_LOG(DEBUG,
-		"port %u group=%#x transfer=%d external=%d fdb_def_rule=%d translate=%s",
+		"port %u group=%u transfer=%d external=%d fdb_def_rule=%d translate=%s",
 		dev->data->port_id, group, grp_info->transfer,
 		grp_info->external, grp_info->fdb_def_rule,
 		standard_translation ? "STANDARD" : "TUNNEL");
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 25ab9adee6..5e230a3c25 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -8042,6 +8042,8 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
 				   "cannot get table");
 		return NULL;
 	}
+	DRV_LOG(DEBUG, "Table_id %u tunnel %u group %u registered.",
+		table_id, tunnel ? tunnel->tunnel_id : 0, group_id);
 	tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
 	return &tbl_data->tbl;
 }
@@ -8080,7 +8082,7 @@ flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
 		if (he)
 			mlx5_hlist_unregister(tunnel_grp_hash, he);
 		DRV_LOG(DEBUG,
-			"Table_id %#x tunnel %u group %u released.",
+			"Table_id %u tunnel %u group %u released.",
 			table_id,
 			tbl_data->tunnel ?
 			tbl_data->tunnel->tunnel_id : 0,
@@ -8192,6 +8194,8 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
 			 struct mlx5_flow_dv_matcher *ref,
 			 union mlx5_flow_tbl_key *key,
 			 struct mlx5_flow *dev_flow,
+			 const struct mlx5_flow_tunnel *tunnel,
+			 uint32_t group_id,
 			 struct rte_flow_error *error)
 {
 	struct mlx5_cache_entry *entry;
@@ -8203,8 +8207,14 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
 		.data = ref,
 	};
 
-	tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
-				       key->domain, false, NULL, 0, 0, error);
+	/**
+	 * tunnel offload API requires this registration for cases when
+	 * tunnel match rule was inserted before tunnel set rule.
+	 */
+	tbl = flow_dv_tbl_resource_get(dev, key->table_id,
+				       key->direction, key->domain,
+				       dev_flow->external, tunnel,
+				       group_id, 0, error);
 	if (!tbl)
 		return -rte_errno;	/* No need to refill the error info */
 	tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
@@ -9611,10 +9621,14 @@ flow_dv_translate(struct rte_eth_dev *dev,
 		/*
 		 * do not add decap action if match rule drops packet
 		 * HW rejects rules with decap & drop
+		 *
+		 * if tunnel match rule was inserted before matching tunnel set
+		 * rule flow table used in the match rule must be registered.
+		 * current implementation handles that in the
+		 * flow_dv_match_register() at the function end.
 		 */
 		bool add_decap = true;
 		const struct rte_flow_action *ptr = actions;
-		struct mlx5_flow_tbl_resource *tbl;
 
 		for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
 			if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
@@ -9631,20 +9645,6 @@ flow_dv_translate(struct rte_eth_dev *dev,
 					dev_flow->dv.encap_decap->action;
 			action_flags |= MLX5_FLOW_ACTION_DECAP;
 		}
-		/*
-		 * bind table_id with <group, table> for tunnel match rule.
-		 * Tunnel set rule establishes that bind in JUMP action handler.
-		 * Required for scenario when application creates tunnel match
-		 * rule before tunnel set rule.
-		 */
-		tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
-					       attr->transfer,
-					       !!dev_flow->external, tunnel,
-					       attr->group, 0, error);
-		if (!tbl)
-			return rte_flow_error_set
-			       (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
-			       actions, "cannot register tunnel group");
 	}
 	for (; !actions_end ; actions++) {
 		const struct rte_flow_action_queue *queue;
@@ -10474,7 +10474,8 @@ flow_dv_translate(struct rte_eth_dev *dev,
 	tbl_key.domain = attr->transfer;
 	tbl_key.direction = attr->egress;
 	tbl_key.table_id = dev_flow->dv.group;
-	if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
+	if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
+				     tunnel, attr->group, error))
 		return -rte_errno;
 	return 0;
 }
-- 
2.29.2


^ permalink raw reply	[flat|nested] 32+ messages in thread

* [dpdk-dev] [PATCH v4 0/6] restore tunnel offload functionality in mlx5
  2020-11-11  7:14 [dpdk-dev] [PATCH 0/4] restore tunnel offload functionality in mlx5 Gregory Etelson
                   ` (4 preceding siblings ...)
  2020-11-16  9:13 ` [dpdk-dev] [PATCH v3 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
@ 2020-11-16  9:48 ` Gregory Etelson
  2020-11-16  9:49   ` [dpdk-dev] [PATCH v4 1/6] net/mlx5: fix tunnel offload callback names Gregory Etelson
                     ` (5 more replies)
  2020-11-16 14:02 ` [dpdk-dev] [PATCH v5 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
  6 siblings, 6 replies; 32+ messages in thread
From: Gregory Etelson @ 2020-11-16  9:48 UTC (permalink / raw)
  To: dev; +Cc: getelson, matan, rasland

Post-merge fixes that restore tunnel offload functionality in mlx5.

v2: resolve a compilation error in the Ubuntu inbox environment.
v3: re-arrange patches to prevent the Ubuntu compilation error.
v4: more compilation fixes.

Gregory Etelson (6):
  net/mlx5: fix tunnel offload callback names
  net/mlx5: fix build with Direct Verbs disabled
  net/mlx5: fix structure passing method in function call
  net/mlx5: fix tunnel offload object allocation
  net/mlx5: fix tunnel offload hub multi-thread protection
  net/mlx5: fix crash in tunnel offload setup

 drivers/net/mlx5/mlx5.c         |   50 +-
 drivers/net/mlx5/mlx5.h         |    4 +-
 drivers/net/mlx5/mlx5_flow.c    | 1254 ++++++++++++++++++-------------
 drivers/net/mlx5/mlx5_flow.h    |   15 +-
 drivers/net/mlx5/mlx5_flow_dv.c |   49 +-
 5 files changed, 807 insertions(+), 565 deletions(-)

-- 
2.29.2


^ permalink raw reply	[flat|nested] 32+ messages in thread

* [dpdk-dev] [PATCH v4 1/6] net/mlx5: fix tunnel offload callback names
  2020-11-16  9:48 ` [dpdk-dev] [PATCH v4 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
@ 2020-11-16  9:49   ` Gregory Etelson
  2020-11-16  9:49   ` [dpdk-dev] [PATCH v4 2/6] net/mlx5: fix build with Direct Verbs disabled Gregory Etelson
                     ` (4 subsequent siblings)
  5 siblings, 0 replies; 32+ messages in thread
From: Gregory Etelson @ 2020-11-16  9:49 UTC (permalink / raw)
  To: dev; +Cc: getelson, matan, rasland, Viacheslav Ovsiienko, Shahaf Shuler

Fix the mlx5_flow_tunnel_action_release and
mlx5_flow_tunnel_item_release callback names to match the tunnel
offload naming pattern.

Fixes: 4ec6360de37d ("net/mlx5: implement tunnel offload")

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 324349ed19..98559ece2b 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -662,9 +662,9 @@ mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
 }
 
 static int
-mlx5_flow_item_release(struct rte_eth_dev *dev,
-		       struct rte_flow_item *pmd_items,
-		       uint32_t num_items, struct rte_flow_error *err)
+mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
+			      struct rte_flow_item *pmd_items,
+			      uint32_t num_items, struct rte_flow_error *err)
 {
 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
 	struct mlx5_flow_tunnel *tun;
@@ -687,9 +687,10 @@ mlx5_flow_item_release(struct rte_eth_dev *dev,
 }
 
 static int
-mlx5_flow_action_release(struct rte_eth_dev *dev,
-			 struct rte_flow_action *pmd_actions,
-			 uint32_t num_actions, struct rte_flow_error *err)
+mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
+				struct rte_flow_action *pmd_actions,
+				uint32_t num_actions,
+				struct rte_flow_error *err)
 {
 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
 	struct mlx5_flow_tunnel *tun;
@@ -760,8 +761,8 @@ static const struct rte_flow_ops mlx5_flow_ops = {
 	.shared_action_query = mlx5_shared_action_query,
 	.tunnel_decap_set = mlx5_flow_tunnel_decap_set,
 	.tunnel_match = mlx5_flow_tunnel_match,
-	.tunnel_action_decap_release = mlx5_flow_action_release,
-	.tunnel_item_release = mlx5_flow_item_release,
+	.tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
+	.tunnel_item_release = mlx5_flow_tunnel_item_release,
 	.get_restore_info = mlx5_flow_tunnel_get_restore_info,
 };
 
-- 
2.29.2


^ permalink raw reply	[flat|nested] 32+ messages in thread

* [dpdk-dev] [PATCH v4 2/6] net/mlx5: fix build with Direct Verbs disabled
  2020-11-16  9:48 ` [dpdk-dev] [PATCH v4 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
  2020-11-16  9:49   ` [dpdk-dev] [PATCH v4 1/6] net/mlx5: fix tunnel offload callback names Gregory Etelson
@ 2020-11-16  9:49   ` Gregory Etelson
  2020-11-16  9:49   ` [dpdk-dev] [PATCH v4 3/6] net/mlx5: fix structure passing method in function call Gregory Etelson
                     ` (3 subsequent siblings)
  5 siblings, 0 replies; 32+ messages in thread
From: Gregory Etelson @ 2020-11-16  9:49 UTC (permalink / raw)
  To: dev; +Cc: getelson, matan, rasland, Viacheslav Ovsiienko, Shahaf Shuler

The tunnel offload API is implemented for the Direct Verbs environment
only. This patch re-arranges tunnel-related functions so that they
compile in non-Direct-Verbs setups, preventing build failures. The
patch does not introduce new functions.
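
The arrangement follows the usual forward-declaration plus
compile-guard pattern. A toy, self-contained sketch of that pattern
(the names here are illustrative only, not the driver's symbols):

    #include <errno.h>

    /* A forward declaration keeps the shared ops table valid in
     * every build configuration. */
    static int tunnel_decap_set(int port_id);

    #ifdef HAVE_IBV_FLOW_DV_SUPPORT
    static int
    tunnel_decap_set(int port_id)
    {
        (void)port_id;
        return 0; /* Full Direct Verbs implementation lives here. */
    }
    #else
    static int
    tunnel_decap_set(int port_id)
    {
        (void)port_id;
        return -ENOTSUP; /* Stub keeps non-DV builds compiling. */
    }
    #endif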

Fixes: 4ec6360de37d ("net/mlx5: implement tunnel offload")

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c | 1004 +++++++++++++++++++---------------
 drivers/net/mlx5/mlx5_flow.h |    5 +
 2 files changed, 572 insertions(+), 437 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 98559ece2b..7d32cac384 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -33,16 +33,35 @@
 #include "mlx5_common_os.h"
 #include "rte_pmd_mlx5.h"
 
+struct tunnel_default_miss_ctx {
+	uint16_t *queue;
+	__extension__
+	union {
+		struct rte_flow_action_rss action_rss;
+		struct rte_flow_action_queue miss_queue;
+		struct rte_flow_action_jump miss_jump;
+		uint8_t raw[0];
+	};
+};
+
+static int
+flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
+			     struct rte_flow *flow,
+			     const struct rte_flow_attr *attr,
+			     const struct rte_flow_action *app_actions,
+			     uint32_t flow_idx,
+			     struct tunnel_default_miss_ctx *ctx,
+			     struct rte_flow_error *error);
 static struct mlx5_flow_tunnel *
 mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id);
 static void
 mlx5_flow_tunnel_free(struct rte_eth_dev *dev, struct mlx5_flow_tunnel *tunnel);
-static const struct mlx5_flow_tbl_data_entry  *
-tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark);
-static int
-mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
-		     const struct rte_flow_tunnel *app_tunnel,
-		     struct mlx5_flow_tunnel **tunnel);
+static uint32_t
+tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
+				const struct mlx5_flow_tunnel *tunnel,
+				uint32_t group, uint32_t *table,
+				struct rte_flow_error *error);
+
 static struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);
 static void mlx5_flow_pop_thread_workspace(void);
 
@@ -606,145 +625,32 @@ mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
 	return !err_msg;
 }
 
-
 static int
 mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
 		    struct rte_flow_tunnel *app_tunnel,
 		    struct rte_flow_action **actions,
 		    uint32_t *num_of_actions,
-		    struct rte_flow_error *error)
-{
-	int ret;
-	struct mlx5_flow_tunnel *tunnel;
-	const char *err_msg = NULL;
-	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
-
-	if (!verdict)
-		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
-					  err_msg);
-	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
-	if (ret < 0) {
-		return rte_flow_error_set(error, ret,
-					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
-					  "failed to initialize pmd tunnel");
-	}
-	*actions = &tunnel->action;
-	*num_of_actions = 1;
-	return 0;
-}
-
+		    struct rte_flow_error *error);
 static int
 mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
 		       struct rte_flow_tunnel *app_tunnel,
 		       struct rte_flow_item **items,
 		       uint32_t *num_of_items,
-		       struct rte_flow_error *error)
-{
-	int ret;
-	struct mlx5_flow_tunnel *tunnel;
-	const char *err_msg = NULL;
-	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
-
-	if (!verdict)
-		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  err_msg);
-	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
-	if (ret < 0) {
-		return rte_flow_error_set(error, ret,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  "failed to initialize pmd tunnel");
-	}
-	*items = &tunnel->item;
-	*num_of_items = 1;
-	return 0;
-}
-
+		       struct rte_flow_error *error);
 static int
 mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
 			      struct rte_flow_item *pmd_items,
-			      uint32_t num_items, struct rte_flow_error *err)
-{
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
-
-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (&tun->item == pmd_items) {
-			LIST_REMOVE(tun, chain);
-			break;
-		}
-	}
-	rte_spinlock_unlock(&thub->sl);
-	if (!tun || num_items != 1)
-		return rte_flow_error_set(err, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  "invalid argument");
-	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
-		mlx5_flow_tunnel_free(dev, tun);
-	return 0;
-}
-
+			      uint32_t num_items, struct rte_flow_error *err);
 static int
 mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
 				struct rte_flow_action *pmd_actions,
 				uint32_t num_actions,
-				struct rte_flow_error *err)
-{
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
-
-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (&tun->action == pmd_actions) {
-			LIST_REMOVE(tun, chain);
-			break;
-		}
-	}
-	rte_spinlock_unlock(&thub->sl);
-	if (!tun || num_actions != 1)
-		return rte_flow_error_set(err, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  "invalid argument");
-	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
-		mlx5_flow_tunnel_free(dev, tun);
-
-	return 0;
-}
-
+				struct rte_flow_error *err);
 static int
 mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
 				  struct rte_mbuf *m,
 				  struct rte_flow_restore_info *info,
-				  struct rte_flow_error *err)
-{
-	uint64_t ol_flags = m->ol_flags;
-	const struct mlx5_flow_tbl_data_entry *tble;
-	const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;
-
-	if ((ol_flags & mask) != mask)
-		goto err;
-	tble = tunnel_mark_decode(dev, m->hash.fdir.hi);
-	if (!tble) {
-		DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x",
-			dev->data->port_id, m->hash.fdir.hi);
-		goto err;
-	}
-	MLX5_ASSERT(tble->tunnel);
-	memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));
-	info->group_id = tble->group_id;
-	info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |
-		      RTE_FLOW_RESTORE_INFO_GROUP_ID |
-		      RTE_FLOW_RESTORE_INFO_ENCAPSULATED;
-
-	return 0;
-
-err:
-	return rte_flow_error_set(err, EINVAL,
-				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-				  "failed to get restore info");
-}
+				  struct rte_flow_error *err);
 
 static const struct rte_flow_ops mlx5_flow_ops = {
 	.validate = mlx5_flow_validate,
@@ -4160,174 +4066,38 @@ flow_hairpin_split(struct rte_eth_dev *dev,
 	return 0;
 }
 
-__extension__
-union tunnel_offload_mark {
-	uint32_t val;
-	struct {
-		uint32_t app_reserve:8;
-		uint32_t table_id:15;
-		uint32_t transfer:1;
-		uint32_t _unused_:8;
-	};
-};
-
-struct tunnel_default_miss_ctx {
-	uint16_t *queue;
-	__extension__
-	union {
-		struct rte_flow_action_rss action_rss;
-		struct rte_flow_action_queue miss_queue;
-		struct rte_flow_action_jump miss_jump;
-		uint8_t raw[0];
-	};
-};
-
+/**
+ * The last stage of splitting chain, just creates the subflow
+ * without any modification.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] flow
+ *   Parent flow structure pointer.
+ * @param[in, out] sub_flow
+ *   Pointer to return the created subflow, may be NULL.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] items
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[in] flow_split_info
+ *   Pointer to flow split info structure.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ * @return
+ *   0 on success, negative value otherwise
+ */
 static int
-flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
-			     struct rte_flow *flow,
-			     const struct rte_flow_attr *attr,
-			     const struct rte_flow_action *app_actions,
-			     uint32_t flow_idx,
-			     struct tunnel_default_miss_ctx *ctx,
-			     struct rte_flow_error *error)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow *dev_flow;
-	struct rte_flow_attr miss_attr = *attr;
-	const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;
-	const struct rte_flow_item miss_items[2] = {
-		{
-			.type = RTE_FLOW_ITEM_TYPE_ETH,
-			.spec = NULL,
-			.last = NULL,
-			.mask = NULL
-		},
-		{
-			.type = RTE_FLOW_ITEM_TYPE_END,
-			.spec = NULL,
-			.last = NULL,
-			.mask = NULL
-		}
-	};
-	union tunnel_offload_mark mark_id;
-	struct rte_flow_action_mark miss_mark;
-	struct rte_flow_action miss_actions[3] = {
-		[0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },
-		[2] = { .type = RTE_FLOW_ACTION_TYPE_END,  .conf = NULL }
-	};
-	const struct rte_flow_action_jump *jump_data;
-	uint32_t i, flow_table = 0; /* prevent compilation warning */
-	struct flow_grp_info grp_info = {
-		.external = 1,
-		.transfer = attr->transfer,
-		.fdb_def_rule = !!priv->fdb_def_rule,
-		.std_tbl_fix = 0,
-	};
-	int ret;
-
-	if (!attr->transfer) {
-		uint32_t q_size;
-
-		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;
-		q_size = priv->reta_idx_n * sizeof(ctx->queue[0]);
-		ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,
-					 0, SOCKET_ID_ANY);
-		if (!ctx->queue)
-			return rte_flow_error_set
-				(error, ENOMEM,
-				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-				NULL, "invalid default miss RSS");
-		ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
-		ctx->action_rss.level = 0,
-		ctx->action_rss.types = priv->rss_conf.rss_hf,
-		ctx->action_rss.key_len = priv->rss_conf.rss_key_len,
-		ctx->action_rss.queue_num = priv->reta_idx_n,
-		ctx->action_rss.key = priv->rss_conf.rss_key,
-		ctx->action_rss.queue = ctx->queue;
-		if (!priv->reta_idx_n || !priv->rxqs_n)
-			return rte_flow_error_set
-				(error, EINVAL,
-				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-				NULL, "invalid port configuration");
-		if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
-			ctx->action_rss.types = 0;
-		for (i = 0; i != priv->reta_idx_n; ++i)
-			ctx->queue[i] = (*priv->reta_idx)[i];
-	} else {
-		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;
-		ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;
-	}
-	miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;
-	for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);
-	jump_data = app_actions->conf;
-	miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
-	miss_attr.group = jump_data->group;
-	ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
-				       &flow_table, grp_info, error);
-	if (ret)
-		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-					  NULL, "invalid tunnel id");
-	mark_id.app_reserve = 0;
-	mark_id.table_id = tunnel_flow_tbl_to_id(flow_table);
-	mark_id.transfer = !!attr->transfer;
-	mark_id._unused_ = 0;
-	miss_mark.id = mark_id.val;
-	dev_flow = flow_drv_prepare(dev, flow, &miss_attr,
-				    miss_items, miss_actions, flow_idx, error);
-	if (!dev_flow)
-		return -rte_errno;
-	dev_flow->flow = flow;
-	dev_flow->external = true;
-	dev_flow->tunnel = tunnel;
-	/* Subflow object was created, we must include one in the list. */
-	SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
-		      dev_flow->handle, next);
-	DRV_LOG(DEBUG,
-		"port %u tunnel type=%d id=%u miss rule priority=%u group=%u",
-		dev->data->port_id, tunnel->app_tunnel.type,
-		tunnel->tunnel_id, miss_attr.priority, miss_attr.group);
-	ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,
-				  miss_actions, error);
-	if (!ret)
-		ret = flow_mreg_update_copy_table(dev, flow, miss_actions,
-						  error);
-
-	return ret;
-}
-
-/**
- * The last stage of splitting chain, just creates the subflow
- * without any modification.
- *
- * @param[in] dev
- *   Pointer to Ethernet device.
- * @param[in] flow
- *   Parent flow structure pointer.
- * @param[in, out] sub_flow
- *   Pointer to return the created subflow, may be NULL.
- * @param[in] attr
- *   Flow rule attributes.
- * @param[in] items
- *   Pattern specification (list terminated by the END pattern item).
- * @param[in] actions
- *   Associated actions (list terminated by the END action).
- * @param[in] flow_split_info
- *   Pointer to flow split info structure.
- * @param[out] error
- *   Perform verbose error reporting if not NULL.
- * @return
- *   0 on success, negative value otherwise
- */
-static int
-flow_create_split_inner(struct rte_eth_dev *dev,
-			struct rte_flow *flow,
-			struct mlx5_flow **sub_flow,
-			const struct rte_flow_attr *attr,
-			const struct rte_flow_item items[],
-			const struct rte_flow_action actions[],
-			struct mlx5_flow_split_info *flow_split_info,
-			struct rte_flow_error *error)
+flow_create_split_inner(struct rte_eth_dev *dev,
+			struct rte_flow *flow,
+			struct mlx5_flow **sub_flow,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item items[],
+			const struct rte_flow_action actions[],
+			struct mlx5_flow_split_info *flow_split_info,
+			struct rte_flow_error *error)
 {
 	struct mlx5_flow *dev_flow;
 
@@ -6953,99 +6723,6 @@ mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
 	sh->cmng.pending_queries--;
 }
 
-static const struct mlx5_flow_tbl_data_entry  *
-tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_ctx_shared *sh = priv->sh;
-	struct mlx5_hlist_entry *he;
-	union tunnel_offload_mark mbits = { .val = mark };
-	union mlx5_flow_tbl_key table_key = {
-		{
-			.table_id = tunnel_id_to_flow_tbl(mbits.table_id),
-			.dummy = 0,
-			.domain = !!mbits.transfer,
-			.direction = 0,
-		}
-	};
-	he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
-	return he ?
-	       container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
-}
-
-static void
-mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,
-				   struct mlx5_hlist_entry *entry)
-{
-	struct mlx5_dev_ctx_shared *sh = list->ctx;
-	struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
-
-	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
-			tunnel_flow_tbl_to_id(tte->flow_table));
-	mlx5_free(tte);
-}
-
-static struct mlx5_hlist_entry *
-mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list,
-				   uint64_t key __rte_unused,
-				   void *ctx __rte_unused)
-{
-	struct mlx5_dev_ctx_shared *sh = list->ctx;
-	struct tunnel_tbl_entry *tte;
-
-	tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
-			  sizeof(*tte), 0,
-			  SOCKET_ID_ANY);
-	if (!tte)
-		goto err;
-	mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
-			  &tte->flow_table);
-	if (tte->flow_table >= MLX5_MAX_TABLES) {
-		DRV_LOG(ERR, "Tunnel TBL ID %d exceed max limit.",
-			tte->flow_table);
-		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
-				tte->flow_table);
-		goto err;
-	} else if (!tte->flow_table) {
-		goto err;
-	}
-	tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
-	return &tte->hash;
-err:
-	if (tte)
-		mlx5_free(tte);
-	return NULL;
-}
-
-static uint32_t
-tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
-				const struct mlx5_flow_tunnel *tunnel,
-				uint32_t group, uint32_t *table,
-				struct rte_flow_error *error)
-{
-	struct mlx5_hlist_entry *he;
-	struct tunnel_tbl_entry *tte;
-	union tunnel_tbl_key key = {
-		.tunnel_id = tunnel ? tunnel->tunnel_id : 0,
-		.group = group
-	};
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_hlist *group_hash;
-
-	group_hash = tunnel ? tunnel->groups : thub->groups;
-	he = mlx5_hlist_register(group_hash, key.val, NULL);
-	if (!he)
-		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
-					  NULL,
-					  "tunnel group index not supported");
-	tte = container_of(he, typeof(*tte), hash);
-	*table = tte->flow_table;
-	DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x",
-		dev->data->port_id, key.tunnel_id, group, *table);
-	return 0;
-}
-
 static int
 flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table,
 		    struct flow_grp_info grp_info, struct rte_flow_error *error)
@@ -7505,51 +7182,294 @@ mlx5_shared_action_flush(struct rte_eth_dev *dev)
 	return ret;
 }
 
-static void
-mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
-		      struct mlx5_flow_tunnel *tunnel)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-
-	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
-		dev->data->port_id, tunnel->tunnel_id);
-	RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));
-	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],
-			tunnel->tunnel_id);
-	mlx5_hlist_destroy(tunnel->groups);
-	mlx5_free(tunnel);
-}
+#ifndef HAVE_MLX5DV_DR
+#define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
+#else
+#define MLX5_DOMAIN_SYNC_FLOW \
+	(MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
+#endif
 
-static struct mlx5_flow_tunnel *
-mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
 {
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
-
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (tun->tunnel_id == id)
-			break;
-	}
+	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+	const struct mlx5_flow_driver_ops *fops;
+	int ret;
+	struct rte_flow_attr attr = { .transfer = 0 };
 
-	return tun;
+	fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+	ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
+	if (ret > 0)
+		ret = -ret;
+	return ret;
 }
 
-static struct mlx5_flow_tunnel *
-mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
-			  const struct rte_flow_tunnel *app_tunnel)
+/**
+ * tunnel offload functionality is defined for DV environment only
+ */
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+__extension__
+union tunnel_offload_mark {
+	uint32_t val;
+	struct {
+		uint32_t app_reserve:8;
+		uint32_t table_id:15;
+		uint32_t transfer:1;
+		uint32_t _unused_:8;
+	};
+};
+
+static int
+flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
+			     struct rte_flow *flow,
+			     const struct rte_flow_attr *attr,
+			     const struct rte_flow_action *app_actions,
+			     uint32_t flow_idx,
+			     struct tunnel_default_miss_ctx *ctx,
+			     struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_tunnel *tunnel;
-	uint32_t id;
-
-	mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
-			  &id);
-	if (id >= MLX5_MAX_TUNNELS) {
-		mlx5_ipool_free(priv->sh->ipool
-				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
-		DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id);
-		return NULL;
-	} else if (!id) {
+	struct mlx5_flow *dev_flow;
+	struct rte_flow_attr miss_attr = *attr;
+	const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;
+	const struct rte_flow_item miss_items[2] = {
+		{
+			.type = RTE_FLOW_ITEM_TYPE_ETH,
+			.spec = NULL,
+			.last = NULL,
+			.mask = NULL
+		},
+		{
+			.type = RTE_FLOW_ITEM_TYPE_END,
+			.spec = NULL,
+			.last = NULL,
+			.mask = NULL
+		}
+	};
+	union tunnel_offload_mark mark_id;
+	struct rte_flow_action_mark miss_mark;
+	struct rte_flow_action miss_actions[3] = {
+		[0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },
+		[2] = { .type = RTE_FLOW_ACTION_TYPE_END,  .conf = NULL }
+	};
+	const struct rte_flow_action_jump *jump_data;
+	uint32_t i, flow_table = 0; /* prevent compilation warning */
+	struct flow_grp_info grp_info = {
+		.external = 1,
+		.transfer = attr->transfer,
+		.fdb_def_rule = !!priv->fdb_def_rule,
+		.std_tbl_fix = 0,
+	};
+	int ret;
+
+	if (!attr->transfer) {
+		uint32_t q_size;
+
+		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;
+		q_size = priv->reta_idx_n * sizeof(ctx->queue[0]);
+		ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,
+					 0, SOCKET_ID_ANY);
+		if (!ctx->queue)
+			return rte_flow_error_set
+				(error, ENOMEM,
+				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+				NULL, "invalid default miss RSS");
+		ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
+		ctx->action_rss.level = 0,
+		ctx->action_rss.types = priv->rss_conf.rss_hf,
+		ctx->action_rss.key_len = priv->rss_conf.rss_key_len,
+		ctx->action_rss.queue_num = priv->reta_idx_n,
+		ctx->action_rss.key = priv->rss_conf.rss_key,
+		ctx->action_rss.queue = ctx->queue;
+		if (!priv->reta_idx_n || !priv->rxqs_n)
+			return rte_flow_error_set
+				(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+				NULL, "invalid port configuration");
+		if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+			ctx->action_rss.types = 0;
+		for (i = 0; i != priv->reta_idx_n; ++i)
+			ctx->queue[i] = (*priv->reta_idx)[i];
+	} else {
+		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;
+		ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;
+	}
+	miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;
+	for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);
+	jump_data = app_actions->conf;
+	miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
+	miss_attr.group = jump_data->group;
+	ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
+				       &flow_table, grp_info, error);
+	if (ret)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  NULL, "invalid tunnel id");
+	mark_id.app_reserve = 0;
+	mark_id.table_id = tunnel_flow_tbl_to_id(flow_table);
+	mark_id.transfer = !!attr->transfer;
+	mark_id._unused_ = 0;
+	miss_mark.id = mark_id.val;
+	dev_flow = flow_drv_prepare(dev, flow, &miss_attr,
+				    miss_items, miss_actions, flow_idx, error);
+	if (!dev_flow)
+		return -rte_errno;
+	dev_flow->flow = flow;
+	dev_flow->external = true;
+	dev_flow->tunnel = tunnel;
+	/* Subflow object was created, we must include one in the list. */
+	SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
+		      dev_flow->handle, next);
+	DRV_LOG(DEBUG,
+		"port %u tunnel type=%d id=%u miss rule priority=%u group=%u",
+		dev->data->port_id, tunnel->app_tunnel.type,
+		tunnel->tunnel_id, miss_attr.priority, miss_attr.group);
+	ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,
+				  miss_actions, error);
+	if (!ret)
+		ret = flow_mreg_update_copy_table(dev, flow, miss_actions,
+						  error);
+
+	return ret;
+}
+
+static const struct mlx5_flow_tbl_data_entry  *
+tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
+	struct mlx5_hlist_entry *he;
+	union tunnel_offload_mark mbits = { .val = mark };
+	union mlx5_flow_tbl_key table_key = {
+		{
+			.table_id = tunnel_id_to_flow_tbl(mbits.table_id),
+			.dummy = 0,
+			.domain = !!mbits.transfer,
+			.direction = 0,
+		}
+	};
+	he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
+	return he ?
+	       container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
+}
+
+static void
+mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,
+				   struct mlx5_hlist_entry *entry)
+{
+	struct mlx5_dev_ctx_shared *sh = list->ctx;
+	struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
+
+	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+			tunnel_flow_tbl_to_id(tte->flow_table));
+	mlx5_free(tte);
+}
+
+static struct mlx5_hlist_entry *
+mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list,
+				   uint64_t key __rte_unused,
+				   void *ctx __rte_unused)
+{
+	struct mlx5_dev_ctx_shared *sh = list->ctx;
+	struct tunnel_tbl_entry *tte;
+
+	tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
+			  sizeof(*tte), 0,
+			  SOCKET_ID_ANY);
+	if (!tte)
+		goto err;
+	mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+			  &tte->flow_table);
+	if (tte->flow_table >= MLX5_MAX_TABLES) {
+		DRV_LOG(ERR, "Tunnel TBL ID %d exceed max limit.",
+			tte->flow_table);
+		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+				tte->flow_table);
+		goto err;
+	} else if (!tte->flow_table) {
+		goto err;
+	}
+	tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
+	return &tte->hash;
+err:
+	if (tte)
+		mlx5_free(tte);
+	return NULL;
+}
+
+static uint32_t
+tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
+				const struct mlx5_flow_tunnel *tunnel,
+				uint32_t group, uint32_t *table,
+				struct rte_flow_error *error)
+{
+	struct mlx5_hlist_entry *he;
+	struct tunnel_tbl_entry *tte;
+	union tunnel_tbl_key key = {
+		.tunnel_id = tunnel ? tunnel->tunnel_id : 0,
+		.group = group
+	};
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+	struct mlx5_hlist *group_hash;
+
+	group_hash = tunnel ? tunnel->groups : thub->groups;
+	he = mlx5_hlist_register(group_hash, key.val, NULL);
+	if (!he)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+					  NULL,
+					  "tunnel group index not supported");
+	tte = container_of(he, typeof(*tte), hash);
+	*table = tte->flow_table;
+	DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x",
+		dev->data->port_id, key.tunnel_id, group, *table);
+	return 0;
+}
+
+static void
+mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
+		      struct mlx5_flow_tunnel *tunnel)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
+		dev->data->port_id, tunnel->tunnel_id);
+	RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));
+	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],
+			tunnel->tunnel_id);
+	mlx5_hlist_destroy(tunnel->groups);
+	mlx5_free(tunnel);
+}
+
+static struct mlx5_flow_tunnel *
+mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+{
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+	struct mlx5_flow_tunnel *tun;
+
+	LIST_FOREACH(tun, &thub->tunnels, chain) {
+		if (tun->tunnel_id == id)
+			break;
+	}
+
+	return tun;
+}
+
+static struct mlx5_flow_tunnel *
+mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
+			  const struct rte_flow_tunnel *app_tunnel)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_tunnel *tunnel;
+	uint32_t id;
+
+	mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
+			  &id);
+	if (id >= MLX5_MAX_TUNNELS) {
+		mlx5_ipool_free(priv->sh->ipool
+				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
+		DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id);
+		return NULL;
+	} else if (!id) {
 		return NULL;
 	}
 	/**
@@ -7671,23 +7591,233 @@ int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)
 	return err;
 }
 
-#ifndef HAVE_MLX5DV_DR
-#define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
-#else
-#define MLX5_DOMAIN_SYNC_FLOW \
-	(MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
-#endif
+static int
+mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
+		    struct rte_flow_tunnel *app_tunnel,
+		    struct rte_flow_action **actions,
+		    uint32_t *num_of_actions,
+		    struct rte_flow_error *error)
+{
+	int ret;
+	struct mlx5_flow_tunnel *tunnel;
+	const char *err_msg = NULL;
+	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
 
-int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
+	if (!verdict)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+					  err_msg);
+	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
+	if (ret < 0) {
+		return rte_flow_error_set(error, ret,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+					  "failed to initialize pmd tunnel");
+	}
+	*actions = &tunnel->action;
+	*num_of_actions = 1;
+	return 0;
+}
+
+static int
+mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
+		       struct rte_flow_tunnel *app_tunnel,
+		       struct rte_flow_item **items,
+		       uint32_t *num_of_items,
+		       struct rte_flow_error *error)
 {
-	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
-	const struct mlx5_flow_driver_ops *fops;
 	int ret;
-	struct rte_flow_attr attr = { .transfer = 0 };
+	struct mlx5_flow_tunnel *tunnel;
+	const char *err_msg = NULL;
+	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
 
-	fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
-	ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
-	if (ret > 0)
-		ret = -ret;
-	return ret;
+	if (!verdict)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					  err_msg);
+	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
+	if (ret < 0) {
+		return rte_flow_error_set(error, ret,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					  "failed to initialize pmd tunnel");
+	}
+	*items = &tunnel->item;
+	*num_of_items = 1;
+	return 0;
 }
+static int
+mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
+			      struct rte_flow_item *pmd_items,
+			      uint32_t num_items, struct rte_flow_error *err)
+{
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+	struct mlx5_flow_tunnel *tun;
+
+	rte_spinlock_lock(&thub->sl);
+	LIST_FOREACH(tun, &thub->tunnels, chain) {
+		if (&tun->item == pmd_items) {
+			LIST_REMOVE(tun, chain);
+			break;
+		}
+	}
+	rte_spinlock_unlock(&thub->sl);
+	if (!tun || num_items != 1)
+		return rte_flow_error_set(err, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					  "invalid argument");
+	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
+		mlx5_flow_tunnel_free(dev, tun);
+	return 0;
+}
+
+static int
+mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
+				struct rte_flow_action *pmd_actions,
+				uint32_t num_actions,
+				struct rte_flow_error *err)
+{
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+	struct mlx5_flow_tunnel *tun;
+
+	rte_spinlock_lock(&thub->sl);
+	LIST_FOREACH(tun, &thub->tunnels, chain) {
+		if (&tun->action == pmd_actions) {
+			LIST_REMOVE(tun, chain);
+			break;
+		}
+	}
+	rte_spinlock_unlock(&thub->sl);
+	if (!tun || num_actions != 1)
+		return rte_flow_error_set(err, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					  "invalid argument");
+	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
+		mlx5_flow_tunnel_free(dev, tun);
+
+	return 0;
+}
+
+static int
+mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
+				  struct rte_mbuf *m,
+				  struct rte_flow_restore_info *info,
+				  struct rte_flow_error *err)
+{
+	uint64_t ol_flags = m->ol_flags;
+	const struct mlx5_flow_tbl_data_entry *tble;
+	const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;
+
+	if ((ol_flags & mask) != mask)
+		goto err;
+	tble = tunnel_mark_decode(dev, m->hash.fdir.hi);
+	if (!tble) {
+		DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x",
+			dev->data->port_id, m->hash.fdir.hi);
+		goto err;
+	}
+	MLX5_ASSERT(tble->tunnel);
+	memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));
+	info->group_id = tble->group_id;
+	info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |
+		      RTE_FLOW_RESTORE_INFO_GROUP_ID |
+		      RTE_FLOW_RESTORE_INFO_ENCAPSULATED;
+
+	return 0;
+
+err:
+	return rte_flow_error_set(err, EINVAL,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				  "failed to get restore info");
+}
+
+#else /* HAVE_IBV_FLOW_DV_SUPPORT */
+static int
+mlx5_flow_tunnel_decap_set(__rte_unused struct rte_eth_dev *dev,
+			   __rte_unused struct rte_flow_tunnel *app_tunnel,
+			   __rte_unused struct rte_flow_action **actions,
+			   __rte_unused uint32_t *num_of_actions,
+			   __rte_unused struct rte_flow_error *error)
+{
+	return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_match(__rte_unused struct rte_eth_dev *dev,
+		       __rte_unused struct rte_flow_tunnel *app_tunnel,
+		       __rte_unused struct rte_flow_item **items,
+		       __rte_unused uint32_t *num_of_items,
+		       __rte_unused struct rte_flow_error *error)
+{
+	return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_item_release(__rte_unused struct rte_eth_dev *dev,
+			      __rte_unused struct rte_flow_item *pmd_items,
+			      __rte_unused uint32_t num_items,
+			      __rte_unused struct rte_flow_error *err)
+{
+	return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_action_release(__rte_unused struct rte_eth_dev *dev,
+				__rte_unused struct rte_flow_action *pmd_action,
+				__rte_unused uint32_t num_actions,
+				__rte_unused struct rte_flow_error *err)
+{
+	return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_get_restore_info(__rte_unused struct rte_eth_dev *dev,
+				  __rte_unused struct rte_mbuf *m,
+				  __rte_unused struct rte_flow_restore_info *i,
+				  __rte_unused struct rte_flow_error *err)
+{
+	return -ENOTSUP;
+}
+
+static int
+flow_tunnel_add_default_miss(__rte_unused struct rte_eth_dev *dev,
+			     __rte_unused struct rte_flow *flow,
+			     __rte_unused const struct rte_flow_attr *attr,
+			     __rte_unused const struct rte_flow_action *actions,
+			     __rte_unused uint32_t flow_idx,
+			     __rte_unused struct tunnel_default_miss_ctx *ctx,
+			     __rte_unused struct rte_flow_error *error)
+{
+	return -ENOTSUP;
+}
+
+static struct mlx5_flow_tunnel *
+mlx5_find_tunnel_id(__rte_unused struct rte_eth_dev *dev,
+		    __rte_unused uint32_t id)
+{
+	return NULL;
+}
+
+static void
+mlx5_flow_tunnel_free(__rte_unused struct rte_eth_dev *dev,
+		      __rte_unused struct mlx5_flow_tunnel *tunnel)
+{
+}
+
+static uint32_t
+tunnel_flow_group_to_flow_table(__rte_unused struct rte_eth_dev *dev,
+				__rte_unused const struct mlx5_flow_tunnel *t,
+				__rte_unused uint32_t group,
+				__rte_unused uint32_t *table,
+				struct rte_flow_error *error)
+{
+	return rte_flow_error_set(error, ENOTSUP,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				  "tunnel offload requires DV support");
+}
+
+void mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh,
+			     __rte_unused  uint16_t port_id)
+{
+	return;
+}
+#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
+
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 5fac8672fc..fbc6173fcb 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -991,8 +991,13 @@ mlx5_tunnel_hub(struct rte_eth_dev *dev)
 static inline bool
 is_tunnel_offload_active(struct rte_eth_dev *dev)
 {
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	struct mlx5_priv *priv = dev->data->dev_private;
 	return !!priv->config.dv_miss_info;
+#else
+	RTE_SET_USED(dev);
+	return false;
+#endif
 }
 
 static inline bool
-- 
2.29.2


^ permalink raw reply	[flat|nested] 32+ messages in thread

* [dpdk-dev] [PATCH v4 3/6] net/mlx5: fix structure passing method in function call
  2020-11-16  9:48 ` [dpdk-dev] [PATCH v4 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
  2020-11-16  9:49   ` [dpdk-dev] [PATCH v4 1/6] net/mlx5: fix tunnel offload callback names Gregory Etelson
  2020-11-16  9:49   ` [dpdk-dev] [PATCH v4 2/6] net/mlx5: fix build with Direct Verbs disabled Gregory Etelson
@ 2020-11-16  9:49   ` Gregory Etelson
  2020-11-16  9:49   ` [dpdk-dev] [PATCH v4 4/6] net/mlx5: fix tunnel offload object allocation Gregory Etelson
                     ` (2 subsequent siblings)
  5 siblings, 0 replies; 32+ messages in thread
From: Gregory Etelson @ 2020-11-16  9:49 UTC (permalink / raw)
  To: dev; +Cc: getelson, matan, rasland, Viacheslav Ovsiienko, Shahaf Shuler

The tunnel offload implementation introduced the flow_grp_info
bit-field structure. Since the structure size is 64 bits, the code
passed that type by value in function calls.

This patch changes the passing method to pass the structure by
reference.
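
A standalone illustration of the change (toy names, not the driver's
code):

    #include <stdint.h>
    #include <stdio.h>

    struct grp_info { /* 64-bit bit-field, like flow_grp_info */
        uint64_t external:1;
        uint64_t transfer:1;
        uint64_t fdb_def_rule:1;
        uint64_t std_tbl_fix:1;
    };

    /* Before: the 8-byte structure was copied on every call. */
    static uint64_t use_by_value(struct grp_info info)
    {
        return info.transfer;
    }

    /* After: callers pass a pointer to a const structure. */
    static uint64_t use_by_reference(const struct grp_info *info)
    {
        return info->transfer;
    }

    int main(void)
    {
        struct grp_info info = { .transfer = 1 };
        printf("%lu %lu\n", (unsigned long)use_by_value(info),
               (unsigned long)use_by_reference(&info));
        return 0;
    }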

Fixes: 4ec6360de37d ("net/mlx5: implement tunnel offload")

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c    | 20 +++++++++++---------
 drivers/net/mlx5/mlx5_flow.h    |  4 ++--
 drivers/net/mlx5/mlx5_flow_dv.c | 10 +++++-----
 3 files changed, 18 insertions(+), 16 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 7d32cac384..29785e8891 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -6725,9 +6725,11 @@ mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
 
 static int
 flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table,
-		    struct flow_grp_info grp_info, struct rte_flow_error *error)
+		    const struct flow_grp_info *grp_info,
+		    struct rte_flow_error *error)
 {
-	if (grp_info.transfer && grp_info.external && grp_info.fdb_def_rule) {
+	if (grp_info->transfer && grp_info->external &&
+	    grp_info->fdb_def_rule) {
 		if (group == UINT32_MAX)
 			return rte_flow_error_set
 						(error, EINVAL,
@@ -6784,25 +6786,25 @@ int
 mlx5_flow_group_to_table(struct rte_eth_dev *dev,
 			 const struct mlx5_flow_tunnel *tunnel,
 			 uint32_t group, uint32_t *table,
-			 struct flow_grp_info grp_info,
+			 const struct flow_grp_info *grp_info,
 			 struct rte_flow_error *error)
 {
 	int ret;
 	bool standard_translation;
 
-	if (!grp_info.skip_scale && grp_info.external &&
+	if (!grp_info->skip_scale && grp_info->external &&
 	    group < MLX5_MAX_TABLES_EXTERNAL)
 		group *= MLX5_FLOW_TABLE_FACTOR;
 	if (is_tunnel_offload_active(dev)) {
-		standard_translation = !grp_info.external ||
-					grp_info.std_tbl_fix;
+		standard_translation = !grp_info->external ||
+					grp_info->std_tbl_fix;
 	} else {
 		standard_translation = true;
 	}
 	DRV_LOG(DEBUG,
 		"port %u group=%#x transfer=%d external=%d fdb_def_rule=%d translate=%s",
-		dev->data->port_id, group, grp_info.transfer,
-		grp_info.external, grp_info.fdb_def_rule,
+		dev->data->port_id, group, grp_info->transfer,
+		grp_info->external, grp_info->fdb_def_rule,
 		standard_translation ? "STANDARD" : "TUNNEL");
 	if (standard_translation)
 		ret = flow_group_to_table(dev->data->port_id, group, table,
@@ -7299,7 +7301,7 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
 	miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
 	miss_attr.group = jump_data->group;
 	ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
-				       &flow_table, grp_info, error);
+				       &flow_table, &grp_info, error);
 	if (ret)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index fbc6173fcb..c33c0fee7c 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1273,8 +1273,8 @@ tunnel_use_standard_attr_group_translate
 int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
 			     const struct mlx5_flow_tunnel *tunnel,
 			     uint32_t group, uint32_t *table,
-			     struct flow_grp_info flags,
-				 struct rte_flow_error *error);
+			     const struct flow_grp_info *flags,
+			     struct rte_flow_error *error);
 uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
 				     int tunnel, uint64_t layer_types,
 				     uint64_t hash_fields);
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 62d9ca9ffb..25ab9adee6 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -3935,7 +3935,7 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev,
 	target_group =
 		((const struct rte_flow_action_jump *)action->conf)->group;
 	ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
-				       grp_info, error);
+				       &grp_info, error);
 	if (ret)
 		return ret;
 	if (attributes->group == target_group &&
@@ -5103,7 +5103,7 @@ static int
 flow_dv_validate_attributes(struct rte_eth_dev *dev,
 			    const struct mlx5_flow_tunnel *tunnel,
 			    const struct rte_flow_attr *attributes,
-			    struct flow_grp_info grp_info,
+			    const struct flow_grp_info *grp_info,
 			    struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
@@ -5258,7 +5258,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 	}
 	grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
 				(dev, tunnel, attr, items, actions);
-	ret = flow_dv_validate_attributes(dev, tunnel, attr, grp_info, error);
+	ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
 	if (ret < 0)
 		return ret;
 	is_root = (uint64_t)ret;
@@ -9597,7 +9597,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
 	grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
 				(dev, tunnel, attr, items, actions);
 	ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
-				       grp_info, error);
+				       &grp_info, error);
 	if (ret)
 		return ret;
 	dev_flow->dv.group = table;
@@ -9944,7 +9944,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
 			ret = mlx5_flow_group_to_table(dev, tunnel,
 						       jump_group,
 						       &table,
-						       grp_info, error);
+						       &grp_info, error);
 			if (ret)
 				return ret;
 			tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
-- 
2.29.2


^ permalink raw reply	[flat|nested] 32+ messages in thread

* [dpdk-dev] [PATCH v4 4/6] net/mlx5: fix tunnel offload object allocation
  2020-11-16  9:48 ` [dpdk-dev] [PATCH v4 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
                     ` (2 preceding siblings ...)
  2020-11-16  9:49   ` [dpdk-dev] [PATCH v4 3/6] net/mlx5: fix structure passing method in function call Gregory Etelson
@ 2020-11-16  9:49   ` Gregory Etelson
  2020-11-16  9:49   ` [dpdk-dev] [PATCH v4 5/6] net/mlx5: fix tunnel offload hub multi-thread protection Gregory Etelson
  2020-11-16  9:49   ` [dpdk-dev] [PATCH v4 6/6] net/mlx5: fix crash in tunnel offload setup Gregory Etelson
  5 siblings, 0 replies; 32+ messages in thread
From: Gregory Etelson @ 2020-11-16  9:49 UTC (permalink / raw)
  To: dev
  Cc: getelson, matan, rasland, Viacheslav Ovsiienko, Shahaf Shuler,
	Xueming Li

The original patch allocated tunnel offload objects with invalid
indexes. As a result, PMD tunnel object allocation failed.

In this patch, the indexed pool provides both an index and memory for
a new tunnel offload object.
The tunnel offload ipool is also moved to DV-enabled code only.
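
A toy sketch of the single-call allocation model the fix relies on:
one zmalloc-style call returns both zeroed memory and a 1-based index,
in the spirit of mlx5_ipool_zmalloc() (the pool below is illustrative,
not the real indexed pool implementation):

    #include <stdint.h>
    #include <string.h>

    struct tunnel_obj { uint32_t flags; };

    static struct tunnel_obj pool[16];
    static int used[16];

    static struct tunnel_obj *
    pool_zmalloc(uint32_t *idx)
    {
        uint32_t i;

        for (i = 0; i < 16; i++) {
            if (!used[i]) {
                used[i] = 1;
                memset(&pool[i], 0, sizeof(pool[i]));
                *idx = i + 1; /* index 0 is reserved for failure */
                return &pool[i];
            }
        }
        return NULL; /* pool exhausted */
    }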

Fixes: f2e8093 ("net/mlx5: use indexed pool as id generator")

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5.c      | 50 ++++++++++++++++++------------------
 drivers/net/mlx5/mlx5.h      |  4 +--
 drivers/net/mlx5/mlx5_flow.c | 35 ++++++++-----------------
 3 files changed, 37 insertions(+), 52 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 43344391df..31011c3a72 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -186,7 +186,7 @@ static pthread_mutex_t mlx5_dev_ctx_list_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-	{
+	[MLX5_IPOOL_DECAP_ENCAP] = {
 		.size = sizeof(struct mlx5_flow_dv_encap_decap_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -197,7 +197,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_encap_decap_ipool",
 	},
-	{
+	[MLX5_IPOOL_PUSH_VLAN] = {
 		.size = sizeof(struct mlx5_flow_dv_push_vlan_action_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -208,7 +208,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_push_vlan_ipool",
 	},
-	{
+	[MLX5_IPOOL_TAG] = {
 		.size = sizeof(struct mlx5_flow_dv_tag_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -219,7 +219,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_tag_ipool",
 	},
-	{
+	[MLX5_IPOOL_PORT_ID] = {
 		.size = sizeof(struct mlx5_flow_dv_port_id_action_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -230,7 +230,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_port_id_ipool",
 	},
-	{
+	[MLX5_IPOOL_JUMP] = {
 		.size = sizeof(struct mlx5_flow_tbl_data_entry),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -241,7 +241,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_jump_ipool",
 	},
-	{
+	[MLX5_IPOOL_SAMPLE] = {
 		.size = sizeof(struct mlx5_flow_dv_sample_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -252,7 +252,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_sample_ipool",
 	},
-	{
+	[MLX5_IPOOL_DEST_ARRAY] = {
 		.size = sizeof(struct mlx5_flow_dv_dest_array_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -263,8 +263,19 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_dest_array_ipool",
 	},
+	[MLX5_IPOOL_TUNNEL_ID] = {
+		.size = sizeof(struct mlx5_flow_tunnel),
+		.need_lock = 1,
+		.release_mem_en = 1,
+		.type = "mlx5_tunnel_offload",
+	},
+	[MLX5_IPOOL_TNL_TBL_ID] = {
+		.size = 0,
+		.need_lock = 1,
+		.type = "mlx5_flow_tnl_tbl_ipool",
+	},
 #endif
-	{
+	[MLX5_IPOOL_MTR] = {
 		.size = sizeof(struct mlx5_flow_meter),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -275,7 +286,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_meter_ipool",
 	},
-	{
+	[MLX5_IPOOL_MCP] = {
 		.size = sizeof(struct mlx5_flow_mreg_copy_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -286,7 +297,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_mcp_ipool",
 	},
-	{
+	[MLX5_IPOOL_HRXQ] = {
 		.size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -297,7 +308,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_hrxq_ipool",
 	},
-	{
+	[MLX5_IPOOL_MLX5_FLOW] = {
 		/*
 		 * MLX5_IPOOL_MLX5_FLOW size varies for DV and VERBS flows.
 		 * It set in run time according to PCI function configuration.
@@ -312,7 +323,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_flow_handle_ipool",
 	},
-	{
+	[MLX5_IPOOL_RTE_FLOW] = {
 		.size = sizeof(struct rte_flow),
 		.trunk_size = 4096,
 		.need_lock = 1,
@@ -321,22 +332,12 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "rte_flow_ipool",
 	},
-	{
+	[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID] = {
 		.size = 0,
 		.need_lock = 1,
 		.type = "mlx5_flow_rss_id_ipool",
 	},
-	{
-		.size = 0,
-		.need_lock = 1,
-		.type = "mlx5_flow_tnl_flow_ipool",
-	},
-	{
-		.size = 0,
-		.need_lock = 1,
-		.type = "mlx5_flow_tnl_tbl_ipool",
-	},
-	{
+	[MLX5_IPOOL_RSS_SHARED_ACTIONS] = {
 		.size = sizeof(struct mlx5_shared_action_rss),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -347,7 +348,6 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_shared_action_rss",
 	},
-
 };
 
 
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 7ee63a7a14..1f2b873942 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -44,6 +44,8 @@ enum mlx5_ipool_index {
 	MLX5_IPOOL_JUMP, /* Pool for jump resource. */
 	MLX5_IPOOL_SAMPLE, /* Pool for sample resource. */
 	MLX5_IPOOL_DEST_ARRAY, /* Pool for destination array resource. */
+	MLX5_IPOOL_TUNNEL_ID, /* Pool for tunnel offload context */
+	MLX5_IPOOL_TNL_TBL_ID, /* Pool for tunnel table ID. */
 #endif
 	MLX5_IPOOL_MTR, /* Pool for meter resource. */
 	MLX5_IPOOL_MCP, /* Pool for metadata resource. */
@@ -51,8 +53,6 @@ enum mlx5_ipool_index {
 	MLX5_IPOOL_MLX5_FLOW, /* Pool for mlx5 flow handle. */
 	MLX5_IPOOL_RTE_FLOW, /* Pool for rte_flow. */
 	MLX5_IPOOL_RSS_EXPANTION_FLOW_ID, /* Pool for Queue/RSS flow ID. */
-	MLX5_IPOOL_TUNNEL_ID, /* Pool for flow tunnel ID. */
-	MLX5_IPOOL_TNL_TBL_ID, /* Pool for tunnel table ID. */
 	MLX5_IPOOL_RSS_SHARED_ACTIONS, /* Pool for RSS shared actions. */
 	MLX5_IPOOL_MAX,
 };
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 29785e8891..cd94a73e53 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -7432,14 +7432,13 @@ mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
 		      struct mlx5_flow_tunnel *tunnel)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_indexed_pool *ipool;
 
 	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
 		dev->data->port_id, tunnel->tunnel_id);
-	RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));
-	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],
-			tunnel->tunnel_id);
 	mlx5_hlist_destroy(tunnel->groups);
-	mlx5_free(tunnel);
+	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
+	mlx5_ipool_free(ipool, tunnel->tunnel_id);
 }
 
 static struct mlx5_flow_tunnel *
@@ -7461,39 +7460,25 @@ mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
 			  const struct rte_flow_tunnel *app_tunnel)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_indexed_pool *ipool;
 	struct mlx5_flow_tunnel *tunnel;
 	uint32_t id;
 
-	mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
-			  &id);
+	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
+	tunnel = mlx5_ipool_zmalloc(ipool, &id);
+	if (!tunnel)
+		return NULL;
 	if (id >= MLX5_MAX_TUNNELS) {
-		mlx5_ipool_free(priv->sh->ipool
-				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
+		mlx5_ipool_free(ipool, id);
 		DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id);
 		return NULL;
-	} else if (!id) {
-		return NULL;
-	}
-	/**
-	 * mlx5 flow tunnel is an auxlilary data structure
-	 * It's not part of IO. No need to allocate it from
-	 * huge pages pools dedicated for IO
-	 */
-	tunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel),
-			     0, SOCKET_ID_ANY);
-	if (!tunnel) {
-		mlx5_ipool_free(priv->sh->ipool
-				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
-		return NULL;
 	}
 	tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
 					   mlx5_flow_tunnel_grp2tbl_create_cb,
 					   NULL,
 					   mlx5_flow_tunnel_grp2tbl_remove_cb);
 	if (!tunnel->groups) {
-		mlx5_ipool_free(priv->sh->ipool
-				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
-		mlx5_free(tunnel);
+		mlx5_ipool_free(ipool, id);
 		return NULL;
 	}
 	tunnel->groups->ctx = priv->sh;
-- 
2.29.2


^ permalink raw reply	[flat|nested] 32+ messages in thread

* [dpdk-dev] [PATCH v4 5/6] net/mlx5: fix tunnel offload hub multi-thread protection
  2020-11-16  9:48 ` [dpdk-dev] [PATCH v4 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
                     ` (3 preceding siblings ...)
  2020-11-16  9:49   ` [dpdk-dev] [PATCH v4 4/6] net/mlx5: fix tunnel offload object allocation Gregory Etelson
@ 2020-11-16  9:49   ` Gregory Etelson
  2020-11-16  9:49   ` [dpdk-dev] [PATCH v4 6/6] net/mlx5: fix crash in tunnel offload setup Gregory Etelson
  5 siblings, 0 replies; 32+ messages in thread
From: Gregory Etelson @ 2020-11-16  9:49 UTC (permalink / raw)
  To: dev
  Cc: getelson, matan, rasland, Viacheslav Ovsiienko, Shahaf Shuler,
	Suanming Mou

The original patch removed active tunnel offload objects from the
tunnels db list without checking their reference counter values.
That action led to a PMD crash.

This patch isolates the tunnels db list behind a separate API that
manages multi-thread (MT) protection of the tunnel offload db.
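
The new accessor funnels every list traversal through one spinlock
with match/hit/miss callbacks. A generic, self-contained sketch of
that pattern (pthread-based toy types, not the driver's code; the
real accessor also takes a lock_op flag controlling where the
callbacks run):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct node { struct node *next; int id; };

    static pthread_spinlock_t sl;
    static struct node *head;

    /* Walk the list under the lock; run hit() on a match or miss()
     * otherwise, mirroring mlx5_access_tunnel_offload_db(). */
    static bool
    access_db(bool (*match)(struct node *, const void *),
              void (*hit)(struct node *, void *),
              void (*miss)(void *), void *ctx)
    {
        struct node *n;
        bool found = false;

        pthread_spin_lock(&sl);
        for (n = head; n != NULL; n = n->next) {
            if (match(n, ctx)) {
                found = true;
                break;
            }
        }
        if (found && hit)
            hit(n, ctx);
        else if (!found && miss)
            miss(ctx);
        pthread_spin_unlock(&sl);
        return found;
    }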

Fixes: e4f5880 ("net/mlx5: make tunnel hub list thread safe")

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c | 266 +++++++++++++++++++++++++----------
 drivers/net/mlx5/mlx5_flow.h |   6 +-
 2 files changed, 195 insertions(+), 77 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index cd94a73e53..eea185ba82 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -5639,11 +5639,8 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
 	if (flow->tunnel) {
 		struct mlx5_flow_tunnel *tunnel;
 
-		rte_spinlock_lock(&mlx5_tunnel_hub(dev)->sl);
 		tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
 		RTE_VERIFY(tunnel);
-		LIST_REMOVE(tunnel, chain);
-		rte_spinlock_unlock(&mlx5_tunnel_hub(dev)->sl);
 		if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
 			mlx5_flow_tunnel_free(dev, tunnel);
 	}
@@ -7220,6 +7217,15 @@ union tunnel_offload_mark {
 	};
 };
 
+static bool
+mlx5_access_tunnel_offload_db
+	(struct rte_eth_dev *dev,
+	 bool (*match)(struct rte_eth_dev *,
+		       struct mlx5_flow_tunnel *, const void *),
+	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
+	 void (*miss)(struct rte_eth_dev *, void *),
+	 void *ctx, bool lock_op);
+
 static int
 flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
 			     struct rte_flow *flow,
@@ -7441,18 +7447,72 @@ mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
 	mlx5_ipool_free(ipool, tunnel->tunnel_id);
 }
 
-static struct mlx5_flow_tunnel *
-mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+static bool
+mlx5_access_tunnel_offload_db
+	(struct rte_eth_dev *dev,
+	 bool (*match)(struct rte_eth_dev *,
+		       struct mlx5_flow_tunnel *, const void *),
+	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
+	 void (*miss)(struct rte_eth_dev *, void *),
+	 void *ctx, bool lock_op)
 {
+	bool verdict = false;
 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
+	struct mlx5_flow_tunnel *tunnel;
 
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (tun->tunnel_id == id)
+	rte_spinlock_lock(&thub->sl);
+	LIST_FOREACH(tunnel, &thub->tunnels, chain) {
+		verdict = match(dev, tunnel, (const void *)ctx);
+		if (verdict)
 			break;
 	}
+	if (!lock_op)
+		rte_spinlock_unlock(&thub->sl);
+	if (verdict && hit)
+		hit(dev, tunnel, ctx);
+	if (!verdict && miss)
+		miss(dev, ctx);
+	if (lock_op)
+		rte_spinlock_unlock(&thub->sl);
 
-	return tun;
+	return verdict;
+}
+
+struct tunnel_db_find_tunnel_id_ctx {
+	uint32_t tunnel_id;
+	struct mlx5_flow_tunnel *tunnel;
+};
+
+static bool
+find_tunnel_id_match(struct rte_eth_dev *dev,
+		     struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+	const struct tunnel_db_find_tunnel_id_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	return tunnel->tunnel_id == ctx->tunnel_id;
+}
+
+static void
+find_tunnel_id_hit(struct rte_eth_dev *dev,
+		   struct mlx5_flow_tunnel *tunnel, void *x)
+{
+	struct tunnel_db_find_tunnel_id_ctx *ctx = x;
+	RTE_SET_USED(dev);
+	ctx->tunnel = tunnel;
+}
+
+static struct mlx5_flow_tunnel *
+mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+{
+	struct tunnel_db_find_tunnel_id_ctx ctx = {
+		.tunnel_id = id,
+	};
+
+	mlx5_access_tunnel_offload_db(dev, find_tunnel_id_match,
+				      find_tunnel_id_hit, NULL, &ctx, true);
+
+	return ctx.tunnel;
 }
 
 static struct mlx5_flow_tunnel *
@@ -7500,38 +7560,60 @@ mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
 	return tunnel;
 }
 
+struct tunnel_db_get_tunnel_ctx {
+	const struct rte_flow_tunnel *app_tunnel;
+	struct mlx5_flow_tunnel *tunnel;
+};
+
+static bool get_tunnel_match(struct rte_eth_dev *dev,
+			     struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+	const struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	return !memcmp(ctx->app_tunnel, &tunnel->app_tunnel,
+		       sizeof(*ctx->app_tunnel));
+}
+
+static void get_tunnel_hit(struct rte_eth_dev *dev,
+			   struct mlx5_flow_tunnel *tunnel, void *x)
+{
+	/* called under tunnel spinlock protection */
+	struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	tunnel->refctn++;
+	ctx->tunnel = tunnel;
+}
+
+static void get_tunnel_miss(struct rte_eth_dev *dev, void *x)
+{
+	/* called under tunnel spinlock protection */
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+	struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+	rte_spinlock_unlock(&thub->sl);
+	ctx->tunnel = mlx5_flow_tunnel_allocate(dev, ctx->app_tunnel);
+	ctx->tunnel->refctn = 1;
+	rte_spinlock_lock(&thub->sl);
+	if (ctx->tunnel)
+		LIST_INSERT_HEAD(&thub->tunnels, ctx->tunnel, chain);
+}
+
+
 static int
 mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
 		     const struct rte_flow_tunnel *app_tunnel,
 		     struct mlx5_flow_tunnel **tunnel)
 {
-	int ret;
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
-
-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (!memcmp(app_tunnel, &tun->app_tunnel,
-			    sizeof(*app_tunnel))) {
-			*tunnel = tun;
-			ret = 0;
-			break;
-		}
-	}
-	if (!tun) {
-		tun = mlx5_flow_tunnel_allocate(dev, app_tunnel);
-		if (tun) {
-			LIST_INSERT_HEAD(&thub->tunnels, tun, chain);
-			*tunnel = tun;
-		} else {
-			ret = -ENOMEM;
-		}
-	}
-	rte_spinlock_unlock(&thub->sl);
-	if (tun)
-		__atomic_add_fetch(&tun->refctn, 1, __ATOMIC_RELAXED);
+	struct tunnel_db_get_tunnel_ctx ctx = {
+		.app_tunnel = app_tunnel,
+	};
 
-	return ret;
+	mlx5_access_tunnel_offload_db(dev, get_tunnel_match, get_tunnel_hit,
+				      get_tunnel_miss, &ctx, true);
+	*tunnel = ctx.tunnel;
+	return ctx.tunnel ? 0 : -ENOMEM;
 }
 
 void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id)
@@ -7631,56 +7713,88 @@ mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
 	*num_of_items = 1;
 	return 0;
 }
+
+struct tunnel_db_element_release_ctx {
+	struct rte_flow_item *items;
+	struct rte_flow_action *actions;
+	uint32_t num_elements;
+	struct rte_flow_error *error;
+	int ret;
+};
+
+static bool
+tunnel_element_release_match(struct rte_eth_dev *dev,
+			     struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+	const struct tunnel_db_element_release_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	if (ctx->num_elements != 1)
+		return false;
+	else if (ctx->items)
+		return ctx->items == &tunnel->item;
+	else if (ctx->actions)
+		return ctx->actions == &tunnel->action;
+
+	return false;
+}
+
+static void
+tunnel_element_release_hit(struct rte_eth_dev *dev,
+			   struct mlx5_flow_tunnel *tunnel, void *x)
+{
+	struct tunnel_db_element_release_ctx *ctx = x;
+
+	ctx->ret = 0;
+	if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
+		mlx5_flow_tunnel_free(dev, tunnel);
+}
+
+static void
+tunnel_element_release_miss(struct rte_eth_dev *dev, void *x)
+{
+	struct tunnel_db_element_release_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	ctx->ret = rte_flow_error_set(ctx->error, EINVAL,
+				      RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				      "invalid argument");
+}
+
 static int
 mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
-			      struct rte_flow_item *pmd_items,
-			      uint32_t num_items, struct rte_flow_error *err)
-{
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
+		       struct rte_flow_item *pmd_items,
+		       uint32_t num_items, struct rte_flow_error *err)
+{
+	struct tunnel_db_element_release_ctx ctx = {
+		.items = pmd_items,
+		.actions = NULL,
+		.num_elements = num_items,
+		.error = err,
+	};
 
-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (&tun->item == pmd_items) {
-			LIST_REMOVE(tun, chain);
-			break;
-		}
-	}
-	rte_spinlock_unlock(&thub->sl);
-	if (!tun || num_items != 1)
-		return rte_flow_error_set(err, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  "invalid argument");
-	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
-		mlx5_flow_tunnel_free(dev, tun);
-	return 0;
+	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
+				      tunnel_element_release_hit,
+				      tunnel_element_release_miss, &ctx, false);
+
+	return ctx.ret;
 }
 
 static int
 mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
-				struct rte_flow_action *pmd_actions,
-				uint32_t num_actions,
-				struct rte_flow_error *err)
-{
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
+			 struct rte_flow_action *pmd_actions,
+			 uint32_t num_actions, struct rte_flow_error *err)
+{
+	struct tunnel_db_element_release_ctx ctx = {
+		.items = NULL,
+		.actions = pmd_actions,
+		.num_elements = num_actions,
+		.error = err,
+	};
 
-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (&tun->action == pmd_actions) {
-			LIST_REMOVE(tun, chain);
-			break;
-		}
-	}
-	rte_spinlock_unlock(&thub->sl);
-	if (!tun || num_actions != 1)
-		return rte_flow_error_set(err, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  "invalid argument");
-	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
-		mlx5_flow_tunnel_free(dev, tun);
+	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
+				      tunnel_element_release_hit,
+				      tunnel_element_release_miss, &ctx, false);
 
-	return 0;
+	return ctx.ret;
 }
 
 static int
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index c33c0fee7c..f64384217f 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -950,8 +950,12 @@ struct mlx5_flow_tunnel {
 
 /** PMD tunnel related context */
 struct mlx5_flow_tunnel_hub {
+	/* Tunnels list.
+	 * Access to the list MUST be MT protected.
+	 */
 	LIST_HEAD(, mlx5_flow_tunnel) tunnels;
-	rte_spinlock_t sl;			/* Tunnel list spinlock. */
+	/* Spinlock protecting access to the tunnels list. */
+	rte_spinlock_t sl;
 	struct mlx5_hlist *groups;		/** non tunnel groups */
 };
 
-- 
2.29.2


^ permalink raw reply	[flat|nested] 32+ messages in thread

* [dpdk-dev] [PATCH v4 6/6] net/mlx5: fix crash in tunnel offload setup
  2020-11-16  9:48 ` [dpdk-dev] [PATCH v4 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
                     ` (4 preceding siblings ...)
  2020-11-16  9:49   ` [dpdk-dev] [PATCH v4 5/6] net/mlx5: fix tunnel offload hub multi-thread protection Gregory Etelson
@ 2020-11-16  9:49   ` Gregory Etelson
  5 siblings, 0 replies; 32+ messages in thread
From: Gregory Etelson @ 2020-11-16  9:49 UTC (permalink / raw)
  To: dev
  Cc: getelson, matan, rasland, Viacheslav Ovsiienko, Shahaf Shuler,
	Xueming Li

The new flow table resource management API triggered a PMD crash in
tunnel offload mode when a tunnel match flow rule was inserted before
the tunnel set rule.

The reason for the crash was double flow table registration. The table
was registered first by the tunnel offload code and once more by the
PMD code, as part of general table processing. The table reference
counter was decremented only once during rule destruction, causing a
resource leak that later triggered the crash.
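
The refcount imbalance can be sketched generically (a minimal sketch;
the names are illustrative, not the mlx5 API):

#include <stdlib.h>

struct tbl {
	int refcnt;
};

static void tbl_get(struct tbl *t)
{
	t->refcnt++;
}

static void tbl_put(struct tbl *t)
{
	if (--t->refcnt == 0)
		free(t);	/* table released on the last reference */
}

Before the fix, rule creation effectively called tbl_get() twice - once
from the tunnel offload code and once from the generic table processing
- while rule destruction called tbl_put() only once, so the reference
counter never dropped to zero and the table leaked.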

The patch updates the PMD registration with tunnel offload parameters
and removes the table registration from the tunnel-related code.

Fixes: 663ad57dabb2 ("net/mlx5: make flow table cache thread safe")

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c    |  2 +-
 drivers/net/mlx5/mlx5_flow_dv.c | 39 +++++++++++++++++----------------
 2 files changed, 21 insertions(+), 20 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index eea185ba82..cf840359ba 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -6799,7 +6799,7 @@ mlx5_flow_group_to_table(struct rte_eth_dev *dev,
 		standard_translation = true;
 	}
 	DRV_LOG(DEBUG,
-		"port %u group=%#x transfer=%d external=%d fdb_def_rule=%d translate=%s",
+		"port %u group=%u transfer=%d external=%d fdb_def_rule=%d translate=%s",
 		dev->data->port_id, group, grp_info->transfer,
 		grp_info->external, grp_info->fdb_def_rule,
 		standard_translation ? "STANDARD" : "TUNNEL");
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 25ab9adee6..5e230a3c25 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -8042,6 +8042,8 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
 				   "cannot get table");
 		return NULL;
 	}
+	DRV_LOG(DEBUG, "Table_id %u tunnel %u group %u registered.",
+		table_id, tunnel ? tunnel->tunnel_id : 0, group_id);
 	tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
 	return &tbl_data->tbl;
 }
@@ -8080,7 +8082,7 @@ flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
 		if (he)
 			mlx5_hlist_unregister(tunnel_grp_hash, he);
 		DRV_LOG(DEBUG,
-			"Table_id %#x tunnel %u group %u released.",
+			"Table_id %u tunnel %u group %u released.",
 			table_id,
 			tbl_data->tunnel ?
 			tbl_data->tunnel->tunnel_id : 0,
@@ -8192,6 +8194,8 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
 			 struct mlx5_flow_dv_matcher *ref,
 			 union mlx5_flow_tbl_key *key,
 			 struct mlx5_flow *dev_flow,
+			 const struct mlx5_flow_tunnel *tunnel,
+			 uint32_t group_id,
 			 struct rte_flow_error *error)
 {
 	struct mlx5_cache_entry *entry;
@@ -8203,8 +8207,14 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
 		.data = ref,
 	};
 
-	tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
-				       key->domain, false, NULL, 0, 0, error);
+	/*
+	 * The tunnel offload API requires this registration for cases when
+	 * a tunnel match rule was inserted before the tunnel set rule.
+	 */
+	tbl = flow_dv_tbl_resource_get(dev, key->table_id,
+				       key->direction, key->domain,
+				       dev_flow->external, tunnel,
+				       group_id, 0, error);
 	if (!tbl)
 		return -rte_errno;	/* No need to refill the error info */
 	tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
@@ -9611,10 +9621,14 @@ flow_dv_translate(struct rte_eth_dev *dev,
 		/*
 		 * do not add decap action if match rule drops packet
 		 * HW rejects rules with decap & drop
+		 *
+		 * if the tunnel match rule was inserted before the matching
+		 * tunnel set rule, the flow table used in the match rule must
+		 * be registered. the current implementation handles that in
+		 * flow_dv_matcher_register() at the end of this function.
 		 */
 		bool add_decap = true;
 		const struct rte_flow_action *ptr = actions;
-		struct mlx5_flow_tbl_resource *tbl;
 
 		for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
 			if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
@@ -9631,20 +9645,6 @@ flow_dv_translate(struct rte_eth_dev *dev,
 					dev_flow->dv.encap_decap->action;
 			action_flags |= MLX5_FLOW_ACTION_DECAP;
 		}
-		/*
-		 * bind table_id with <group, table> for tunnel match rule.
-		 * Tunnel set rule establishes that bind in JUMP action handler.
-		 * Required for scenario when application creates tunnel match
-		 * rule before tunnel set rule.
-		 */
-		tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
-					       attr->transfer,
-					       !!dev_flow->external, tunnel,
-					       attr->group, 0, error);
-		if (!tbl)
-			return rte_flow_error_set
-			       (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
-			       actions, "cannot register tunnel group");
 	}
 	for (; !actions_end ; actions++) {
 		const struct rte_flow_action_queue *queue;
@@ -10474,7 +10474,8 @@ flow_dv_translate(struct rte_eth_dev *dev,
 	tbl_key.domain = attr->transfer;
 	tbl_key.direction = attr->egress;
 	tbl_key.table_id = dev_flow->dv.group;
-	if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
+	if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
+				     tunnel, attr->group, error))
 		return -rte_errno;
 	return 0;
 }
-- 
2.29.2


^ permalink raw reply	[flat|nested] 32+ messages in thread

* [dpdk-dev] [PATCH v5 0/6] restore tunnel offload functionality in mlx5
  2020-11-11  7:14 [dpdk-dev] [PATCH 0/4] restore tunnel offload functionality in mlx5 Gregory Etelson
                   ` (5 preceding siblings ...)
  2020-11-16  9:48 ` [dpdk-dev] [PATCH v4 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
@ 2020-11-16 14:02 ` Gregory Etelson
  2020-11-16 14:02   ` [dpdk-dev] [PATCH v5 1/6] net/mlx5: fix tunnel offload callback names Gregory Etelson
                     ` (6 more replies)
  6 siblings, 7 replies; 32+ messages in thread
From: Gregory Etelson @ 2020-11-16 14:02 UTC (permalink / raw)
  To: dev; +Cc: getelson, matan, rasland

Post-merge fixes that restore tunnel offload functionality in mlx5.

v2: resolve compilation error in Ubuntu inbox.
v3: re-arrange patches to prevent Ubuntu compilation error.
v4: more compilation fixes.
v5: rebase on 20.11-rc4

Gregory Etelson (6):
  net/mlx5: fix tunnel offload callback names
  net/mlx5: fix build with Direct Verbs disabled
  net/mlx5: fix structure passing method in function call
  net/mlx5: fix tunnel offload object allocation
  net/mlx5: fix tunnel offload hub multi-thread protection
  net/mlx5: fix crash in tunnel offload setup

 drivers/net/mlx5/mlx5.c         |   50 +-
 drivers/net/mlx5/mlx5.h         |    4 +-
 drivers/net/mlx5/mlx5_flow.c    | 1274 ++++++++++++++++++-------------
 drivers/net/mlx5/mlx5_flow.h    |   15 +-
 drivers/net/mlx5/mlx5_flow_dv.c |   49 +-
 5 files changed, 817 insertions(+), 575 deletions(-)

-- 
2.29.2


^ permalink raw reply	[flat|nested] 32+ messages in thread

* [dpdk-dev] [PATCH v5 1/6] net/mlx5: fix tunnel offload callback names
  2020-11-16 14:02 ` [dpdk-dev] [PATCH v5 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
@ 2020-11-16 14:02   ` Gregory Etelson
  2020-11-16 14:02   ` [dpdk-dev] [PATCH v5 2/6] net/mlx5: fix build with Direct Verbs disabled Gregory Etelson
                     ` (5 subsequent siblings)
  6 siblings, 0 replies; 32+ messages in thread
From: Gregory Etelson @ 2020-11-16 14:02 UTC (permalink / raw)
  To: dev; +Cc: getelson, matan, rasland, Viacheslav Ovsiienko, Shahaf Shuler

Fix the mlx5_flow_tunnel_action_release and mlx5_flow_tunnel_item_release
callback names to match the tunnel offload naming pattern.

Fixes: 4ec6360de37d ("net/mlx5: implement tunnel offload")

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 324349ed19..98559ece2b 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -662,9 +662,9 @@ mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
 }
 
 static int
-mlx5_flow_item_release(struct rte_eth_dev *dev,
-		       struct rte_flow_item *pmd_items,
-		       uint32_t num_items, struct rte_flow_error *err)
+mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
+			      struct rte_flow_item *pmd_items,
+			      uint32_t num_items, struct rte_flow_error *err)
 {
 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
 	struct mlx5_flow_tunnel *tun;
@@ -687,9 +687,10 @@ mlx5_flow_item_release(struct rte_eth_dev *dev,
 }
 
 static int
-mlx5_flow_action_release(struct rte_eth_dev *dev,
-			 struct rte_flow_action *pmd_actions,
-			 uint32_t num_actions, struct rte_flow_error *err)
+mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
+				struct rte_flow_action *pmd_actions,
+				uint32_t num_actions,
+				struct rte_flow_error *err)
 {
 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
 	struct mlx5_flow_tunnel *tun;
@@ -760,8 +761,8 @@ static const struct rte_flow_ops mlx5_flow_ops = {
 	.shared_action_query = mlx5_shared_action_query,
 	.tunnel_decap_set = mlx5_flow_tunnel_decap_set,
 	.tunnel_match = mlx5_flow_tunnel_match,
-	.tunnel_action_decap_release = mlx5_flow_action_release,
-	.tunnel_item_release = mlx5_flow_item_release,
+	.tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
+	.tunnel_item_release = mlx5_flow_tunnel_item_release,
 	.get_restore_info = mlx5_flow_tunnel_get_restore_info,
 };
 
-- 
2.29.2


^ permalink raw reply	[flat|nested] 32+ messages in thread

* [dpdk-dev] [PATCH v5 2/6] net/mlx5: fix build with Direct Verbs disabled
  2020-11-16 14:02 ` [dpdk-dev] [PATCH v5 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
  2020-11-16 14:02   ` [dpdk-dev] [PATCH v5 1/6] net/mlx5: fix tunnel offload callback names Gregory Etelson
@ 2020-11-16 14:02   ` Gregory Etelson
  2020-11-17 14:33     ` Ferruh Yigit
  2020-11-16 14:02   ` [dpdk-dev] [PATCH v5 3/6] net/mlx5: fix structure passing method in function call Gregory Etelson
                     ` (4 subsequent siblings)
  6 siblings, 1 reply; 32+ messages in thread
From: Gregory Etelson @ 2020-11-16 14:02 UTC (permalink / raw)
  To: dev; +Cc: getelson, matan, rasland, Viacheslav Ovsiienko, Shahaf Shuler

The tunnel offload API is implemented for the Direct Verbs environment
only. This patch re-arranges tunnel-related functions so that they
compile in non-Direct Verbs setups, preventing build failures. The
patch does not introduce new functions.
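
The guard follows the usual DV pattern; a minimal sketch (tunnel_op is
a placeholder name, not a driver symbol):

#include <errno.h>

#ifdef HAVE_IBV_FLOW_DV_SUPPORT
static int tunnel_op(void)
{
	return 0;	/* the real DV implementation lives here */
}
#else /* HAVE_IBV_FLOW_DV_SUPPORT */
static int tunnel_op(void)
{
	return -ENOTSUP;	/* stub keeps non-DV builds compiling */
}
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */

The tunnel offload callbacks stay registered in mlx5_flow_ops either
way and report -ENOTSUP at run time when Direct Verbs support is
compiled out.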

Fixes: 4ec6360de37d ("net/mlx5: implement tunnel offload")

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c | 1048 +++++++++++++++++++---------------
 drivers/net/mlx5/mlx5_flow.h |    5 +
 2 files changed, 594 insertions(+), 459 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 98559ece2b..c8152cb8b1 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -33,16 +33,35 @@
 #include "mlx5_common_os.h"
 #include "rte_pmd_mlx5.h"
 
+struct tunnel_default_miss_ctx {
+	uint16_t *queue;
+	__extension__
+	union {
+		struct rte_flow_action_rss action_rss;
+		struct rte_flow_action_queue miss_queue;
+		struct rte_flow_action_jump miss_jump;
+		uint8_t raw[0];
+	};
+};
+
+static int
+flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
+			     struct rte_flow *flow,
+			     const struct rte_flow_attr *attr,
+			     const struct rte_flow_action *app_actions,
+			     uint32_t flow_idx,
+			     struct tunnel_default_miss_ctx *ctx,
+			     struct rte_flow_error *error);
 static struct mlx5_flow_tunnel *
 mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id);
 static void
 mlx5_flow_tunnel_free(struct rte_eth_dev *dev, struct mlx5_flow_tunnel *tunnel);
-static const struct mlx5_flow_tbl_data_entry  *
-tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark);
-static int
-mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
-		     const struct rte_flow_tunnel *app_tunnel,
-		     struct mlx5_flow_tunnel **tunnel);
+static uint32_t
+tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
+				const struct mlx5_flow_tunnel *tunnel,
+				uint32_t group, uint32_t *table,
+				struct rte_flow_error *error);
+
 static struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);
 static void mlx5_flow_pop_thread_workspace(void);
 
@@ -580,171 +599,32 @@ static int mlx5_shared_action_query
 				 const struct rte_flow_shared_action *action,
 				 void *data,
 				 struct rte_flow_error *error);
-static inline bool
-mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
-			  struct rte_flow_tunnel *tunnel,
-			  const char *err_msg)
-{
-	err_msg = NULL;
-	if (!is_tunnel_offload_active(dev)) {
-		err_msg = "tunnel offload was not activated";
-		goto out;
-	} else if (!tunnel) {
-		err_msg = "no application tunnel";
-		goto out;
-	}
-
-	switch (tunnel->type) {
-	default:
-		err_msg = "unsupported tunnel type";
-		goto out;
-	case RTE_FLOW_ITEM_TYPE_VXLAN:
-		break;
-	}
-
-out:
-	return !err_msg;
-}
-
-
 static int
 mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
 		    struct rte_flow_tunnel *app_tunnel,
 		    struct rte_flow_action **actions,
 		    uint32_t *num_of_actions,
-		    struct rte_flow_error *error)
-{
-	int ret;
-	struct mlx5_flow_tunnel *tunnel;
-	const char *err_msg = NULL;
-	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
-
-	if (!verdict)
-		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
-					  err_msg);
-	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
-	if (ret < 0) {
-		return rte_flow_error_set(error, ret,
-					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
-					  "failed to initialize pmd tunnel");
-	}
-	*actions = &tunnel->action;
-	*num_of_actions = 1;
-	return 0;
-}
-
+		    struct rte_flow_error *error);
 static int
 mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
 		       struct rte_flow_tunnel *app_tunnel,
 		       struct rte_flow_item **items,
 		       uint32_t *num_of_items,
-		       struct rte_flow_error *error)
-{
-	int ret;
-	struct mlx5_flow_tunnel *tunnel;
-	const char *err_msg = NULL;
-	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, err_msg);
-
-	if (!verdict)
-		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  err_msg);
-	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
-	if (ret < 0) {
-		return rte_flow_error_set(error, ret,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  "failed to initialize pmd tunnel");
-	}
-	*items = &tunnel->item;
-	*num_of_items = 1;
-	return 0;
-}
-
+		       struct rte_flow_error *error);
 static int
 mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
 			      struct rte_flow_item *pmd_items,
-			      uint32_t num_items, struct rte_flow_error *err)
-{
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
-
-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (&tun->item == pmd_items) {
-			LIST_REMOVE(tun, chain);
-			break;
-		}
-	}
-	rte_spinlock_unlock(&thub->sl);
-	if (!tun || num_items != 1)
-		return rte_flow_error_set(err, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  "invalid argument");
-	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
-		mlx5_flow_tunnel_free(dev, tun);
-	return 0;
-}
-
+			      uint32_t num_items, struct rte_flow_error *err);
 static int
 mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
 				struct rte_flow_action *pmd_actions,
 				uint32_t num_actions,
-				struct rte_flow_error *err)
-{
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
-
-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (&tun->action == pmd_actions) {
-			LIST_REMOVE(tun, chain);
-			break;
-		}
-	}
-	rte_spinlock_unlock(&thub->sl);
-	if (!tun || num_actions != 1)
-		return rte_flow_error_set(err, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  "invalid argument");
-	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
-		mlx5_flow_tunnel_free(dev, tun);
-
-	return 0;
-}
-
+				struct rte_flow_error *err);
 static int
 mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
 				  struct rte_mbuf *m,
 				  struct rte_flow_restore_info *info,
-				  struct rte_flow_error *err)
-{
-	uint64_t ol_flags = m->ol_flags;
-	const struct mlx5_flow_tbl_data_entry *tble;
-	const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;
-
-	if ((ol_flags & mask) != mask)
-		goto err;
-	tble = tunnel_mark_decode(dev, m->hash.fdir.hi);
-	if (!tble) {
-		DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x",
-			dev->data->port_id, m->hash.fdir.hi);
-		goto err;
-	}
-	MLX5_ASSERT(tble->tunnel);
-	memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));
-	info->group_id = tble->group_id;
-	info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |
-		      RTE_FLOW_RESTORE_INFO_GROUP_ID |
-		      RTE_FLOW_RESTORE_INFO_ENCAPSULATED;
-
-	return 0;
-
-err:
-	return rte_flow_error_set(err, EINVAL,
-				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-				  "failed to get restore info");
-}
+				  struct rte_flow_error *err);
 
 static const struct rte_flow_ops mlx5_flow_ops = {
 	.validate = mlx5_flow_validate,
@@ -4160,174 +4040,38 @@ flow_hairpin_split(struct rte_eth_dev *dev,
 	return 0;
 }
 
-__extension__
-union tunnel_offload_mark {
-	uint32_t val;
-	struct {
-		uint32_t app_reserve:8;
-		uint32_t table_id:15;
-		uint32_t transfer:1;
-		uint32_t _unused_:8;
-	};
-};
-
-struct tunnel_default_miss_ctx {
-	uint16_t *queue;
-	__extension__
-	union {
-		struct rte_flow_action_rss action_rss;
-		struct rte_flow_action_queue miss_queue;
-		struct rte_flow_action_jump miss_jump;
-		uint8_t raw[0];
-	};
-};
-
+/**
+ * The last stage of splitting chain, just creates the subflow
+ * without any modification.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] flow
+ *   Parent flow structure pointer.
+ * @param[in, out] sub_flow
+ *   Pointer to return the created subflow, may be NULL.
+ * @param[in] attr
+ *   Flow rule attributes.
+ * @param[in] items
+ *   Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ *   Associated actions (list terminated by the END action).
+ * @param[in] flow_split_info
+ *   Pointer to flow split info structure.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ * @return
+ *   0 on success, negative value otherwise
+ */
 static int
-flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
-			     struct rte_flow *flow,
-			     const struct rte_flow_attr *attr,
-			     const struct rte_flow_action *app_actions,
-			     uint32_t flow_idx,
-			     struct tunnel_default_miss_ctx *ctx,
-			     struct rte_flow_error *error)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow *dev_flow;
-	struct rte_flow_attr miss_attr = *attr;
-	const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;
-	const struct rte_flow_item miss_items[2] = {
-		{
-			.type = RTE_FLOW_ITEM_TYPE_ETH,
-			.spec = NULL,
-			.last = NULL,
-			.mask = NULL
-		},
-		{
-			.type = RTE_FLOW_ITEM_TYPE_END,
-			.spec = NULL,
-			.last = NULL,
-			.mask = NULL
-		}
-	};
-	union tunnel_offload_mark mark_id;
-	struct rte_flow_action_mark miss_mark;
-	struct rte_flow_action miss_actions[3] = {
-		[0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },
-		[2] = { .type = RTE_FLOW_ACTION_TYPE_END,  .conf = NULL }
-	};
-	const struct rte_flow_action_jump *jump_data;
-	uint32_t i, flow_table = 0; /* prevent compilation warning */
-	struct flow_grp_info grp_info = {
-		.external = 1,
-		.transfer = attr->transfer,
-		.fdb_def_rule = !!priv->fdb_def_rule,
-		.std_tbl_fix = 0,
-	};
-	int ret;
-
-	if (!attr->transfer) {
-		uint32_t q_size;
-
-		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;
-		q_size = priv->reta_idx_n * sizeof(ctx->queue[0]);
-		ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,
-					 0, SOCKET_ID_ANY);
-		if (!ctx->queue)
-			return rte_flow_error_set
-				(error, ENOMEM,
-				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-				NULL, "invalid default miss RSS");
-		ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
-		ctx->action_rss.level = 0,
-		ctx->action_rss.types = priv->rss_conf.rss_hf,
-		ctx->action_rss.key_len = priv->rss_conf.rss_key_len,
-		ctx->action_rss.queue_num = priv->reta_idx_n,
-		ctx->action_rss.key = priv->rss_conf.rss_key,
-		ctx->action_rss.queue = ctx->queue;
-		if (!priv->reta_idx_n || !priv->rxqs_n)
-			return rte_flow_error_set
-				(error, EINVAL,
-				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-				NULL, "invalid port configuration");
-		if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
-			ctx->action_rss.types = 0;
-		for (i = 0; i != priv->reta_idx_n; ++i)
-			ctx->queue[i] = (*priv->reta_idx)[i];
-	} else {
-		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;
-		ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;
-	}
-	miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;
-	for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);
-	jump_data = app_actions->conf;
-	miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
-	miss_attr.group = jump_data->group;
-	ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
-				       &flow_table, grp_info, error);
-	if (ret)
-		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
-					  NULL, "invalid tunnel id");
-	mark_id.app_reserve = 0;
-	mark_id.table_id = tunnel_flow_tbl_to_id(flow_table);
-	mark_id.transfer = !!attr->transfer;
-	mark_id._unused_ = 0;
-	miss_mark.id = mark_id.val;
-	dev_flow = flow_drv_prepare(dev, flow, &miss_attr,
-				    miss_items, miss_actions, flow_idx, error);
-	if (!dev_flow)
-		return -rte_errno;
-	dev_flow->flow = flow;
-	dev_flow->external = true;
-	dev_flow->tunnel = tunnel;
-	/* Subflow object was created, we must include one in the list. */
-	SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
-		      dev_flow->handle, next);
-	DRV_LOG(DEBUG,
-		"port %u tunnel type=%d id=%u miss rule priority=%u group=%u",
-		dev->data->port_id, tunnel->app_tunnel.type,
-		tunnel->tunnel_id, miss_attr.priority, miss_attr.group);
-	ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,
-				  miss_actions, error);
-	if (!ret)
-		ret = flow_mreg_update_copy_table(dev, flow, miss_actions,
-						  error);
-
-	return ret;
-}
-
-/**
- * The last stage of splitting chain, just creates the subflow
- * without any modification.
- *
- * @param[in] dev
- *   Pointer to Ethernet device.
- * @param[in] flow
- *   Parent flow structure pointer.
- * @param[in, out] sub_flow
- *   Pointer to return the created subflow, may be NULL.
- * @param[in] attr
- *   Flow rule attributes.
- * @param[in] items
- *   Pattern specification (list terminated by the END pattern item).
- * @param[in] actions
- *   Associated actions (list terminated by the END action).
- * @param[in] flow_split_info
- *   Pointer to flow split info structure.
- * @param[out] error
- *   Perform verbose error reporting if not NULL.
- * @return
- *   0 on success, negative value otherwise
- */
-static int
-flow_create_split_inner(struct rte_eth_dev *dev,
-			struct rte_flow *flow,
-			struct mlx5_flow **sub_flow,
-			const struct rte_flow_attr *attr,
-			const struct rte_flow_item items[],
-			const struct rte_flow_action actions[],
-			struct mlx5_flow_split_info *flow_split_info,
-			struct rte_flow_error *error)
+flow_create_split_inner(struct rte_eth_dev *dev,
+			struct rte_flow *flow,
+			struct mlx5_flow **sub_flow,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item items[],
+			const struct rte_flow_action actions[],
+			struct mlx5_flow_split_info *flow_split_info,
+			struct rte_flow_error *error)
 {
 	struct mlx5_flow *dev_flow;
 
@@ -6953,99 +6697,6 @@ mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
 	sh->cmng.pending_queries--;
 }
 
-static const struct mlx5_flow_tbl_data_entry  *
-tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_dev_ctx_shared *sh = priv->sh;
-	struct mlx5_hlist_entry *he;
-	union tunnel_offload_mark mbits = { .val = mark };
-	union mlx5_flow_tbl_key table_key = {
-		{
-			.table_id = tunnel_id_to_flow_tbl(mbits.table_id),
-			.dummy = 0,
-			.domain = !!mbits.transfer,
-			.direction = 0,
-		}
-	};
-	he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
-	return he ?
-	       container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
-}
-
-static void
-mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,
-				   struct mlx5_hlist_entry *entry)
-{
-	struct mlx5_dev_ctx_shared *sh = list->ctx;
-	struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
-
-	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
-			tunnel_flow_tbl_to_id(tte->flow_table));
-	mlx5_free(tte);
-}
-
-static struct mlx5_hlist_entry *
-mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list,
-				   uint64_t key __rte_unused,
-				   void *ctx __rte_unused)
-{
-	struct mlx5_dev_ctx_shared *sh = list->ctx;
-	struct tunnel_tbl_entry *tte;
-
-	tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
-			  sizeof(*tte), 0,
-			  SOCKET_ID_ANY);
-	if (!tte)
-		goto err;
-	mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
-			  &tte->flow_table);
-	if (tte->flow_table >= MLX5_MAX_TABLES) {
-		DRV_LOG(ERR, "Tunnel TBL ID %d exceed max limit.",
-			tte->flow_table);
-		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
-				tte->flow_table);
-		goto err;
-	} else if (!tte->flow_table) {
-		goto err;
-	}
-	tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
-	return &tte->hash;
-err:
-	if (tte)
-		mlx5_free(tte);
-	return NULL;
-}
-
-static uint32_t
-tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
-				const struct mlx5_flow_tunnel *tunnel,
-				uint32_t group, uint32_t *table,
-				struct rte_flow_error *error)
-{
-	struct mlx5_hlist_entry *he;
-	struct tunnel_tbl_entry *tte;
-	union tunnel_tbl_key key = {
-		.tunnel_id = tunnel ? tunnel->tunnel_id : 0,
-		.group = group
-	};
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_hlist *group_hash;
-
-	group_hash = tunnel ? tunnel->groups : thub->groups;
-	he = mlx5_hlist_register(group_hash, key.val, NULL);
-	if (!he)
-		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
-					  NULL,
-					  "tunnel group index not supported");
-	tte = container_of(he, typeof(*tte), hash);
-	*table = tte->flow_table;
-	DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x",
-		dev->data->port_id, key.tunnel_id, group, *table);
-	return 0;
-}
-
 static int
 flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table,
 		    struct flow_grp_info grp_info, struct rte_flow_error *error)
@@ -7505,45 +7156,288 @@ mlx5_shared_action_flush(struct rte_eth_dev *dev)
 	return ret;
 }
 
-static void
-mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
-		      struct mlx5_flow_tunnel *tunnel)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-
-	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
-		dev->data->port_id, tunnel->tunnel_id);
-	RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));
-	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],
-			tunnel->tunnel_id);
-	mlx5_hlist_destroy(tunnel->groups);
-	mlx5_free(tunnel);
-}
+#ifndef HAVE_MLX5DV_DR
+#define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
+#else
+#define MLX5_DOMAIN_SYNC_FLOW \
+	(MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
+#endif
 
-static struct mlx5_flow_tunnel *
-mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
 {
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
-
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (tun->tunnel_id == id)
-			break;
-	}
+	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+	const struct mlx5_flow_driver_ops *fops;
+	int ret;
+	struct rte_flow_attr attr = { .transfer = 0 };
 
-	return tun;
+	fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+	ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
+	if (ret > 0)
+		ret = -ret;
+	return ret;
 }
 
-static struct mlx5_flow_tunnel *
-mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
-			  const struct rte_flow_tunnel *app_tunnel)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_tunnel *tunnel;
-	uint32_t id;
-
-	mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
-			  &id);
+/*
+ * Tunnel offload functionality is defined for the DV environment only.
+ */
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+__extension__
+union tunnel_offload_mark {
+	uint32_t val;
+	struct {
+		uint32_t app_reserve:8;
+		uint32_t table_id:15;
+		uint32_t transfer:1;
+		uint32_t _unused_:8;
+	};
+};
+
+static int
+flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
+			     struct rte_flow *flow,
+			     const struct rte_flow_attr *attr,
+			     const struct rte_flow_action *app_actions,
+			     uint32_t flow_idx,
+			     struct tunnel_default_miss_ctx *ctx,
+			     struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow *dev_flow;
+	struct rte_flow_attr miss_attr = *attr;
+	const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;
+	const struct rte_flow_item miss_items[2] = {
+		{
+			.type = RTE_FLOW_ITEM_TYPE_ETH,
+			.spec = NULL,
+			.last = NULL,
+			.mask = NULL
+		},
+		{
+			.type = RTE_FLOW_ITEM_TYPE_END,
+			.spec = NULL,
+			.last = NULL,
+			.mask = NULL
+		}
+	};
+	union tunnel_offload_mark mark_id;
+	struct rte_flow_action_mark miss_mark;
+	struct rte_flow_action miss_actions[3] = {
+		[0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },
+		[2] = { .type = RTE_FLOW_ACTION_TYPE_END,  .conf = NULL }
+	};
+	const struct rte_flow_action_jump *jump_data;
+	uint32_t i, flow_table = 0; /* prevent compilation warning */
+	struct flow_grp_info grp_info = {
+		.external = 1,
+		.transfer = attr->transfer,
+		.fdb_def_rule = !!priv->fdb_def_rule,
+		.std_tbl_fix = 0,
+	};
+	int ret;
+
+	if (!attr->transfer) {
+		uint32_t q_size;
+
+		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;
+		q_size = priv->reta_idx_n * sizeof(ctx->queue[0]);
+		ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,
+					 0, SOCKET_ID_ANY);
+		if (!ctx->queue)
+			return rte_flow_error_set
+				(error, ENOMEM,
+				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+				NULL, "invalid default miss RSS");
+		ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
+		ctx->action_rss.level = 0,
+		ctx->action_rss.types = priv->rss_conf.rss_hf,
+		ctx->action_rss.key_len = priv->rss_conf.rss_key_len,
+		ctx->action_rss.queue_num = priv->reta_idx_n,
+		ctx->action_rss.key = priv->rss_conf.rss_key,
+		ctx->action_rss.queue = ctx->queue;
+		if (!priv->reta_idx_n || !priv->rxqs_n)
+			return rte_flow_error_set
+				(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+				NULL, "invalid port configuration");
+		if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
+			ctx->action_rss.types = 0;
+		for (i = 0; i != priv->reta_idx_n; ++i)
+			ctx->queue[i] = (*priv->reta_idx)[i];
+	} else {
+		miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;
+		ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;
+	}
+	miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;
+	for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);
+	jump_data = app_actions->conf;
+	miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
+	miss_attr.group = jump_data->group;
+	ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
+				       &flow_table, grp_info, error);
+	if (ret)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  NULL, "invalid tunnel id");
+	mark_id.app_reserve = 0;
+	mark_id.table_id = tunnel_flow_tbl_to_id(flow_table);
+	mark_id.transfer = !!attr->transfer;
+	mark_id._unused_ = 0;
+	miss_mark.id = mark_id.val;
+	dev_flow = flow_drv_prepare(dev, flow, &miss_attr,
+				    miss_items, miss_actions, flow_idx, error);
+	if (!dev_flow)
+		return -rte_errno;
+	dev_flow->flow = flow;
+	dev_flow->external = true;
+	dev_flow->tunnel = tunnel;
+	/* Subflow object was created, we must include one in the list. */
+	SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
+		      dev_flow->handle, next);
+	DRV_LOG(DEBUG,
+		"port %u tunnel type=%d id=%u miss rule priority=%u group=%u",
+		dev->data->port_id, tunnel->app_tunnel.type,
+		tunnel->tunnel_id, miss_attr.priority, miss_attr.group);
+	ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,
+				  miss_actions, error);
+	if (!ret)
+		ret = flow_mreg_update_copy_table(dev, flow, miss_actions,
+						  error);
+
+	return ret;
+}
+
+static const struct mlx5_flow_tbl_data_entry  *
+tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
+	struct mlx5_hlist_entry *he;
+	union tunnel_offload_mark mbits = { .val = mark };
+	union mlx5_flow_tbl_key table_key = {
+		{
+			.table_id = tunnel_id_to_flow_tbl(mbits.table_id),
+			.dummy = 0,
+			.domain = !!mbits.transfer,
+			.direction = 0,
+		}
+	};
+	he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
+	return he ?
+	       container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
+}
+
+static void
+mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,
+				   struct mlx5_hlist_entry *entry)
+{
+	struct mlx5_dev_ctx_shared *sh = list->ctx;
+	struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
+
+	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+			tunnel_flow_tbl_to_id(tte->flow_table));
+	mlx5_free(tte);
+}
+
+static struct mlx5_hlist_entry *
+mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list,
+				   uint64_t key __rte_unused,
+				   void *ctx __rte_unused)
+{
+	struct mlx5_dev_ctx_shared *sh = list->ctx;
+	struct tunnel_tbl_entry *tte;
+
+	tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
+			  sizeof(*tte), 0,
+			  SOCKET_ID_ANY);
+	if (!tte)
+		goto err;
+	mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+			  &tte->flow_table);
+	if (tte->flow_table >= MLX5_MAX_TABLES) {
+		DRV_LOG(ERR, "Tunnel TBL ID %d exceed max limit.",
+			tte->flow_table);
+		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+				tte->flow_table);
+		goto err;
+	} else if (!tte->flow_table) {
+		goto err;
+	}
+	tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
+	return &tte->hash;
+err:
+	if (tte)
+		mlx5_free(tte);
+	return NULL;
+}
+
+static uint32_t
+tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
+				const struct mlx5_flow_tunnel *tunnel,
+				uint32_t group, uint32_t *table,
+				struct rte_flow_error *error)
+{
+	struct mlx5_hlist_entry *he;
+	struct tunnel_tbl_entry *tte;
+	union tunnel_tbl_key key = {
+		.tunnel_id = tunnel ? tunnel->tunnel_id : 0,
+		.group = group
+	};
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+	struct mlx5_hlist *group_hash;
+
+	group_hash = tunnel ? tunnel->groups : thub->groups;
+	he = mlx5_hlist_register(group_hash, key.val, NULL);
+	if (!he)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+					  NULL,
+					  "tunnel group index not supported");
+	tte = container_of(he, typeof(*tte), hash);
+	*table = tte->flow_table;
+	DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x",
+		dev->data->port_id, key.tunnel_id, group, *table);
+	return 0;
+}
+
+static void
+mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
+		      struct mlx5_flow_tunnel *tunnel)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
+		dev->data->port_id, tunnel->tunnel_id);
+	RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));
+	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],
+			tunnel->tunnel_id);
+	mlx5_hlist_destroy(tunnel->groups);
+	mlx5_free(tunnel);
+}
+
+static struct mlx5_flow_tunnel *
+mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+{
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+	struct mlx5_flow_tunnel *tun;
+
+	LIST_FOREACH(tun, &thub->tunnels, chain) {
+		if (tun->tunnel_id == id)
+			break;
+	}
+
+	return tun;
+}
+
+static struct mlx5_flow_tunnel *
+mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
+			  const struct rte_flow_tunnel *app_tunnel)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_tunnel *tunnel;
+	uint32_t id;
+
+	mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
+			  &id);
 	if (id >= MLX5_MAX_TUNNELS) {
 		mlx5_ipool_free(priv->sh->ipool
 				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
@@ -7671,23 +7565,259 @@ int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)
 	return err;
 }
 
-#ifndef HAVE_MLX5DV_DR
-#define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
-#else
-#define MLX5_DOMAIN_SYNC_FLOW \
-	(MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
-#endif
+static inline bool
+mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
+			  struct rte_flow_tunnel *tunnel,
+			  const char **err_msg)
+{
+	*err_msg = NULL;
+	if (!is_tunnel_offload_active(dev)) {
+		*err_msg = "tunnel offload was not activated";
+		goto out;
+	} else if (!tunnel) {
+		*err_msg = "no application tunnel";
+		goto out;
+	}
 
-int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
+	switch (tunnel->type) {
+	default:
+		*err_msg = "unsupported tunnel type";
+		goto out;
+	case RTE_FLOW_ITEM_TYPE_VXLAN:
+		break;
+	}
+
+out:
+	return !*err_msg;
+}
+
+static int
+mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
+		    struct rte_flow_tunnel *app_tunnel,
+		    struct rte_flow_action **actions,
+		    uint32_t *num_of_actions,
+		    struct rte_flow_error *error)
 {
-	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
-	const struct mlx5_flow_driver_ops *fops;
 	int ret;
-	struct rte_flow_attr attr = { .transfer = 0 };
+	struct mlx5_flow_tunnel *tunnel;
+	const char *err_msg = NULL;
+	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, &err_msg);
 
-	fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
-	ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
-	if (ret > 0)
-		ret = -ret;
-	return ret;
+	if (!verdict)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+					  err_msg);
+	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
+	if (ret < 0) {
+		return rte_flow_error_set(error, ret,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
+					  "failed to initialize pmd tunnel");
+	}
+	*actions = &tunnel->action;
+	*num_of_actions = 1;
+	return 0;
+}
+
+static int
+mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
+		       struct rte_flow_tunnel *app_tunnel,
+		       struct rte_flow_item **items,
+		       uint32_t *num_of_items,
+		       struct rte_flow_error *error)
+{
+	int ret;
+	struct mlx5_flow_tunnel *tunnel;
+	const char *err_msg = NULL;
+	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, &err_msg);
+
+	if (!verdict)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					  err_msg);
+	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
+	if (ret < 0) {
+		return rte_flow_error_set(error, ret,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					  "failed to initialize pmd tunnel");
+	}
+	*items = &tunnel->item;
+	*num_of_items = 1;
+	return 0;
 }
+
+static int
+mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
+			      struct rte_flow_item *pmd_items,
+			      uint32_t num_items, struct rte_flow_error *err)
+{
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+	struct mlx5_flow_tunnel *tun;
+
+	rte_spinlock_lock(&thub->sl);
+	LIST_FOREACH(tun, &thub->tunnels, chain) {
+		if (&tun->item == pmd_items) {
+			LIST_REMOVE(tun, chain);
+			break;
+		}
+	}
+	rte_spinlock_unlock(&thub->sl);
+	if (!tun || num_items != 1)
+		return rte_flow_error_set(err, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					  "invalid argument");
+	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
+		mlx5_flow_tunnel_free(dev, tun);
+	return 0;
+}
+
+static int
+mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
+				struct rte_flow_action *pmd_actions,
+				uint32_t num_actions,
+				struct rte_flow_error *err)
+{
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+	struct mlx5_flow_tunnel *tun;
+
+	rte_spinlock_lock(&thub->sl);
+	LIST_FOREACH(tun, &thub->tunnels, chain) {
+		if (&tun->action == pmd_actions) {
+			LIST_REMOVE(tun, chain);
+			break;
+		}
+	}
+	rte_spinlock_unlock(&thub->sl);
+	if (!tun || num_actions != 1)
+		return rte_flow_error_set(err, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					  "invalid argument");
+	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
+		mlx5_flow_tunnel_free(dev, tun);
+
+	return 0;
+}
+
+static int
+mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
+				  struct rte_mbuf *m,
+				  struct rte_flow_restore_info *info,
+				  struct rte_flow_error *err)
+{
+	uint64_t ol_flags = m->ol_flags;
+	const struct mlx5_flow_tbl_data_entry *tble;
+	const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;
+
+	if ((ol_flags & mask) != mask)
+		goto err;
+	tble = tunnel_mark_decode(dev, m->hash.fdir.hi);
+	if (!tble) {
+		DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x",
+			dev->data->port_id, m->hash.fdir.hi);
+		goto err;
+	}
+	MLX5_ASSERT(tble->tunnel);
+	memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));
+	info->group_id = tble->group_id;
+	info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |
+		      RTE_FLOW_RESTORE_INFO_GROUP_ID |
+		      RTE_FLOW_RESTORE_INFO_ENCAPSULATED;
+
+	return 0;
+
+err:
+	return rte_flow_error_set(err, EINVAL,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				  "failed to get restore info");
+}
+
+#else /* HAVE_IBV_FLOW_DV_SUPPORT */
+static int
+mlx5_flow_tunnel_decap_set(__rte_unused struct rte_eth_dev *dev,
+			   __rte_unused struct rte_flow_tunnel *app_tunnel,
+			   __rte_unused struct rte_flow_action **actions,
+			   __rte_unused uint32_t *num_of_actions,
+			   __rte_unused struct rte_flow_error *error)
+{
+	return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_match(__rte_unused struct rte_eth_dev *dev,
+		       __rte_unused struct rte_flow_tunnel *app_tunnel,
+		       __rte_unused struct rte_flow_item **items,
+		       __rte_unused uint32_t *num_of_items,
+		       __rte_unused struct rte_flow_error *error)
+{
+	return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_item_release(__rte_unused struct rte_eth_dev *dev,
+			      __rte_unused struct rte_flow_item *pmd_items,
+			      __rte_unused uint32_t num_items,
+			      __rte_unused struct rte_flow_error *err)
+{
+	return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_action_release(__rte_unused struct rte_eth_dev *dev,
+				__rte_unused struct rte_flow_action *pmd_action,
+				__rte_unused uint32_t num_actions,
+				__rte_unused struct rte_flow_error *err)
+{
+	return -ENOTSUP;
+}
+
+static int
+mlx5_flow_tunnel_get_restore_info(__rte_unused struct rte_eth_dev *dev,
+				  __rte_unused struct rte_mbuf *m,
+				  __rte_unused struct rte_flow_restore_info *i,
+				  __rte_unused struct rte_flow_error *err)
+{
+	return -ENOTSUP;
+}
+
+static int
+flow_tunnel_add_default_miss(__rte_unused struct rte_eth_dev *dev,
+			     __rte_unused struct rte_flow *flow,
+			     __rte_unused const struct rte_flow_attr *attr,
+			     __rte_unused const struct rte_flow_action *actions,
+			     __rte_unused uint32_t flow_idx,
+			     __rte_unused struct tunnel_default_miss_ctx *ctx,
+			     __rte_unused struct rte_flow_error *error)
+{
+	return -ENOTSUP;
+}
+
+static struct mlx5_flow_tunnel *
+mlx5_find_tunnel_id(__rte_unused struct rte_eth_dev *dev,
+		    __rte_unused uint32_t id)
+{
+	return NULL;
+}
+
+static void
+mlx5_flow_tunnel_free(__rte_unused struct rte_eth_dev *dev,
+		      __rte_unused struct mlx5_flow_tunnel *tunnel)
+{
+}
+
+static uint32_t
+tunnel_flow_group_to_flow_table(__rte_unused struct rte_eth_dev *dev,
+				__rte_unused const struct mlx5_flow_tunnel *t,
+				__rte_unused uint32_t group,
+				__rte_unused uint32_t *table,
+				struct rte_flow_error *error)
+{
+	return rte_flow_error_set(error, ENOTSUP,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				  "tunnel offload requires DV support");
+}
+
+void mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh,
+			     __rte_unused  uint16_t port_id)
+{
+	return;
+}
+#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
+
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 5fac8672fc..fbc6173fcb 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -991,8 +991,13 @@ mlx5_tunnel_hub(struct rte_eth_dev *dev)
 static inline bool
 is_tunnel_offload_active(struct rte_eth_dev *dev)
 {
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	struct mlx5_priv *priv = dev->data->dev_private;
 	return !!priv->config.dv_miss_info;
+#else
+	RTE_SET_USED(dev);
+	return false;
+#endif
 }
 
 static inline bool
-- 
2.29.2


^ permalink raw reply	[flat|nested] 32+ messages in thread

* [dpdk-dev] [PATCH v5 3/6] net/mlx5: fix structure passing method in function call
  2020-11-16 14:02 ` [dpdk-dev] [PATCH v5 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
  2020-11-16 14:02   ` [dpdk-dev] [PATCH v5 1/6] net/mlx5: fix tunnel offload callback names Gregory Etelson
  2020-11-16 14:02   ` [dpdk-dev] [PATCH v5 2/6] net/mlx5: fix build with Direct Verbs disabled Gregory Etelson
@ 2020-11-16 14:02   ` Gregory Etelson
  2020-11-16 14:02   ` [dpdk-dev] [PATCH v5 4/6] net/mlx5: fix tunnel offload object allocation Gregory Etelson
                     ` (3 subsequent siblings)
  6 siblings, 0 replies; 32+ messages in thread
From: Gregory Etelson @ 2020-11-16 14:02 UTC (permalink / raw)
  To: dev; +Cc: getelson, matan, rasland, Viacheslav Ovsiienko, Shahaf Shuler

The tunnel offload implementation introduced the flow_grp_info
bit-field structure. Since the structure size is only 64 bits, the
code passed that type by value in function calls.

The patch changes the structure passing method to a constant reference.
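
A minimal sketch of the change (the field set is abbreviated and the
grp_to_table names are hypothetical):

#include <stdint.h>

struct flow_grp_info {
	uint64_t external:1;
	uint64_t transfer:1;
	uint64_t fdb_def_rule:1;
	uint64_t std_tbl_fix:1;
	uint64_t skip_scale:1;
};

/* before: the 64-bit structure was copied on every call */
int grp_to_table_val(struct flow_grp_info grp_info);

/* after: the structure is passed by constant reference */
int grp_to_table_ref(const struct flow_grp_info *grp_info);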

Fixes: 4ec6360de37d ("net/mlx5: implement tunnel offload")

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c    | 20 +++++++++++---------
 drivers/net/mlx5/mlx5_flow.h    |  4 ++--
 drivers/net/mlx5/mlx5_flow_dv.c | 10 +++++-----
 3 files changed, 18 insertions(+), 16 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index c8152cb8b1..bc93aa6377 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -6699,9 +6699,11 @@ mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
 
 static int
 flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table,
-		    struct flow_grp_info grp_info, struct rte_flow_error *error)
+		    const struct flow_grp_info *grp_info,
+		    struct rte_flow_error *error)
 {
-	if (grp_info.transfer && grp_info.external && grp_info.fdb_def_rule) {
+	if (grp_info->transfer && grp_info->external &&
+	    grp_info->fdb_def_rule) {
 		if (group == UINT32_MAX)
 			return rte_flow_error_set
 						(error, EINVAL,
@@ -6758,25 +6760,25 @@ int
 mlx5_flow_group_to_table(struct rte_eth_dev *dev,
 			 const struct mlx5_flow_tunnel *tunnel,
 			 uint32_t group, uint32_t *table,
-			 struct flow_grp_info grp_info,
+			 const struct flow_grp_info *grp_info,
 			 struct rte_flow_error *error)
 {
 	int ret;
 	bool standard_translation;
 
-	if (!grp_info.skip_scale && grp_info.external &&
+	if (!grp_info->skip_scale && grp_info->external &&
 	    group < MLX5_MAX_TABLES_EXTERNAL)
 		group *= MLX5_FLOW_TABLE_FACTOR;
 	if (is_tunnel_offload_active(dev)) {
-		standard_translation = !grp_info.external ||
-					grp_info.std_tbl_fix;
+		standard_translation = !grp_info->external ||
+					grp_info->std_tbl_fix;
 	} else {
 		standard_translation = true;
 	}
 	DRV_LOG(DEBUG,
 		"port %u group=%#x transfer=%d external=%d fdb_def_rule=%d translate=%s",
-		dev->data->port_id, group, grp_info.transfer,
-		grp_info.external, grp_info.fdb_def_rule,
+		dev->data->port_id, group, grp_info->transfer,
+		grp_info->external, grp_info->fdb_def_rule,
 		standard_translation ? "STANDARD" : "TUNNEL");
 	if (standard_translation)
 		ret = flow_group_to_table(dev->data->port_id, group, table,
@@ -7273,7 +7275,7 @@ flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
 	miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
 	miss_attr.group = jump_data->group;
 	ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
-				       &flow_table, grp_info, error);
+				       &flow_table, &grp_info, error);
 	if (ret)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index fbc6173fcb..c33c0fee7c 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1273,8 +1273,8 @@ tunnel_use_standard_attr_group_translate
 int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
 			     const struct mlx5_flow_tunnel *tunnel,
 			     uint32_t group, uint32_t *table,
-			     struct flow_grp_info flags,
-				 struct rte_flow_error *error);
+			     const struct flow_grp_info *flags,
+			     struct rte_flow_error *error);
 uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
 				     int tunnel, uint64_t layer_types,
 				     uint64_t hash_fields);
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 62d9ca9ffb..25ab9adee6 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -3935,7 +3935,7 @@ flow_dv_validate_action_jump(struct rte_eth_dev *dev,
 	target_group =
 		((const struct rte_flow_action_jump *)action->conf)->group;
 	ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
-				       grp_info, error);
+				       &grp_info, error);
 	if (ret)
 		return ret;
 	if (attributes->group == target_group &&
@@ -5103,7 +5103,7 @@ static int
 flow_dv_validate_attributes(struct rte_eth_dev *dev,
 			    const struct mlx5_flow_tunnel *tunnel,
 			    const struct rte_flow_attr *attributes,
-			    struct flow_grp_info grp_info,
+			    const struct flow_grp_info *grp_info,
 			    struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
@@ -5258,7 +5258,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 	}
 	grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
 				(dev, tunnel, attr, items, actions);
-	ret = flow_dv_validate_attributes(dev, tunnel, attr, grp_info, error);
+	ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
 	if (ret < 0)
 		return ret;
 	is_root = (uint64_t)ret;
@@ -9597,7 +9597,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
 	grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
 				(dev, tunnel, attr, items, actions);
 	ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
-				       grp_info, error);
+				       &grp_info, error);
 	if (ret)
 		return ret;
 	dev_flow->dv.group = table;
@@ -9944,7 +9944,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
 			ret = mlx5_flow_group_to_table(dev, tunnel,
 						       jump_group,
 						       &table,
-						       grp_info, error);
+						       &grp_info, error);
 			if (ret)
 				return ret;
 			tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
-- 
2.29.2


^ permalink raw reply	[flat|nested] 32+ messages in thread

* [dpdk-dev] [PATCH v5 4/6] net/mlx5: fix tunnel offload object allocation
  2020-11-16 14:02 ` [dpdk-dev] [PATCH v5 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
                     ` (2 preceding siblings ...)
  2020-11-16 14:02   ` [dpdk-dev] [PATCH v5 3/6] net/mlx5: fix structure passing method in function call Gregory Etelson
@ 2020-11-16 14:02   ` Gregory Etelson
  2020-11-16 14:02   ` [dpdk-dev] [PATCH v5 5/6] net/mlx5: fix tunnel offload hub multi-thread protection Gregory Etelson
                     ` (2 subsequent siblings)
  6 siblings, 0 replies; 32+ messages in thread
From: Gregory Etelson @ 2020-11-16 14:02 UTC (permalink / raw)
  To: dev
  Cc: getelson, matan, rasland, Viacheslav Ovsiienko, Shahaf Shuler,
	Xueming Li

The original patch allocated tunnel offload objects with invalid
indexes. As a result, PMD tunnel object allocation failed.

In this patch, the indexed pool provides both the index and the memory
for a new tunnel offload object. The tunnel offload ipool is also moved
to DV-enabled code only.
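
In essence, the allocation flow changes as follows (an illustrative
sketch only; the authoritative context is in the diff below):

	/* Before: the ID was taken from an unrelated ipool and the
	 * object came from a separate mlx5_malloc() call.
	 */
	mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
			  &id);
	tunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel),
			     0, SOCKET_ID_ANY);

	/* After: the dedicated tunnel ipool returns the zeroed object
	 * and its index in a single call.
	 */
	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
	tunnel = mlx5_ipool_zmalloc(ipool, &id);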

Fixes: f2e8093 ("net/mlx5: use indexed pool as id generator")

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5.c      | 50 ++++++++++++++++++------------------
 drivers/net/mlx5/mlx5.h      |  4 +--
 drivers/net/mlx5/mlx5_flow.c | 35 ++++++++-----------------
 3 files changed, 37 insertions(+), 52 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 43344391df..31011c3a72 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -186,7 +186,7 @@ static pthread_mutex_t mlx5_dev_ctx_list_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-	{
+	[MLX5_IPOOL_DECAP_ENCAP] = {
 		.size = sizeof(struct mlx5_flow_dv_encap_decap_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -197,7 +197,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_encap_decap_ipool",
 	},
-	{
+	[MLX5_IPOOL_PUSH_VLAN] = {
 		.size = sizeof(struct mlx5_flow_dv_push_vlan_action_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -208,7 +208,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_push_vlan_ipool",
 	},
-	{
+	[MLX5_IPOOL_TAG] = {
 		.size = sizeof(struct mlx5_flow_dv_tag_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -219,7 +219,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_tag_ipool",
 	},
-	{
+	[MLX5_IPOOL_PORT_ID] = {
 		.size = sizeof(struct mlx5_flow_dv_port_id_action_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -230,7 +230,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_port_id_ipool",
 	},
-	{
+	[MLX5_IPOOL_JUMP] = {
 		.size = sizeof(struct mlx5_flow_tbl_data_entry),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -241,7 +241,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_jump_ipool",
 	},
-	{
+	[MLX5_IPOOL_SAMPLE] = {
 		.size = sizeof(struct mlx5_flow_dv_sample_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -252,7 +252,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_sample_ipool",
 	},
-	{
+	[MLX5_IPOOL_DEST_ARRAY] = {
 		.size = sizeof(struct mlx5_flow_dv_dest_array_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -263,8 +263,19 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_dest_array_ipool",
 	},
+	[MLX5_IPOOL_TUNNEL_ID] = {
+		.size = sizeof(struct mlx5_flow_tunnel),
+		.need_lock = 1,
+		.release_mem_en = 1,
+		.type = "mlx5_tunnel_offload",
+	},
+	[MLX5_IPOOL_TNL_TBL_ID] = {
+		.size = 0,
+		.need_lock = 1,
+		.type = "mlx5_flow_tnl_tbl_ipool",
+	},
 #endif
-	{
+	[MLX5_IPOOL_MTR] = {
 		.size = sizeof(struct mlx5_flow_meter),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -275,7 +286,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_meter_ipool",
 	},
-	{
+	[MLX5_IPOOL_MCP] = {
 		.size = sizeof(struct mlx5_flow_mreg_copy_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -286,7 +297,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_mcp_ipool",
 	},
-	{
+	[MLX5_IPOOL_HRXQ] = {
 		.size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -297,7 +308,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_hrxq_ipool",
 	},
-	{
+	[MLX5_IPOOL_MLX5_FLOW] = {
 		/*
 		 * MLX5_IPOOL_MLX5_FLOW size varies for DV and VERBS flows.
 		 * It set in run time according to PCI function configuration.
@@ -312,7 +323,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_flow_handle_ipool",
 	},
-	{
+	[MLX5_IPOOL_RTE_FLOW] = {
 		.size = sizeof(struct rte_flow),
 		.trunk_size = 4096,
 		.need_lock = 1,
@@ -321,22 +332,12 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "rte_flow_ipool",
 	},
-	{
+	[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID] = {
 		.size = 0,
 		.need_lock = 1,
 		.type = "mlx5_flow_rss_id_ipool",
 	},
-	{
-		.size = 0,
-		.need_lock = 1,
-		.type = "mlx5_flow_tnl_flow_ipool",
-	},
-	{
-		.size = 0,
-		.need_lock = 1,
-		.type = "mlx5_flow_tnl_tbl_ipool",
-	},
-	{
+	[MLX5_IPOOL_RSS_SHARED_ACTIONS] = {
 		.size = sizeof(struct mlx5_shared_action_rss),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -347,7 +348,6 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_shared_action_rss",
 	},
-
 };
 
 
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 2ad927bc7d..0155f5ca23 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -44,6 +44,8 @@ enum mlx5_ipool_index {
 	MLX5_IPOOL_JUMP, /* Pool for jump resource. */
 	MLX5_IPOOL_SAMPLE, /* Pool for sample resource. */
 	MLX5_IPOOL_DEST_ARRAY, /* Pool for destination array resource. */
+	MLX5_IPOOL_TUNNEL_ID, /* Pool for tunnel offload context */
+	MLX5_IPOOL_TNL_TBL_ID, /* Pool for tunnel table ID. */
 #endif
 	MLX5_IPOOL_MTR, /* Pool for meter resource. */
 	MLX5_IPOOL_MCP, /* Pool for metadata resource. */
@@ -51,8 +53,6 @@ enum mlx5_ipool_index {
 	MLX5_IPOOL_MLX5_FLOW, /* Pool for mlx5 flow handle. */
 	MLX5_IPOOL_RTE_FLOW, /* Pool for rte_flow. */
 	MLX5_IPOOL_RSS_EXPANTION_FLOW_ID, /* Pool for Queue/RSS flow ID. */
-	MLX5_IPOOL_TUNNEL_ID, /* Pool for flow tunnel ID. */
-	MLX5_IPOOL_TNL_TBL_ID, /* Pool for tunnel table ID. */
 	MLX5_IPOOL_RSS_SHARED_ACTIONS, /* Pool for RSS shared actions. */
 	MLX5_IPOOL_MAX,
 };
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index bc93aa6377..06c19c6db3 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -7406,14 +7406,13 @@ mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
 		      struct mlx5_flow_tunnel *tunnel)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_indexed_pool *ipool;
 
 	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
 		dev->data->port_id, tunnel->tunnel_id);
-	RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));
-	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],
-			tunnel->tunnel_id);
 	mlx5_hlist_destroy(tunnel->groups);
-	mlx5_free(tunnel);
+	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
+	mlx5_ipool_free(ipool, tunnel->tunnel_id);
 }
 
 static struct mlx5_flow_tunnel *
@@ -7435,39 +7434,25 @@ mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
 			  const struct rte_flow_tunnel *app_tunnel)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_indexed_pool *ipool;
 	struct mlx5_flow_tunnel *tunnel;
 	uint32_t id;
 
-	mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
-			  &id);
+	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
+	tunnel = mlx5_ipool_zmalloc(ipool, &id);
+	if (!tunnel)
+		return NULL;
 	if (id >= MLX5_MAX_TUNNELS) {
-		mlx5_ipool_free(priv->sh->ipool
-				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
+		mlx5_ipool_free(ipool, id);
 		DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id);
 		return NULL;
-	} else if (!id) {
-		return NULL;
-	}
-	/**
-	 * mlx5 flow tunnel is an auxlilary data structure
-	 * It's not part of IO. No need to allocate it from
-	 * huge pages pools dedicated for IO
-	 */
-	tunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel),
-			     0, SOCKET_ID_ANY);
-	if (!tunnel) {
-		mlx5_ipool_free(priv->sh->ipool
-				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
-		return NULL;
 	}
 	tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
 					   mlx5_flow_tunnel_grp2tbl_create_cb,
 					   NULL,
 					   mlx5_flow_tunnel_grp2tbl_remove_cb);
 	if (!tunnel->groups) {
-		mlx5_ipool_free(priv->sh->ipool
-				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
-		mlx5_free(tunnel);
+		mlx5_ipool_free(ipool, id);
 		return NULL;
 	}
 	tunnel->groups->ctx = priv->sh;
-- 
2.29.2


^ permalink raw reply	[flat|nested] 32+ messages in thread

* [dpdk-dev] [PATCH v5 5/6] net/mlx5: fix tunnel offload hub multi-thread protection
  2020-11-16 14:02 ` [dpdk-dev] [PATCH v5 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
                     ` (3 preceding siblings ...)
  2020-11-16 14:02   ` [dpdk-dev] [PATCH v5 4/6] net/mlx5: fix tunnel offload object allocation Gregory Etelson
@ 2020-11-16 14:02   ` Gregory Etelson
  2020-11-16 14:02   ` [dpdk-dev] [PATCH v5 6/6] net/mlx5: fix crash in tunnel offload setup Gregory Etelson
  2020-11-17 10:51   ` [dpdk-dev] [PATCH v5 0/6] restore tunnel offload functionality in mlx5 Raslan Darawsheh
  6 siblings, 0 replies; 32+ messages in thread
From: Gregory Etelson @ 2020-11-16 14:02 UTC (permalink / raw)
  To: dev
  Cc: getelson, matan, rasland, Viacheslav Ovsiienko, Shahaf Shuler,
	Suanming Mou

The original patch removed active tunnel offload objects from the
tunnels db list without checking their reference counter values.
That action could lead to a PMD crash.

This patch isolates the tunnels db list behind a separate API that
manages multi-thread (MT) protection of the tunnel offload db.
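
Conceptually, a lookup is now expressed as match/hit/miss callbacks
over a caller context, roughly as follows (a sketch distilled from the
diff below; the names match the patch):

	struct tunnel_db_find_tunnel_id_ctx ctx = { .tunnel_id = id };

	/* Walks the tunnel list under thub->sl and calls hit() on the
	 * first match or miss() otherwise; the last argument controls
	 * whether the callbacks themselves run under the spinlock.
	 */
	mlx5_access_tunnel_offload_db(dev, find_tunnel_id_match,
				      find_tunnel_id_hit, NULL, &ctx, true);
	return ctx.tunnel;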

Fixes: e4f5880 ("net/mlx5: make tunnel hub list thread safe")

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c | 266 +++++++++++++++++++++++++----------
 drivers/net/mlx5/mlx5_flow.h |   6 +-
 2 files changed, 195 insertions(+), 77 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 06c19c6db3..2fe8648341 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -5613,11 +5613,8 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
 	if (flow->tunnel) {
 		struct mlx5_flow_tunnel *tunnel;
 
-		rte_spinlock_lock(&mlx5_tunnel_hub(dev)->sl);
 		tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
 		RTE_VERIFY(tunnel);
-		LIST_REMOVE(tunnel, chain);
-		rte_spinlock_unlock(&mlx5_tunnel_hub(dev)->sl);
 		if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
 			mlx5_flow_tunnel_free(dev, tunnel);
 	}
@@ -7194,6 +7191,15 @@ union tunnel_offload_mark {
 	};
 };
 
+static bool
+mlx5_access_tunnel_offload_db
+	(struct rte_eth_dev *dev,
+	 bool (*match)(struct rte_eth_dev *,
+		       struct mlx5_flow_tunnel *, const void *),
+	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
+	 void (*miss)(struct rte_eth_dev *, void *),
+	 void *ctx, bool lock_op);
+
 static int
 flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
 			     struct rte_flow *flow,
@@ -7415,18 +7421,72 @@ mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
 	mlx5_ipool_free(ipool, tunnel->tunnel_id);
 }
 
-static struct mlx5_flow_tunnel *
-mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+static bool
+mlx5_access_tunnel_offload_db
+	(struct rte_eth_dev *dev,
+	 bool (*match)(struct rte_eth_dev *,
+		       struct mlx5_flow_tunnel *, const void *),
+	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
+	 void (*miss)(struct rte_eth_dev *, void *),
+	 void *ctx, bool lock_op)
 {
+	bool verdict = false;
 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
+	struct mlx5_flow_tunnel *tunnel;
 
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (tun->tunnel_id == id)
+	rte_spinlock_lock(&thub->sl);
+	LIST_FOREACH(tunnel, &thub->tunnels, chain) {
+		verdict = match(dev, tunnel, (const void *)ctx);
+		if (verdict)
 			break;
 	}
+	if (!lock_op)
+		rte_spinlock_unlock(&thub->sl);
+	if (verdict && hit)
+		hit(dev, tunnel, ctx);
+	if (!verdict && miss)
+		miss(dev, ctx);
+	if (lock_op)
+		rte_spinlock_unlock(&thub->sl);
 
-	return tun;
+	return verdict;
+}
+
+struct tunnel_db_find_tunnel_id_ctx {
+	uint32_t tunnel_id;
+	struct mlx5_flow_tunnel *tunnel;
+};
+
+static bool
+find_tunnel_id_match(struct rte_eth_dev *dev,
+		     struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+	const struct tunnel_db_find_tunnel_id_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	return tunnel->tunnel_id == ctx->tunnel_id;
+}
+
+static void
+find_tunnel_id_hit(struct rte_eth_dev *dev,
+		   struct mlx5_flow_tunnel *tunnel, void *x)
+{
+	struct tunnel_db_find_tunnel_id_ctx *ctx = x;
+	RTE_SET_USED(dev);
+	ctx->tunnel = tunnel;
+}
+
+static struct mlx5_flow_tunnel *
+mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+{
+	struct tunnel_db_find_tunnel_id_ctx ctx = {
+		.tunnel_id = id,
+	};
+
+	mlx5_access_tunnel_offload_db(dev, find_tunnel_id_match,
+				      find_tunnel_id_hit, NULL, &ctx, true);
+
+	return ctx.tunnel;
 }
 
 static struct mlx5_flow_tunnel *
@@ -7474,38 +7534,60 @@ mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
 	return tunnel;
 }
 
+struct tunnel_db_get_tunnel_ctx {
+	const struct rte_flow_tunnel *app_tunnel;
+	struct mlx5_flow_tunnel *tunnel;
+};
+
+static bool get_tunnel_match(struct rte_eth_dev *dev,
+			     struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+	const struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	return !memcmp(ctx->app_tunnel, &tunnel->app_tunnel,
+		       sizeof(*ctx->app_tunnel));
+}
+
+static void get_tunnel_hit(struct rte_eth_dev *dev,
+			   struct mlx5_flow_tunnel *tunnel, void *x)
+{
+	/* called under tunnel spinlock protection */
+	struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	tunnel->refctn++;
+	ctx->tunnel = tunnel;
+}
+
+static void get_tunnel_miss(struct rte_eth_dev *dev, void *x)
+{
+	/* called under tunnel spinlock protection */
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+	struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+	rte_spinlock_unlock(&thub->sl);
+	ctx->tunnel = mlx5_flow_tunnel_allocate(dev, ctx->app_tunnel);
+	ctx->tunnel->refctn = 1;
+	rte_spinlock_lock(&thub->sl);
+	if (ctx->tunnel)
+		LIST_INSERT_HEAD(&thub->tunnels, ctx->tunnel, chain);
+}
+
+
 static int
 mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
 		     const struct rte_flow_tunnel *app_tunnel,
 		     struct mlx5_flow_tunnel **tunnel)
 {
-	int ret;
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
-
-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (!memcmp(app_tunnel, &tun->app_tunnel,
-			    sizeof(*app_tunnel))) {
-			*tunnel = tun;
-			ret = 0;
-			break;
-		}
-	}
-	if (!tun) {
-		tun = mlx5_flow_tunnel_allocate(dev, app_tunnel);
-		if (tun) {
-			LIST_INSERT_HEAD(&thub->tunnels, tun, chain);
-			*tunnel = tun;
-		} else {
-			ret = -ENOMEM;
-		}
-	}
-	rte_spinlock_unlock(&thub->sl);
-	if (tun)
-		__atomic_add_fetch(&tun->refctn, 1, __ATOMIC_RELAXED);
+	struct tunnel_db_get_tunnel_ctx ctx = {
+		.app_tunnel = app_tunnel,
+	};
 
-	return ret;
+	mlx5_access_tunnel_offload_db(dev, get_tunnel_match, get_tunnel_hit,
+				      get_tunnel_miss, &ctx, true);
+	*tunnel = ctx.tunnel;
+	return ctx.tunnel ? 0 : -ENOMEM;
 }
 
 void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id)
@@ -7631,56 +7713,88 @@ mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
 	*num_of_items = 1;
 	return 0;
 }
+
+struct tunnel_db_element_release_ctx {
+	struct rte_flow_item *items;
+	struct rte_flow_action *actions;
+	uint32_t num_elements;
+	struct rte_flow_error *error;
+	int ret;
+};
+
+static bool
+tunnel_element_release_match(struct rte_eth_dev *dev,
+			     struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+	const struct tunnel_db_element_release_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	if (ctx->num_elements != 1)
+		return false;
+	else if (ctx->items)
+		return ctx->items == &tunnel->item;
+	else if (ctx->actions)
+		return ctx->actions == &tunnel->action;
+
+	return false;
+}
+
+static void
+tunnel_element_release_hit(struct rte_eth_dev *dev,
+			   struct mlx5_flow_tunnel *tunnel, void *x)
+{
+	struct tunnel_db_element_release_ctx *ctx = x;
+	ctx->ret = 0;
+	if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
+		mlx5_flow_tunnel_free(dev, tunnel);
+}
+
+static void
+tunnel_element_release_miss(struct rte_eth_dev *dev, void *x)
+{
+	struct tunnel_db_element_release_ctx *ctx = x;
+	RTE_SET_USED(dev);
+	ctx->ret = rte_flow_error_set(ctx->error, EINVAL,
+				      RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				      "invalid argument");
+}
+
 static int
 mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
-			      struct rte_flow_item *pmd_items,
-			      uint32_t num_items, struct rte_flow_error *err)
-{
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
+		       struct rte_flow_item *pmd_items,
+		       uint32_t num_items, struct rte_flow_error *err)
+{
+	struct tunnel_db_element_release_ctx ctx = {
+		.items = pmd_items,
+		.actions = NULL,
+		.num_elements = num_items,
+		.error = err,
+	};
 
-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (&tun->item == pmd_items) {
-			LIST_REMOVE(tun, chain);
-			break;
-		}
-	}
-	rte_spinlock_unlock(&thub->sl);
-	if (!tun || num_items != 1)
-		return rte_flow_error_set(err, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  "invalid argument");
-	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
-		mlx5_flow_tunnel_free(dev, tun);
-	return 0;
+	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
+				      tunnel_element_release_hit,
+				      tunnel_element_release_miss, &ctx, false);
+
+	return ctx.ret;
 }
 
 static int
 mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
-				struct rte_flow_action *pmd_actions,
-				uint32_t num_actions,
-				struct rte_flow_error *err)
-{
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
+			 struct rte_flow_action *pmd_actions,
+			 uint32_t num_actions, struct rte_flow_error *err)
+{
+	struct tunnel_db_element_release_ctx ctx = {
+		.items = NULL,
+		.actions = pmd_actions,
+		.num_elements = num_actions,
+		.error = err,
+	};
 
-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (&tun->action == pmd_actions) {
-			LIST_REMOVE(tun, chain);
-			break;
-		}
-	}
-	rte_spinlock_unlock(&thub->sl);
-	if (!tun || num_actions != 1)
-		return rte_flow_error_set(err, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  "invalid argument");
-	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
-		mlx5_flow_tunnel_free(dev, tun);
+	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
+				      tunnel_element_release_hit,
+				      tunnel_element_release_miss, &ctx, false);
 
-	return 0;
+	return ctx.ret;
 }
 
 static int
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index c33c0fee7c..f64384217f 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -950,8 +950,12 @@ struct mlx5_flow_tunnel {
 
 /** PMD tunnel related context */
 struct mlx5_flow_tunnel_hub {
+	/* Tunnels list
+	 * Access to the list MUST be MT protected
+	 */
 	LIST_HEAD(, mlx5_flow_tunnel) tunnels;
-	rte_spinlock_t sl;			/* Tunnel list spinlock. */
+	 /* protect access to the tunnels list */
+	rte_spinlock_t sl;
 	struct mlx5_hlist *groups;		/** non tunnel groups */
 };
 
-- 
2.29.2


^ permalink raw reply	[flat|nested] 32+ messages in thread

* [dpdk-dev] [PATCH v5 6/6] net/mlx5: fix crash in tunnel offload setup
  2020-11-16 14:02 ` [dpdk-dev] [PATCH v5 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
                     ` (4 preceding siblings ...)
  2020-11-16 14:02   ` [dpdk-dev] [PATCH v5 5/6] net/mlx5: fix tunnel offload hub multi-thread protection Gregory Etelson
@ 2020-11-16 14:02   ` Gregory Etelson
  2020-11-17 10:51   ` [dpdk-dev] [PATCH v5 0/6] restore tunnel offload functionality in mlx5 Raslan Darawsheh
  6 siblings, 0 replies; 32+ messages in thread
From: Gregory Etelson @ 2020-11-16 14:02 UTC (permalink / raw)
  To: dev
  Cc: getelson, matan, rasland, Viacheslav Ovsiienko, Shahaf Shuler,
	Xueming Li

The new flow table resource management API triggered a PMD crash in
tunnel offload mode when a tunnel match flow rule was inserted before
the tunnel set rule.

The reason for the crash was double flow table registration: the table
was registered first by the tunnel offload code and once more by the
PMD code as part of general table processing. The table reference
counter was decremented only once during rule destruction, which
caused a resource leak that triggered the crash.

The patch updates the PMD registration with tunnel offload parameters
and removes the table registration from the tunnel-related code.
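
After the fix, the single registration lives in the matcher
registration path, roughly (a sketch; see the diff below for the exact
change):

	/* flow_dv_matcher_register() now receives the tunnel and group
	 * and performs the one table registration with tunnel offload
	 * parameters; the duplicate call in flow_dv_translate() is gone.
	 */
	tbl = flow_dv_tbl_resource_get(dev, key->table_id,
				       key->direction, key->domain,
				       dev_flow->external, tunnel,
				       group_id, 0, error);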

Fixes: 663ad57dabb2 ("net/mlx5: make flow table cache thread safe")

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c    |  2 +-
 drivers/net/mlx5/mlx5_flow_dv.c | 39 +++++++++++++++++----------------
 2 files changed, 21 insertions(+), 20 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 2fe8648341..b9e1c30713 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -6773,7 +6773,7 @@ mlx5_flow_group_to_table(struct rte_eth_dev *dev,
 		standard_translation = true;
 	}
 	DRV_LOG(DEBUG,
-		"port %u group=%#x transfer=%d external=%d fdb_def_rule=%d translate=%s",
+		"port %u group=%u transfer=%d external=%d fdb_def_rule=%d translate=%s",
 		dev->data->port_id, group, grp_info->transfer,
 		grp_info->external, grp_info->fdb_def_rule,
 		standard_translation ? "STANDARD" : "TUNNEL");
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 25ab9adee6..5e230a3c25 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -8042,6 +8042,8 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
 				   "cannot get table");
 		return NULL;
 	}
+	DRV_LOG(DEBUG, "Table_id %u tunnel %u group %u registered.",
+		table_id, tunnel ? tunnel->tunnel_id : 0, group_id);
 	tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
 	return &tbl_data->tbl;
 }
@@ -8080,7 +8082,7 @@ flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
 		if (he)
 			mlx5_hlist_unregister(tunnel_grp_hash, he);
 		DRV_LOG(DEBUG,
-			"Table_id %#x tunnel %u group %u released.",
+			"Table_id %u tunnel %u group %u released.",
 			table_id,
 			tbl_data->tunnel ?
 			tbl_data->tunnel->tunnel_id : 0,
@@ -8192,6 +8194,8 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
 			 struct mlx5_flow_dv_matcher *ref,
 			 union mlx5_flow_tbl_key *key,
 			 struct mlx5_flow *dev_flow,
+			 const struct mlx5_flow_tunnel *tunnel,
+			 uint32_t group_id,
 			 struct rte_flow_error *error)
 {
 	struct mlx5_cache_entry *entry;
@@ -8203,8 +8207,14 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
 		.data = ref,
 	};
 
-	tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
-				       key->domain, false, NULL, 0, 0, error);
+	/**
+	 * tunnel offload API requires this registration for cases when
+	 * tunnel match rule was inserted before tunnel set rule.
+	 */
+	tbl = flow_dv_tbl_resource_get(dev, key->table_id,
+				       key->direction, key->domain,
+				       dev_flow->external, tunnel,
+				       group_id, 0, error);
 	if (!tbl)
 		return -rte_errno;	/* No need to refill the error info */
 	tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
@@ -9611,10 +9621,14 @@ flow_dv_translate(struct rte_eth_dev *dev,
 		/*
 		 * do not add decap action if match rule drops packet
 		 * HW rejects rules with decap & drop
+		 *
+		 * if tunnel match rule was inserted before matching tunnel set
+		 * rule flow table used in the match rule must be registered.
+		 * current implementation handles that in the
+		 * flow_dv_match_register() at the function end.
 		 */
 		bool add_decap = true;
 		const struct rte_flow_action *ptr = actions;
-		struct mlx5_flow_tbl_resource *tbl;
 
 		for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
 			if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
@@ -9631,20 +9645,6 @@ flow_dv_translate(struct rte_eth_dev *dev,
 					dev_flow->dv.encap_decap->action;
 			action_flags |= MLX5_FLOW_ACTION_DECAP;
 		}
-		/*
-		 * bind table_id with <group, table> for tunnel match rule.
-		 * Tunnel set rule establishes that bind in JUMP action handler.
-		 * Required for scenario when application creates tunnel match
-		 * rule before tunnel set rule.
-		 */
-		tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
-					       attr->transfer,
-					       !!dev_flow->external, tunnel,
-					       attr->group, 0, error);
-		if (!tbl)
-			return rte_flow_error_set
-			       (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
-			       actions, "cannot register tunnel group");
 	}
 	for (; !actions_end ; actions++) {
 		const struct rte_flow_action_queue *queue;
@@ -10474,7 +10474,8 @@ flow_dv_translate(struct rte_eth_dev *dev,
 	tbl_key.domain = attr->transfer;
 	tbl_key.direction = attr->egress;
 	tbl_key.table_id = dev_flow->dv.group;
-	if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
+	if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
+				     tunnel, attr->group, error))
 		return -rte_errno;
 	return 0;
 }
-- 
2.29.2


^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [dpdk-dev] [PATCH v5 0/6] restore tunnel offload functionality in mlx5
  2020-11-16 14:02 ` [dpdk-dev] [PATCH v5 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
                     ` (5 preceding siblings ...)
  2020-11-16 14:02   ` [dpdk-dev] [PATCH v5 6/6] net/mlx5: fix crash in tunnel offload setup Gregory Etelson
@ 2020-11-17 10:51   ` Raslan Darawsheh
  6 siblings, 0 replies; 32+ messages in thread
From: Raslan Darawsheh @ 2020-11-17 10:51 UTC (permalink / raw)
  To: Gregory Etelson, dev; +Cc: Matan Azrad

Hi,

> -----Original Message-----
> From: Gregory Etelson <getelson@nvidia.com>
> Sent: Monday, November 16, 2020 4:02 PM
> To: dev@dpdk.org
> Cc: Gregory Etelson <getelson@nvidia.com>; Matan Azrad
> <matan@nvidia.com>; Raslan Darawsheh <rasland@nvidia.com>
> Subject: [PATCH v5 0/6] restore tunnel offload functionality in mlx5
> 
> post merge fixes that restore tunnel offload functionality in mlx5.
> 
> v2: resolve compilation error in ubuntu inbox.
> v3: re-arrange patches to prevent ubuntu compilation error.
> v4: more compilation fixes.
> v5: rebase on 20.11-rc4
> 
> Gregory Etelson (6):
>   net/mlx5: fix tunnel offload callback names
>   net/mlx5: fix build with Direct Verbs disabled
>   net/mlx5: fix structure passing method in function call
>   net/mlx5: fix tunnel offload object allocation
>   net/mlx5: fix tunnel offload hub multi-thread protection
>   net/mlx5: fix crash in tunnel offload setup
> 
>  drivers/net/mlx5/mlx5.c         |   50 +-
>  drivers/net/mlx5/mlx5.h         |    4 +-
>  drivers/net/mlx5/mlx5_flow.c    | 1274 ++++++++++++++++++-------------
>  drivers/net/mlx5/mlx5_flow.h    |   15 +-
>  drivers/net/mlx5/mlx5_flow_dv.c |   49 +-
>  5 files changed, 817 insertions(+), 575 deletions(-)
> 
> --
> 2.29.2

Series applied to next-net-mlx with minor fixes to the fixes logs,

Kindest regards,
Raslan Darawsheh

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [dpdk-dev] [PATCH v5 2/6] net/mlx5: fix build with Direct Verbs disabled
  2020-11-16 14:02   ` [dpdk-dev] [PATCH v5 2/6] net/mlx5: fix build with Direct Verbs disabled Gregory Etelson
@ 2020-11-17 14:33     ` Ferruh Yigit
  0 siblings, 0 replies; 32+ messages in thread
From: Ferruh Yigit @ 2020-11-17 14:33 UTC (permalink / raw)
  To: Gregory Etelson, dev; +Cc: matan, rasland, Viacheslav Ovsiienko, Shahaf Shuler

On 11/16/2020 2:02 PM, Gregory Etelson wrote:
> Tunnel offload API is implemented for Direct Verbs environment only.
> Current patch re-arranges tunnel related functions for compilation in
> non Direct Verbs setups to prevent compilation failures.  The patch
> does not introduce new functions.
> 
> Fixes: 4ec6360de37d ("net/mlx5: implement tunnel offload")
> 
> Signed-off-by: Gregory Etelson <getelson@nvidia.com>
> Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

<...>

> +void mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh,
> +			     __rte_unused  uint16_t port_id)
> +{
> +	return;
> +}

There is a checkpatch warning about the 'return' in the void function;
I will remove it in next-net.

I will also fix the syntax and move the function return type to the line above.
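
For reference, the cleaned-up stub would look roughly like this (a
sketch of the described fixes, not the exact committed code):

	void
	mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh,
				__rte_unused uint16_t port_id)
	{
	}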

^ permalink raw reply	[flat|nested] 32+ messages in thread

end of thread, other threads:[~2020-11-17 14:33 UTC | newest]

Thread overview: 32+ messages
2020-11-11  7:14 [dpdk-dev] [PATCH 0/4] restore tunnel offload functionality in mlx5 Gregory Etelson
2020-11-11  7:14 ` [dpdk-dev] [PATCH 1/4] net/mlx5: fix offloaded tunnel allocation Gregory Etelson
2020-11-11  8:51   ` Slava Ovsiienko
2020-11-11  7:14 ` [dpdk-dev] [PATCH 2/4] net/mlx5: fix tunnel offload hub multi-thread protection Gregory Etelson
2020-11-11  8:51   ` Slava Ovsiienko
2020-11-11  7:14 ` [dpdk-dev] [PATCH 3/4] net/mlx5: fix PMD crash after tunnel offload match rule destruction Gregory Etelson
2020-11-11  8:51   ` Slava Ovsiienko
2020-11-11  7:14 ` [dpdk-dev] [PATCH 4/4] net/mlx5: fix tunnel offload callback names Gregory Etelson
2020-11-11  8:51   ` Slava Ovsiienko
2020-11-16  9:13 ` [dpdk-dev] [PATCH v3 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
2020-11-16  9:13   ` [dpdk-dev] [PATCH v3 1/6] net/mlx5: fix tunnel offload callback names Gregory Etelson
2020-11-16  9:13   ` [dpdk-dev] [PATCH v3 2/6] net/mlx5: fix build with Direct Verbs disabled Gregory Etelson
2020-11-16  9:13   ` [dpdk-dev] [PATCH v3 3/6] net/mlx5: fix structure passing method in function call Gregory Etelson
2020-11-16  9:13   ` [dpdk-dev] [PATCH v3 4/6] net/mlx5: fix tunnel offload object allocation Gregory Etelson
2020-11-16  9:13   ` [dpdk-dev] [PATCH v3 5/6] net/mlx5: fix tunnel offload hub multi-thread protection Gregory Etelson
2020-11-16  9:13   ` [dpdk-dev] [PATCH v3 6/6] net/mlx5: fix crash in tunnel offload setup Gregory Etelson
2020-11-16  9:48 ` [dpdk-dev] [PATCH v4 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
2020-11-16  9:49   ` [dpdk-dev] [PATCH v4 1/6] net/mlx5: fix tunnel offload callback names Gregory Etelson
2020-11-16  9:49   ` [dpdk-dev] [PATCH v4 2/6] net/mlx5: fix build with Direct Verbs disabled Gregory Etelson
2020-11-16  9:49   ` [dpdk-dev] [PATCH v4 3/6] net/mlx5: fix structure passing method in function call Gregory Etelson
2020-11-16  9:49   ` [dpdk-dev] [PATCH v4 4/6] net/mlx5: fix tunnel offload object allocation Gregory Etelson
2020-11-16  9:49   ` [dpdk-dev] [PATCH v4 5/6] net/mlx5: fix tunnel offload hub multi-thread protection Gregory Etelson
2020-11-16  9:49   ` [dpdk-dev] [PATCH v4 6/6] net/mlx5: fix crash in tunnel offload setup Gregory Etelson
2020-11-16 14:02 ` [dpdk-dev] [PATCH v5 0/6] restore tunnel offload functionality in mlx5 Gregory Etelson
2020-11-16 14:02   ` [dpdk-dev] [PATCH v5 1/6] net/mlx5: fix tunnel offload callback names Gregory Etelson
2020-11-16 14:02   ` [dpdk-dev] [PATCH v5 2/6] net/mlx5: fix build with Direct Verbs disabled Gregory Etelson
2020-11-17 14:33     ` Ferruh Yigit
2020-11-16 14:02   ` [dpdk-dev] [PATCH v5 3/6] net/mlx5: fix structure passing method in function call Gregory Etelson
2020-11-16 14:02   ` [dpdk-dev] [PATCH v5 4/6] net/mlx5: fix tunnel offload object allocation Gregory Etelson
2020-11-16 14:02   ` [dpdk-dev] [PATCH v5 5/6] net/mlx5: fix tunnel offload hub multi-thread protection Gregory Etelson
2020-11-16 14:02   ` [dpdk-dev] [PATCH v5 6/6] net/mlx5: fix crash in tunnel offload setup Gregory Etelson
2020-11-17 10:51   ` [dpdk-dev] [PATCH v5 0/6] restore tunnel offload functionality in mlx5 Raslan Darawsheh
