DPDK patches and discussions
* [dpdk-dev] [PATCH] net/mlx5: fix flow table hash list conversion
@ 2019-11-14 16:22 Matan Azrad
  2019-11-17 12:14 ` [dpdk-dev] [PATCH v2] " Matan Azrad
  0 siblings, 1 reply; 3+ messages in thread
From: Matan Azrad @ 2019-11-14 16:22 UTC (permalink / raw)
  To: dev; +Cc: Viacheslav Ovsiienko, bingz

For the case when DR is not supported and DV is supported:
	the multi-table feature is off.
	In this case, only table 0 is supported.
	The table 0 structure was wrongly not created, which prevented
	any matcher object from being created and even caused crashes.

Create the table hash list in the DV case too.
Create an empty table 0 structure for each domain when DR is not
supported.
Allow a NULL DR internal table object to be used.

Fixes: 860897d2895a ("net/mlx5: reorganize flow tables with hash list")
Cc: bingz@mellanox.com

Signed-off-by: Matan Azrad <matan@mellanox.com>
---
 drivers/net/mlx5/mlx5.c         | 187 +++++++++++++++++++++++++++++++++-------
 drivers/net/mlx5/mlx5_flow_dv.c |  34 +-------
 2 files changed, 156 insertions(+), 65 deletions(-)
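
For readers following the diff below: the 64-bit key handed to
mlx5_hlist_lookup()/mlx5_hlist_insert() packs the table id, domain and
direction into one value. The stand-alone sketch below illustrates that
layout; the field widths are assumed for the example (only the member
names and the .v64 accessor appear in the patch), so treat it as an
approximation rather than the driver's actual header.

/*
 * Approximate layout of the flow table hash key (field widths assumed).
 * table_id 0 combined with the domain/direction bits identifies the
 * per-domain root tables that this patch pre-creates when DR is absent.
 */
#include <stdint.h>

union tbl_key_sketch {
	struct {
		uint32_t table_id;   /* flow table level; 0 is the root */
		uint16_t reserved;   /* kept zero so keys compare equal */
		uint8_t domain;      /* 0 - NIC domain, 1 - FDB (transfer) */
		uint8_t direction;   /* 0 - ingress (Rx), 1 - egress (Tx) */
	};
	uint64_t v64;                /* value used as the hash list key */
};

With this encoding, the three root entries created in the hunk below
correspond to NIC Rx (domain 0, direction 0), NIC Tx (domain 0,
direction 1) and FDB (domain 1, direction 0).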

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 35baaf7..ea999f5 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -715,6 +715,146 @@ struct mlx5_flow_id_pool *
 }
 
 /**
+ * Destroy table hash list and all the root entries per domain.
+ *
+ * @param[in] priv
+ *   Pointer to the private device data structure.
+ */
+static void
+mlx5_free_table_hash_list(struct mlx5_priv *priv)
+{
+	struct mlx5_ibv_shared *sh = priv->sh;
+	struct mlx5_flow_tbl_data_entry *tbl_data;
+	union mlx5_flow_tbl_key table_key = {
+		{
+			.table_id = 0,
+			.reserved = 0,
+			.domain = 0,
+			.direction = 0,
+		}
+	};
+	struct mlx5_hlist_entry *pos;
+
+	if (!sh->flow_tbls)
+		return;
+	pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
+	if (pos) {
+		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
+					entry);
+		assert(tbl_data);
+		mlx5_hlist_remove(sh->flow_tbls, pos);
+		rte_free(tbl_data);
+	}
+	table_key.direction = 1;
+	pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
+	if (pos) {
+		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
+					entry);
+		assert(tbl_data);
+		mlx5_hlist_remove(sh->flow_tbls, pos);
+		rte_free(tbl_data);
+	}
+	table_key.direction = 0;
+	table_key.domain = 1;
+	pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
+	if (pos) {
+		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
+					entry);
+		assert(tbl_data);
+		mlx5_hlist_remove(sh->flow_tbls, pos);
+		rte_free(tbl_data);
+	}
+	mlx5_hlist_destroy(sh->flow_tbls, NULL, NULL);
+}
+
+/**
+ * Initialize flow table hash list and create the root table entry
+ * for each domain.
+ *
+ * @param[in] priv
+ *   Pointer to the private device data structure.
+ *
+ * @return
+ *   Zero on success, positive error code otherwise.
+ */
+static int
+mlx5_alloc_table_hash_list(struct mlx5_priv *priv)
+{
+	struct mlx5_ibv_shared *sh = priv->sh;
+	char s[MLX5_HLIST_NAMESIZE];
+	int err = 0;
+
+	assert(sh);
+	snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
+	sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE);
+	if (!sh->flow_tbls) {
+		DRV_LOG(ERR, "flow tables with hash creation failed.\n");
+		return ENOMEM;
+	}
+#ifndef HAVE_MLX5DV_DR
+	/*
+	 * In case we have no DR support, the zero tables should still be
+	 * created because DV expects to see them even if they cannot be
+	 * created by RDMA-CORE.
+	 */
+	union mlx5_flow_tbl_key table_key = {
+		{
+			.table_id = 0,
+			.reserved = 0,
+			.domain = 0,
+			.direction = 0,
+		}
+	};
+	struct mlx5_flow_tbl_data_entry *tbl_data = rte_zmalloc(NULL,
+							  sizeof(*tbl_data), 0);
+
+	if (!tbl_data) {
+		err = ENOMEM;
+		goto error;
+	}
+	tbl_data->entry.key = table_key.v64;
+	err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
+	if (err) {
+		goto error;
+	}
+	rte_atomic32_init(&tbl_data->tbl.refcnt);
+	rte_atomic32_inc(&tbl_data->tbl.refcnt);
+	table_key.direction = 1;
+	tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
+	if (!tbl_data) {
+		err = ENOMEM;
+		goto error;
+	}
+	tbl_data->entry.key = table_key.v64;
+	err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
+	if (err) {
+		goto error;
+	}
+	rte_atomic32_init(&tbl_data->tbl.refcnt);
+	rte_atomic32_inc(&tbl_data->tbl.refcnt);
+	table_key.direction = 0;
+	table_key.domain = 1;
+	tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
+	if (!tbl_data) {
+		err = ENOMEM;
+		goto error;
+	}
+	tbl_data->entry.key = table_key.v64;
+	err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
+	if (err) {
+		goto error;
+	}
+	rte_atomic32_init(&tbl_data->tbl.refcnt);
+	rte_atomic32_inc(&tbl_data->tbl.refcnt);
+	return err;
+error:
+	mlx5_free_table_hash_list(priv);
+#endif /* HAVE_MLX5DV_DR */
+	return err;
+
+}
+
+/**
  * Initialize DR related data within private structure.
  * Routine checks the reference counter and does actual
  * resources creation/initialization only if counter is zero.
@@ -728,13 +868,15 @@ struct mlx5_flow_id_pool *
 static int
 mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 {
+	int err = mlx5_alloc_table_hash_list(priv);
+
+	if (err)
+		return err;
 #ifdef HAVE_MLX5DV_DR
 	struct mlx5_ibv_shared *sh = priv->sh;
-	int err = 0;
-	void *domain;
 	char s[MLX5_HLIST_NAMESIZE];
+	void *domain;
 
-	assert(sh);
 	if (sh->dv_refcnt) {
 		/* Shared DV/DR structures is already initialized. */
 		sh->dv_refcnt++;
@@ -772,20 +914,11 @@ struct mlx5_flow_id_pool *
 		sh->esw_drop_action = mlx5_glue->dr_create_flow_action_drop();
 	}
 #endif
-	snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
-	sh->flow_tbls = mlx5_hlist_create(s,
-					  MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE);
-	if (!sh->flow_tbls) {
-		DRV_LOG(ERR, "flow tables with hash creation failed.\n");
-		err = -ENOMEM;
-		goto error;
-	}
 	/* create tags hash list table. */
 	snprintf(s, sizeof(s), "%s_tags", priv->sh->ibdev_name);
 	sh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE);
 	if (!sh->flow_tbls) {
 		DRV_LOG(ERR, "tags with hash creation failed.\n");
-		err = -ENOMEM;
 		goto error;
 	}
 	sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan();
@@ -807,10 +940,6 @@ struct mlx5_flow_id_pool *
 		mlx5_glue->dr_destroy_domain(sh->fdb_domain);
 		sh->fdb_domain = NULL;
 	}
-	if (sh->flow_tbls) {
-		mlx5_hlist_destroy(sh->flow_tbls, NULL, NULL);
-		sh->flow_tbls = NULL;
-	}
 	if (sh->esw_drop_action) {
 		mlx5_glue->destroy_flow_action(sh->esw_drop_action);
 		sh->esw_drop_action = NULL;
@@ -819,11 +948,9 @@ struct mlx5_flow_id_pool *
 		mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
 		sh->pop_vlan_action = NULL;
 	}
-	return err;
-#else
-	(void)priv;
-	return 0;
+	mlx5_free_table_hash_list(priv);
 #endif
+	return err;
 }
 
 /**
@@ -846,16 +973,6 @@ struct mlx5_flow_id_pool *
 	assert(sh->dv_refcnt);
 	if (sh->dv_refcnt && --sh->dv_refcnt)
 		return;
-	if (sh->flow_tbls) {
-		/* flow table entries should be handled properly before. */
-		mlx5_hlist_destroy(sh->flow_tbls, NULL, NULL);
-		sh->flow_tbls = NULL;
-	}
-	if (sh->tag_table) {
-		/* tags should be destroyed with flow before. */
-		mlx5_hlist_destroy(sh->tag_table, NULL, NULL);
-		sh->tag_table = NULL;
-	}
 	if (sh->rx_domain) {
 		mlx5_glue->dr_destroy_domain(sh->rx_domain);
 		sh->rx_domain = NULL;
@@ -874,14 +991,18 @@ struct mlx5_flow_id_pool *
 		sh->esw_drop_action = NULL;
 	}
 #endif
+	if (sh->tag_table) {
+		/* tags should be destroyed with flow before. */
+		mlx5_hlist_destroy(sh->tag_table, NULL, NULL);
+		sh->tag_table = NULL;
+	}
 	if (sh->pop_vlan_action) {
 		mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
 		sh->pop_vlan_action = NULL;
 	}
 	pthread_mutex_destroy(&sh->dv_mutex);
-#else
-	(void)priv;
-#endif
+#endif /* HAVE_MLX5DV_DR */
+	mlx5_free_table_hash_list(priv);
 }
 
 /**
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 2094e18..e482388 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -6173,24 +6173,16 @@ struct field_modify_info modify_tcp[] = {
 			.direction = !!egress,
 		}
 	};
-	struct mlx5_hlist_entry *pos;
+	struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls,
+							 table_key.v64);
 	struct mlx5_flow_tbl_data_entry *tbl_data;
-
-#ifdef HAVE_MLX5DV_DR
 	int ret;
 	void *domain;
 
-	pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
 	if (pos) {
 		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
 					entry);
 		tbl = &tbl_data->tbl;
-		if (!tbl->obj) {
-			rte_flow_error_set(error, ENOKEY,
-					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-					   NULL, "cannot find created table");
-			return NULL;
-		}
 		rte_atomic32_inc(&tbl->refcnt);
 		return tbl;
 	}
@@ -6236,24 +6228,6 @@ struct field_modify_info modify_tcp[] = {
 	}
 	rte_atomic32_inc(&tbl->refcnt);
 	return tbl;
-#else
-	/* Just to make the compiling pass when no HAVE_MLX5DV_DR defined. */
-	pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
-	if (pos) {
-		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
-					entry);
-		tbl = &tbl_data->tbl;
-		if (!tbl->obj) {
-			rte_flow_error_set(error, ENOKEY,
-					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-					   NULL, "cannot find created table");
-			return NULL;
-		}
-		rte_atomic32_inc(&tbl->refcnt);
-		return tbl;
-	}
-	return NULL;
-#endif
 }
 
 /**
@@ -6348,18 +6322,14 @@ struct field_modify_info modify_tcp[] = {
 			rte_atomic32_inc(&cache_matcher->refcnt);
 			dev_flow->dv.matcher = cache_matcher;
 			/* old matcher should not make the table ref++. */
-#ifdef HAVE_MLX5DV_DR
 			flow_dv_tbl_resource_release(dev, tbl);
-#endif
 			return 0;
 		}
 	}
 	/* Register new matcher. */
 	cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
 	if (!cache_matcher) {
-#ifdef HAVE_MLX5DV_DR
 		flow_dv_tbl_resource_release(dev, tbl);
-#endif
 		return rte_flow_error_set(error, ENOMEM,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 					  "cannot allocate matcher memory");
-- 
1.8.3.1



* [dpdk-dev] [PATCH v2] net/mlx5: fix flow table hash list conversion
  2019-11-14 16:22 [dpdk-dev] [PATCH] net/mlx5: fix flow table hash list conversion Matan Azrad
@ 2019-11-17 12:14 ` Matan Azrad
  2019-11-17 14:13   ` Raslan Darawsheh
  0 siblings, 1 reply; 3+ messages in thread
From: Matan Azrad @ 2019-11-17 12:14 UTC (permalink / raw)
  To: dev; +Cc: Viacheslav Ovsiienko, bingz

For the case when DR is not supported and DV is supported:
	the multi-table feature is off.
	In this case, only table 0 is supported.
	The table 0 structure was wrongly not created, which prevented
	any matcher object from being created and even caused crashes.

Create the table hash list in the DV case too.
Create an empty table 0 structure for each domain when DR is not
supported.
Allow a NULL DR internal table object to be used.

Fixes: 860897d2895a ("net/mlx5: reorganize flow tables with hash list")
Cc: bingz@mellanox.com

Signed-off-by: Matan Azrad <matan@mellanox.com>
---

V2:
Fix checkpatch warnings.

 drivers/net/mlx5/mlx5.c         | 185 +++++++++++++++++++++++++++++++++-------
 drivers/net/mlx5/mlx5_flow_dv.c |  34 +-------
 2 files changed, 154 insertions(+), 65 deletions(-)
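
For context, once these root entries exist, the DV lookup that this
patch relies on reduces to a hash list lookup plus a reference count
increment, with a possibly NULL tbl.obj when DR is absent. The condensed
sketch below mirrors the lookup path visible in the mlx5_flow_dv.c hunk;
it borrows the driver's types and helpers (mlx5_hlist_lookup,
container_of, rte_atomic32_inc, and the mlx5_flow_tbl_resource return
type from the driver headers), so it is an illustration rather than a
buildable stand-alone function.

/*
 * Condensed sketch of the DR-less root table lookup: table 0 entries
 * are pre-created per domain, so the lookup just finds one of them and
 * bumps its reference count; tbl.obj may legitimately be NULL.
 */
static struct mlx5_flow_tbl_resource *
root_tbl_lookup_sketch(struct mlx5_ibv_shared *sh, uint8_t domain,
		       uint8_t direction)
{
	union mlx5_flow_tbl_key key = {
		{
			.table_id = 0,	/* only table 0 without DR */
			.reserved = 0,
			.domain = domain,
			.direction = direction,
		}
	};
	struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls,
							 key.v64);
	struct mlx5_flow_tbl_data_entry *tbl_data;

	if (!pos)
		return NULL;
	tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry, entry);
	rte_atomic32_inc(&tbl_data->tbl.refcnt);
	return &tbl_data->tbl;
}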

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 35baaf7..0dd3391 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -715,6 +715,144 @@ struct mlx5_flow_id_pool *
 }
 
 /**
+ * Destroy table hash list and all the root entries per domain.
+ *
+ * @param[in] priv
+ *   Pointer to the private device data structure.
+ */
+static void
+mlx5_free_table_hash_list(struct mlx5_priv *priv)
+{
+	struct mlx5_ibv_shared *sh = priv->sh;
+	struct mlx5_flow_tbl_data_entry *tbl_data;
+	union mlx5_flow_tbl_key table_key = {
+		{
+			.table_id = 0,
+			.reserved = 0,
+			.domain = 0,
+			.direction = 0,
+		}
+	};
+	struct mlx5_hlist_entry *pos;
+
+	if (!sh->flow_tbls)
+		return;
+	pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
+	if (pos) {
+		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
+					entry);
+		assert(tbl_data);
+		mlx5_hlist_remove(sh->flow_tbls, pos);
+		rte_free(tbl_data);
+	}
+	table_key.direction = 1;
+	pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
+	if (pos) {
+		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
+					entry);
+		assert(tbl_data);
+		mlx5_hlist_remove(sh->flow_tbls, pos);
+		rte_free(tbl_data);
+	}
+	table_key.direction = 0;
+	table_key.domain = 1;
+	pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
+	if (pos) {
+		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
+					entry);
+		assert(tbl_data);
+		mlx5_hlist_remove(sh->flow_tbls, pos);
+		rte_free(tbl_data);
+	}
+	mlx5_hlist_destroy(sh->flow_tbls, NULL, NULL);
+}
+
+/**
+ * Initialize flow table hash list and create the root table entry
+ * for each domain.
+ *
+ * @param[in] priv
+ *   Pointer to the private device data structure.
+ *
+ * @return
+ *   Zero on success, positive error code otherwise.
+ */
+static int
+mlx5_alloc_table_hash_list(struct mlx5_priv *priv)
+{
+	struct mlx5_ibv_shared *sh = priv->sh;
+	char s[MLX5_HLIST_NAMESIZE];
+	int err = 0;
+
+	assert(sh);
+	snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
+	sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE);
+	if (!sh->flow_tbls) {
+		DRV_LOG(ERR, "flow tables with hash creation failed.\n");
+		err = ENOMEM;
+		return err;
+	}
+#ifndef HAVE_MLX5DV_DR
+	/*
+	 * In case we have no DR support, the zero tables should still be
+	 * created because DV expects to see them even if they cannot be
+	 * created by RDMA-CORE.
+	 */
+	union mlx5_flow_tbl_key table_key = {
+		{
+			.table_id = 0,
+			.reserved = 0,
+			.domain = 0,
+			.direction = 0,
+		}
+	};
+	struct mlx5_flow_tbl_data_entry *tbl_data = rte_zmalloc(NULL,
+							  sizeof(*tbl_data), 0);
+
+	if (!tbl_data) {
+		err = ENOMEM;
+		goto error;
+	}
+	tbl_data->entry.key = table_key.v64;
+	err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
+	if (err)
+		goto error;
+	rte_atomic32_init(&tbl_data->tbl.refcnt);
+	rte_atomic32_inc(&tbl_data->tbl.refcnt);
+	table_key.direction = 1;
+	tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
+	if (!tbl_data) {
+		err = ENOMEM;
+		goto error;
+	}
+	tbl_data->entry.key = table_key.v64;
+	err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
+	if (err)
+		goto error;
+	rte_atomic32_init(&tbl_data->tbl.refcnt);
+	rte_atomic32_inc(&tbl_data->tbl.refcnt);
+	table_key.direction = 0;
+	table_key.domain = 1;
+	tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
+	if (!tbl_data) {
+		err = ENOMEM;
+		goto error;
+	}
+	tbl_data->entry.key = table_key.v64;
+	err = mlx5_hlist_insert(sh->flow_tbls, &tbl_data->entry);
+	if (err)
+		goto error;
+	rte_atomic32_init(&tbl_data->tbl.refcnt);
+	rte_atomic32_inc(&tbl_data->tbl.refcnt);
+	return err;
+error:
+	mlx5_free_table_hash_list(priv);
+#endif /* HAVE_MLX5DV_DR */
+	return err;
+
+}
+
+/**
  * Initialize DR related data within private structure.
  * Routine checks the reference counter and does actual
  * resources creation/initialization only if counter is zero.
@@ -728,13 +866,15 @@ struct mlx5_flow_id_pool *
 static int
 mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 {
+	int err = mlx5_alloc_table_hash_list(priv);
+
+	if (err)
+		return err;
 #ifdef HAVE_MLX5DV_DR
 	struct mlx5_ibv_shared *sh = priv->sh;
-	int err = 0;
-	void *domain;
 	char s[MLX5_HLIST_NAMESIZE];
+	void *domain;
 
-	assert(sh);
 	if (sh->dv_refcnt) {
 		/* Shared DV/DR structures is already initialized. */
 		sh->dv_refcnt++;
@@ -772,20 +912,11 @@ struct mlx5_flow_id_pool *
 		sh->esw_drop_action = mlx5_glue->dr_create_flow_action_drop();
 	}
 #endif
-	snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
-	sh->flow_tbls = mlx5_hlist_create(s,
-					  MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE);
-	if (!sh->flow_tbls) {
-		DRV_LOG(ERR, "flow tables with hash creation failed.\n");
-		err = -ENOMEM;
-		goto error;
-	}
 	/* create tags hash list table. */
 	snprintf(s, sizeof(s), "%s_tags", priv->sh->ibdev_name);
 	sh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE);
 	if (!sh->flow_tbls) {
 		DRV_LOG(ERR, "tags with hash creation failed.\n");
-		err = -ENOMEM;
 		goto error;
 	}
 	sh->pop_vlan_action = mlx5_glue->dr_create_flow_action_pop_vlan();
@@ -807,10 +938,6 @@ struct mlx5_flow_id_pool *
 		mlx5_glue->dr_destroy_domain(sh->fdb_domain);
 		sh->fdb_domain = NULL;
 	}
-	if (sh->flow_tbls) {
-		mlx5_hlist_destroy(sh->flow_tbls, NULL, NULL);
-		sh->flow_tbls = NULL;
-	}
 	if (sh->esw_drop_action) {
 		mlx5_glue->destroy_flow_action(sh->esw_drop_action);
 		sh->esw_drop_action = NULL;
@@ -819,11 +946,9 @@ struct mlx5_flow_id_pool *
 		mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
 		sh->pop_vlan_action = NULL;
 	}
-	return err;
-#else
-	(void)priv;
-	return 0;
+	mlx5_free_table_hash_list(priv);
 #endif
+	return err;
 }
 
 /**
@@ -846,16 +971,6 @@ struct mlx5_flow_id_pool *
 	assert(sh->dv_refcnt);
 	if (sh->dv_refcnt && --sh->dv_refcnt)
 		return;
-	if (sh->flow_tbls) {
-		/* flow table entries should be handled properly before. */
-		mlx5_hlist_destroy(sh->flow_tbls, NULL, NULL);
-		sh->flow_tbls = NULL;
-	}
-	if (sh->tag_table) {
-		/* tags should be destroyed with flow before. */
-		mlx5_hlist_destroy(sh->tag_table, NULL, NULL);
-		sh->tag_table = NULL;
-	}
 	if (sh->rx_domain) {
 		mlx5_glue->dr_destroy_domain(sh->rx_domain);
 		sh->rx_domain = NULL;
@@ -874,14 +989,18 @@ struct mlx5_flow_id_pool *
 		sh->esw_drop_action = NULL;
 	}
 #endif
+	if (sh->tag_table) {
+		/* tags should be destroyed with flow before. */
+		mlx5_hlist_destroy(sh->tag_table, NULL, NULL);
+		sh->tag_table = NULL;
+	}
 	if (sh->pop_vlan_action) {
 		mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
 		sh->pop_vlan_action = NULL;
 	}
 	pthread_mutex_destroy(&sh->dv_mutex);
-#else
-	(void)priv;
-#endif
+#endif /* HAVE_MLX5DV_DR */
+	mlx5_free_table_hash_list(priv);
 }
 
 /**
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 2094e18..e482388 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -6173,24 +6173,16 @@ struct field_modify_info modify_tcp[] = {
 			.direction = !!egress,
 		}
 	};
-	struct mlx5_hlist_entry *pos;
+	struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls,
+							 table_key.v64);
 	struct mlx5_flow_tbl_data_entry *tbl_data;
-
-#ifdef HAVE_MLX5DV_DR
 	int ret;
 	void *domain;
 
-	pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
 	if (pos) {
 		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
 					entry);
 		tbl = &tbl_data->tbl;
-		if (!tbl->obj) {
-			rte_flow_error_set(error, ENOKEY,
-					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-					   NULL, "cannot find created table");
-			return NULL;
-		}
 		rte_atomic32_inc(&tbl->refcnt);
 		return tbl;
 	}
@@ -6236,24 +6228,6 @@ struct field_modify_info modify_tcp[] = {
 	}
 	rte_atomic32_inc(&tbl->refcnt);
 	return tbl;
-#else
-	/* Just to make the compiling pass when no HAVE_MLX5DV_DR defined. */
-	pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
-	if (pos) {
-		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
-					entry);
-		tbl = &tbl_data->tbl;
-		if (!tbl->obj) {
-			rte_flow_error_set(error, ENOKEY,
-					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-					   NULL, "cannot find created table");
-			return NULL;
-		}
-		rte_atomic32_inc(&tbl->refcnt);
-		return tbl;
-	}
-	return NULL;
-#endif
 }
 
 /**
@@ -6348,18 +6322,14 @@ struct field_modify_info modify_tcp[] = {
 			rte_atomic32_inc(&cache_matcher->refcnt);
 			dev_flow->dv.matcher = cache_matcher;
 			/* old matcher should not make the table ref++. */
-#ifdef HAVE_MLX5DV_DR
 			flow_dv_tbl_resource_release(dev, tbl);
-#endif
 			return 0;
 		}
 	}
 	/* Register new matcher. */
 	cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
 	if (!cache_matcher) {
-#ifdef HAVE_MLX5DV_DR
 		flow_dv_tbl_resource_release(dev, tbl);
-#endif
 		return rte_flow_error_set(error, ENOMEM,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 					  "cannot allocate matcher memory");
-- 
1.8.3.1



* Re: [dpdk-dev] [PATCH v2] net/mlx5: fix flow table hash list conversion
  2019-11-17 12:14 ` [dpdk-dev] [PATCH v2] " Matan Azrad
@ 2019-11-17 14:13   ` Raslan Darawsheh
  0 siblings, 0 replies; 3+ messages in thread
From: Raslan Darawsheh @ 2019-11-17 14:13 UTC (permalink / raw)
  To: Matan Azrad, dev; +Cc: Slava Ovsiienko, Bing Zhao

Hi,

> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Matan Azrad
> Sent: Sunday, November 17, 2019 2:15 PM
> To: dev@dpdk.org
> Cc: Slava Ovsiienko <viacheslavo@mellanox.com>; Bing Zhao
> <bingz@mellanox.com>
> Subject: [dpdk-dev] [PATCH v2] net/mlx5: fix flow table hash list conversion
> 
> For the case when DR is not supported and DV is supported:
> 	the multi-table feature is off.
> 	In this case, only table 0 is supported.
> 	The table 0 structure was wrongly not created, which prevented
> 	any matcher object from being created and even caused crashes.
> 
> Create the table hash list in the DV case too.
> Create an empty table 0 structure for each domain when DR is not
> supported.
> Allow a NULL DR internal table object to be used.
> 
> Fixes: 860897d2895a ("net/mlx5: reorganize flow tables with hash list")
> Cc: bingz@mellanox.com
> 
> Signed-off-by: Matan Azrad <matan@mellanox.com>
> ---
> 
> V2:
> Fix checkpatch warnings.
> 
Fixed a checkpatch issue (removed an extra empty line)

Patch applied to next-net-mlx,

Kindest regards,
Raslan Darawsheh
