DPDK patches and discussions
* [dpdk-dev] [PATCH] net/mlx5: manage header reformat actions with hashed list
From: Suanming Mou @ 2020-09-16 10:19 UTC (permalink / raw)
  To: viacheslavo, matan; +Cc: rasland, dev

To manage encap/decap header reformat actions, the mlx5 PMD used a single
linked list, so lookup and insertion took too long when there were millions
of objects, which limited the flow insertion/deletion rate.

To optimize performance, a hashed list is used instead. The hash list
implementation is updated to support non-unique keys with few collisions.
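
For illustration, a minimal standalone sketch of the key layout idea follows
(not the PMD code itself): the small discriminating fields plus a checksum of
the reformat buffer are packed into one 64-bit hash key, mirroring the union
added by this patch. The toy_cksum() helper is a stand-in for DPDK's
__rte_raw_cksum(), which the patch actually uses; the field values in main()
are made-up examples.

#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same layout as the key union added below: four 8-bit discriminators in
 * the low 32 bits, a buffer checksum in the high 32 bits. */
union encap_decap_key {
	struct {
		uint32_t ft_type:8;	/* Flow table type, Rx or Tx. */
		uint32_t refmt_type:8;	/* Header reformat type. */
		uint32_t buf_size:8;	/* Encap buffer size. */
		uint32_t table_level:8;	/* Root table or not. */
		uint32_t cksum;		/* Checksum of the encap buffer. */
	};
	uint64_t v64;			/* Full 64-bit key value. */
};

/* Toy stand-in for __rte_raw_cksum(): any cheap digest works here because
 * the key does not have to be unique, only well distributed. */
static uint32_t
toy_cksum(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum = sum * 31u + buf[i];
	return sum;
}

int
main(void)
{
	const uint8_t encap_buf[] = { 0x02, 0x00, 0x00, 0x01 };
	union encap_decap_key key = {
		{
			.ft_type = 1,		/* e.g. Tx table. */
			.refmt_type = 2,	/* e.g. L2-to-tunnel. */
			.buf_size = sizeof(encap_buf),
			.table_level = 1,	/* Non-root table. */
			.cksum = 0,
		}
	};

	/* Fold the reformat buffer contents into the key, as the patch
	 * does with __rte_raw_cksum(). */
	key.cksum = toy_cksum(encap_buf, sizeof(encap_buf));
	printf("64-bit hash key: 0x%016" PRIx64 "\n", key.v64);
	return 0;
}

Two different reformat buffers can still map to the same 64-bit key, which is
why lookups fall back to a full field and buffer comparison (the match
callback in the diff); this is what "non-unique keys with few collisions"
means in practice.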

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_os.c | 16 +++++++
 drivers/net/mlx5/mlx5.h          |  2 +-
 drivers/net/mlx5/mlx5_defs.h     |  3 ++
 drivers/net/mlx5/mlx5_flow.h     | 15 ++++++-
 drivers/net/mlx5/mlx5_flow_dv.c  | 97 ++++++++++++++++++++++++++++++----------
 5 files changed, 107 insertions(+), 26 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 0511a55..49bceeb 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -250,6 +250,14 @@
 		err = ENOMEM;
 		goto error;
 	}
+	snprintf(s, sizeof(s), "%s_encaps_decaps", sh->ibdev_name);
+	sh->encaps_decaps = mlx5_hlist_create(s,
+					      MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ);
+	if (!sh->encaps_decaps) {
+		DRV_LOG(ERR, "encap decap hash creation failed");
+		err = ENOMEM;
+		goto error;
+	}
 #ifdef HAVE_MLX5DV_DR
 	void *domain;
 
@@ -323,6 +331,10 @@
 		mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
 		sh->pop_vlan_action = NULL;
 	}
+	if (sh->encaps_decaps) {
+		mlx5_hlist_destroy(sh->encaps_decaps, NULL, NULL);
+		sh->encaps_decaps = NULL;
+	}
 	if (sh->modify_cmds) {
 		mlx5_hlist_destroy(sh->modify_cmds, NULL, NULL);
 		sh->modify_cmds = NULL;
@@ -380,6 +392,10 @@
 	}
 	pthread_mutex_destroy(&sh->dv_mutex);
 #endif /* HAVE_MLX5DV_DR */
+	if (sh->encaps_decaps) {
+		mlx5_hlist_destroy(sh->encaps_decaps, NULL, NULL);
+		sh->encaps_decaps = NULL;
+	}
 	if (sh->modify_cmds) {
 		mlx5_hlist_destroy(sh->modify_cmds, NULL, NULL);
 		sh->modify_cmds = NULL;
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 865e72d..9aaf585 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -635,7 +635,7 @@ struct mlx5_dev_ctx_shared {
 	/* Direct Rules tables for FDB, NIC TX+RX */
 	void *esw_drop_action; /* Pointer to DR E-Switch drop action. */
 	void *pop_vlan_action; /* Pointer to DR pop VLAN action. */
-	uint32_t encaps_decaps; /* Encap/decap action indexed memory list. */
+	struct mlx5_hlist *encaps_decaps; /* Encap/decap action hash list. */
 	struct mlx5_hlist *modify_cmds;
 	struct mlx5_hlist *tag_table;
 	uint32_t port_id_action_list; /* List of port ID actions. */
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index 90f1839..0df4739 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -190,6 +190,9 @@
 /* Size of the simple hash table for header modify table. */
 #define MLX5_FLOW_HDR_MODIFY_HTABLE_SZ (1 << 16)
 
+/* Size of the simple hash table for encap decap table. */
+#define MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ (1 << 16)
+
 /* Hairpin TX/RX queue configuration parameters. */
 #define MLX5_HAIRPIN_QUEUE_STRIDE 6
 #define MLX5_HAIRPIN_JUMBO_LOG_SIZE (14 + 2)
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 92301e4..279daf2 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -388,9 +388,21 @@ struct mlx5_flow_dv_matcher {
 
 #define MLX5_ENCAP_MAX_LEN 132
 
+/* Encap/decap resource key of the hash organization. */
+union mlx5_flow_encap_decap_key {
+	struct {
+		uint32_t ft_type:8;	/**< Flow table type, Rx or Tx. */
+		uint32_t refmt_type:8;	/**< Header reformat type. */
+		uint32_t buf_size:8;	/**< Encap buf size. */
+		uint32_t table_level:8;	/**< Root table or not. */
+		uint32_t cksum;		/**< Encap buf check sum. */
+	};
+	uint64_t v64;			/**< full 64bits value of key */
+};
+
 /* Encap/decap resource structure. */
 struct mlx5_flow_dv_encap_decap_resource {
-	ILIST_ENTRY(uint32_t)next;
+	struct mlx5_hlist_entry entry;
 	/* Pointer to next element. */
 	rte_atomic32_t refcnt; /**< Reference counter. */
 	void *action;
@@ -400,6 +412,7 @@ struct mlx5_flow_dv_encap_decap_resource {
 	uint8_t reformat_type;
 	uint8_t ft_type;
 	uint64_t flags; /**< Flags for RDMA API. */
+	uint32_t idx; /**< Index for the index memory pool. */
 };
 
 /* Tag resource structure. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 56529c8..3819cdb 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -2546,6 +2546,39 @@ struct field_modify_info modify_tcp[] = {
 }
 
 /**
+ * Match encap_decap resource.
+ *
+ * @param entry
+ *   Pointer to exist resource entry object.
+ * @param ctx
+ *   Pointer to new encap_decap resource.
+ *
+ * @return
+ *   0 on matching, -1 otherwise.
+ */
+static int
+flow_dv_encap_decap_resource_match(struct mlx5_hlist_entry *entry, void *ctx)
+{
+	struct mlx5_flow_dv_encap_decap_resource *resource;
+	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
+
+	resource = (struct mlx5_flow_dv_encap_decap_resource *)ctx;
+	cache_resource = container_of(entry,
+				      struct mlx5_flow_dv_encap_decap_resource,
+				      entry);
+	if (resource->entry.key == cache_resource->entry.key &&
+	    resource->reformat_type == cache_resource->reformat_type &&
+	    resource->ft_type == cache_resource->ft_type &&
+	    resource->flags == cache_resource->flags &&
+	    resource->size == cache_resource->size &&
+	    !memcmp((const void *)resource->buf,
+		    (const void *)cache_resource->buf,
+		    resource->size))
+		return 0;
+	return -1;
+}
+
+/**
  * Find existing encap/decap resource or create and register a new one.
  *
  * @param[in, out] dev
@@ -2571,7 +2604,16 @@ struct field_modify_info modify_tcp[] = {
 	struct mlx5_dev_ctx_shared *sh = priv->sh;
 	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
 	struct mlx5dv_dr_domain *domain;
-	uint32_t idx = 0;
+	struct mlx5_hlist_entry *entry;
+	union mlx5_flow_encap_decap_key encap_decap_key = {
+		{
+			.ft_type = resource->ft_type,
+			.refmt_type = resource->reformat_type,
+			.buf_size = resource->size,
+			.table_level = !!dev_flow->dv.group,
+			.cksum = 0,
+		}
+	};
 	int ret;
 
 	resource->flags = dev_flow->dv.group ? 0 : 1;
@@ -2581,24 +2623,23 @@ struct field_modify_info modify_tcp[] = {
 		domain = sh->rx_domain;
 	else
 		domain = sh->tx_domain;
+	encap_decap_key.cksum = __rte_raw_cksum(resource->buf,
+						resource->size, 0);
+	resource->entry.key = encap_decap_key.v64;
 	/* Lookup a matching resource from cache. */
-	ILIST_FOREACH(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], sh->encaps_decaps, idx,
-		      cache_resource, next) {
-		if (resource->reformat_type == cache_resource->reformat_type &&
-		    resource->ft_type == cache_resource->ft_type &&
-		    resource->flags == cache_resource->flags &&
-		    resource->size == cache_resource->size &&
-		    !memcmp((const void *)resource->buf,
-			    (const void *)cache_resource->buf,
-			    resource->size)) {
-			DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
-				(void *)cache_resource,
-				rte_atomic32_read(&cache_resource->refcnt));
-			rte_atomic32_inc(&cache_resource->refcnt);
-			dev_flow->handle->dvh.rix_encap_decap = idx;
-			dev_flow->dv.encap_decap = cache_resource;
-			return 0;
-		}
+	entry = mlx5_hlist_lookup_ex(sh->encaps_decaps, resource->entry.key,
+				     flow_dv_encap_decap_resource_match,
+				     (void *)resource);
+	if (entry) {
+		cache_resource = container_of(entry,
+			struct mlx5_flow_dv_encap_decap_resource, entry);
+		DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
+			(void *)cache_resource,
+			rte_atomic32_read(&cache_resource->refcnt));
+		rte_atomic32_inc(&cache_resource->refcnt);
+		dev_flow->handle->dvh.rix_encap_decap = cache_resource->idx;
+		dev_flow->dv.encap_decap = cache_resource;
+		return 0;
 	}
 	/* Register new encap/decap resource. */
 	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
@@ -2608,6 +2649,7 @@ struct field_modify_info modify_tcp[] = {
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 					  "cannot allocate resource memory");
 	*cache_resource = *resource;
+	cache_resource->idx = dev_flow->handle->dvh.rix_encap_decap;
 	ret = mlx5_flow_os_create_flow_action_packet_reformat
 					(sh->ctx, domain, cache_resource,
 					 &cache_resource->action);
@@ -2619,9 +2661,17 @@ struct field_modify_info modify_tcp[] = {
 	}
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
-	ILIST_INSERT(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &sh->encaps_decaps,
-		     dev_flow->handle->dvh.rix_encap_decap, cache_resource,
-		     next);
+	if (mlx5_hlist_insert_ex(sh->encaps_decaps, &cache_resource->entry,
+				 flow_dv_encap_decap_resource_match,
+				 (void *)cache_resource)) {
+		claim_zero(mlx5_flow_os_destroy_flow_action
+						(cache_resource->action));
+		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
+				cache_resource->idx);
+		return rte_flow_error_set(error, EEXIST,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  NULL, "action exist");
+	}
 	dev_flow->dv.encap_decap = cache_resource;
 	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
 		(void *)cache_resource,
@@ -9092,9 +9142,8 @@ struct field_modify_info modify_tcp[] = {
 	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
 		claim_zero(mlx5_flow_os_destroy_flow_action
 						(cache_resource->action));
-		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
-			     &priv->sh->encaps_decaps, idx,
-			     cache_resource, next);
+		mlx5_hlist_remove(priv->sh->encaps_decaps,
+				  &cache_resource->entry);
 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
 		DRV_LOG(DEBUG, "encap/decap resource %p: removed",
 			(void *)cache_resource);
-- 
1.8.3.1



* Re: [dpdk-dev] [PATCH] net/mlx5: manage header reformat actions with hashed list
From: Raslan Darawsheh @ 2020-09-23  7:39 UTC (permalink / raw)
  To: Suanming Mou, Slava Ovsiienko, Matan Azrad; +Cc: dev

Hi,

> -----Original Message-----
> From: Suanming Mou <suanmingm@nvidia.com>
> Sent: Wednesday, September 16, 2020 1:20 PM
> To: Slava Ovsiienko <viacheslavo@nvidia.com>; Matan Azrad
> <matan@nvidia.com>
> Cc: Raslan Darawsheh <rasland@nvidia.com>; dev@dpdk.org
> Subject: [PATCH] net/mlx5: manage header reformat actions with hashed list
> 
> To manage encap/decap header reformat actions, the mlx5 PMD used a single
> linked list, so lookup and insertion took too long when there were millions
> of objects, which limited the flow insertion/deletion rate.
> 
> To optimize performance, a hashed list is used instead. The hash list
> implementation is updated to support non-unique keys with few collisions.
> 
> Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
> Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> ---

Patch applied to next-net-mlx,

Kindest regards
Raslan Darawsheh

