DPDK patches and discussions
From: Suanming Mou <suanmingm@mellanox.com>
To: viacheslavo@mellanox.com, matan@mellanox.com
Cc: orika@mellanox.com, rasland@mellanox.com, dev@dpdk.org
Subject: [dpdk-dev] [PATCH v2 3/7] net/mlx5: convert control path memory to unified malloc
Date: Thu, 16 Jul 2020 17:20:12 +0800	[thread overview]
Message-ID: <1594891216-11778-4-git-send-email-suanmingm@mellanox.com> (raw)
In-Reply-To: <1594891216-11778-1-git-send-email-suanmingm@mellanox.com>

This commit converts the control path memory allocations to the unified
malloc functions; the conversion pattern is sketched after the list below.

The objects changed are:

1. hlist;
2. rss key;
3. vlan vmwa;
4. indexed pool;
5. fdir objects;
6. meter profile;
7. flow counter pool;
8. hrxq and indirect table;
9. flow object cache resources;
10. temporary resources in flow create;
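
For illustration, the conversion applied across the files below follows
roughly this pattern (a minimal sketch; MLX5_MEM_ZERO and SOCKET_ID_ANY
are the arguments most commonly used in this patch, not the only valid
ones):

    /* Before: EAL allocator, zero-initialized, cache-line aligned. */
    obj = rte_zmalloc(__func__, sizeof(*obj), RTE_CACHE_LINE_SIZE);
    ...
    rte_free(obj);

    /* After: unified allocator, same semantics, explicit NUMA socket. */
    obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*obj),
                      RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
    ...
    mlx5_free(obj);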

Signed-off-by: Suanming Mou <suanmingm@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
---
 drivers/net/mlx5/mlx5.c            | 88 ++++++++++++++++++++------------------
 drivers/net/mlx5/mlx5_ethdev.c     | 15 ++++---
 drivers/net/mlx5/mlx5_flow.c       | 45 +++++++++++--------
 drivers/net/mlx5/mlx5_flow_dv.c    | 46 +++++++++++---------
 drivers/net/mlx5/mlx5_flow_meter.c | 11 ++---
 drivers/net/mlx5/mlx5_flow_verbs.c |  8 ++--
 drivers/net/mlx5/mlx5_mp.c         |  3 +-
 drivers/net/mlx5/mlx5_rss.c        | 13 ++++--
 drivers/net/mlx5/mlx5_rxq.c        | 37 +++++++++-------
 drivers/net/mlx5/mlx5_utils.c      | 60 +++++++++++++++-----------
 drivers/net/mlx5/mlx5_utils.h      |  2 +-
 drivers/net/mlx5/mlx5_vlan.c       |  8 ++--
 12 files changed, 190 insertions(+), 146 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 9b17266..ba86c68 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -40,6 +40,7 @@
 #include <mlx5_common.h>
 #include <mlx5_common_os.h>
 #include <mlx5_common_mp.h>
+#include <mlx5_malloc.h>
 
 #include "mlx5_defs.h"
 #include "mlx5.h"
@@ -194,8 +195,8 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
 		.grow_shift = 2,
 		.need_lock = 0,
 		.release_mem_en = 1,
-		.malloc = rte_malloc_socket,
-		.free = rte_free,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
 		.type = "mlx5_encap_decap_ipool",
 	},
 	{
@@ -205,8 +206,8 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
 		.grow_shift = 2,
 		.need_lock = 0,
 		.release_mem_en = 1,
-		.malloc = rte_malloc_socket,
-		.free = rte_free,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
 		.type = "mlx5_push_vlan_ipool",
 	},
 	{
@@ -216,8 +217,8 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
 		.grow_shift = 2,
 		.need_lock = 0,
 		.release_mem_en = 1,
-		.malloc = rte_malloc_socket,
-		.free = rte_free,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
 		.type = "mlx5_tag_ipool",
 	},
 	{
@@ -227,8 +228,8 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
 		.grow_shift = 2,
 		.need_lock = 0,
 		.release_mem_en = 1,
-		.malloc = rte_malloc_socket,
-		.free = rte_free,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
 		.type = "mlx5_port_id_ipool",
 	},
 	{
@@ -238,8 +239,8 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
 		.grow_shift = 2,
 		.need_lock = 0,
 		.release_mem_en = 1,
-		.malloc = rte_malloc_socket,
-		.free = rte_free,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
 		.type = "mlx5_jump_ipool",
 	},
 #endif
@@ -250,8 +251,8 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
 		.grow_shift = 2,
 		.need_lock = 0,
 		.release_mem_en = 1,
-		.malloc = rte_malloc_socket,
-		.free = rte_free,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
 		.type = "mlx5_meter_ipool",
 	},
 	{
@@ -261,8 +262,8 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
 		.grow_shift = 2,
 		.need_lock = 0,
 		.release_mem_en = 1,
-		.malloc = rte_malloc_socket,
-		.free = rte_free,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
 		.type = "mlx5_mcp_ipool",
 	},
 	{
@@ -272,8 +273,8 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
 		.grow_shift = 2,
 		.need_lock = 0,
 		.release_mem_en = 1,
-		.malloc = rte_malloc_socket,
-		.free = rte_free,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
 		.type = "mlx5_hrxq_ipool",
 	},
 	{
@@ -287,8 +288,8 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
 		.grow_shift = 2,
 		.need_lock = 0,
 		.release_mem_en = 1,
-		.malloc = rte_malloc_socket,
-		.free = rte_free,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
 		.type = "mlx5_flow_handle_ipool",
 	},
 	{
@@ -296,8 +297,8 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
 		.trunk_size = 4096,
 		.need_lock = 1,
 		.release_mem_en = 1,
-		.malloc = rte_malloc_socket,
-		.free = rte_free,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
 		.type = "rte_flow_ipool",
 	},
 };
@@ -323,15 +324,16 @@ struct mlx5_flow_id_pool *
 	struct mlx5_flow_id_pool *pool;
 	void *mem;
 
-	pool = rte_zmalloc("id pool allocation", sizeof(*pool),
-			   RTE_CACHE_LINE_SIZE);
+	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool),
+			   RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
 	if (!pool) {
 		DRV_LOG(ERR, "can't allocate id pool");
 		rte_errno  = ENOMEM;
 		return NULL;
 	}
-	mem = rte_zmalloc("", MLX5_FLOW_MIN_ID_POOL_SIZE * sizeof(uint32_t),
-			  RTE_CACHE_LINE_SIZE);
+	mem = mlx5_malloc(MLX5_MEM_ZERO,
+			  MLX5_FLOW_MIN_ID_POOL_SIZE * sizeof(uint32_t),
+			  RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
 	if (!mem) {
 		DRV_LOG(ERR, "can't allocate mem for id pool");
 		rte_errno  = ENOMEM;
@@ -344,7 +346,7 @@ struct mlx5_flow_id_pool *
 	pool->max_id = max_id;
 	return pool;
 error:
-	rte_free(pool);
+	mlx5_free(pool);
 	return NULL;
 }
 
@@ -357,8 +359,8 @@ struct mlx5_flow_id_pool *
 void
 mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool)
 {
-	rte_free(pool->free_arr);
-	rte_free(pool);
+	mlx5_free(pool->free_arr);
+	mlx5_free(pool);
 }
 
 /**
@@ -410,14 +412,15 @@ struct mlx5_flow_id_pool *
 		size = pool->curr - pool->free_arr;
 		size2 = size * MLX5_ID_GENERATION_ARRAY_FACTOR;
 		MLX5_ASSERT(size2 > size);
-		mem = rte_malloc("", size2 * sizeof(uint32_t), 0);
+		mem = mlx5_malloc(0, size2 * sizeof(uint32_t), 0,
+				  SOCKET_ID_ANY);
 		if (!mem) {
 			DRV_LOG(ERR, "can't allocate mem for id pool");
 			rte_errno  = ENOMEM;
 			return -rte_errno;
 		}
 		memcpy(mem, pool->free_arr, size * sizeof(uint32_t));
-		rte_free(pool->free_arr);
+		mlx5_free(pool->free_arr);
 		pool->free_arr = mem;
 		pool->curr = pool->free_arr + size;
 		pool->last = pool->free_arr + size2;
@@ -486,7 +489,7 @@ struct mlx5_flow_id_pool *
 	LIST_REMOVE(mng, next);
 	claim_zero(mlx5_devx_cmd_destroy(mng->dm));
 	claim_zero(mlx5_glue->devx_umem_dereg(mng->umem));
-	rte_free(mem);
+	mlx5_free(mem);
 }
 
 /**
@@ -534,10 +537,10 @@ struct mlx5_flow_id_pool *
 						    (pool, j)->dcs));
 			}
 			TAILQ_REMOVE(&sh->cmng.ccont[i].pool_list, pool, next);
-			rte_free(pool);
+			mlx5_free(pool);
 			pool = TAILQ_FIRST(&sh->cmng.ccont[i].pool_list);
 		}
-		rte_free(sh->cmng.ccont[i].pools);
+		mlx5_free(sh->cmng.ccont[i].pools);
 	}
 	mng = LIST_FIRST(&sh->cmng.mem_mngs);
 	while (mng) {
@@ -860,7 +863,7 @@ struct mlx5_dev_ctx_shared *
 					entry);
 		MLX5_ASSERT(tbl_data);
 		mlx5_hlist_remove(sh->flow_tbls, pos);
-		rte_free(tbl_data);
+		mlx5_free(tbl_data);
 	}
 	table_key.direction = 1;
 	pos = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64);
@@ -869,7 +872,7 @@ struct mlx5_dev_ctx_shared *
 					entry);
 		MLX5_ASSERT(tbl_data);
 		mlx5_hlist_remove(sh->flow_tbls, pos);
-		rte_free(tbl_data);
+		mlx5_free(tbl_data);
 	}
 	table_key.direction = 0;
 	table_key.domain = 1;
@@ -879,7 +882,7 @@ struct mlx5_dev_ctx_shared *
 					entry);
 		MLX5_ASSERT(tbl_data);
 		mlx5_hlist_remove(sh->flow_tbls, pos);
-		rte_free(tbl_data);
+		mlx5_free(tbl_data);
 	}
 	mlx5_hlist_destroy(sh->flow_tbls, NULL, NULL);
 }
@@ -923,8 +926,9 @@ struct mlx5_dev_ctx_shared *
 			.direction = 0,
 		}
 	};
-	struct mlx5_flow_tbl_data_entry *tbl_data = rte_zmalloc(NULL,
-							  sizeof(*tbl_data), 0);
+	struct mlx5_flow_tbl_data_entry *tbl_data = mlx5_malloc(MLX5_MEM_ZERO,
+							  sizeof(*tbl_data), 0,
+							  SOCKET_ID_ANY);
 
 	if (!tbl_data) {
 		err = ENOMEM;
@@ -937,7 +941,8 @@ struct mlx5_dev_ctx_shared *
 	rte_atomic32_init(&tbl_data->tbl.refcnt);
 	rte_atomic32_inc(&tbl_data->tbl.refcnt);
 	table_key.direction = 1;
-	tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
+	tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0,
+			       SOCKET_ID_ANY);
 	if (!tbl_data) {
 		err = ENOMEM;
 		goto error;
@@ -950,7 +955,8 @@ struct mlx5_dev_ctx_shared *
 	rte_atomic32_inc(&tbl_data->tbl.refcnt);
 	table_key.direction = 0;
 	table_key.domain = 1;
-	tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
+	tbl_data = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tbl_data), 0,
+			       SOCKET_ID_ANY);
 	if (!tbl_data) {
 		err = ENOMEM;
 		goto error;
@@ -1181,9 +1187,9 @@ struct mlx5_dev_ctx_shared *
 	mlx5_mprq_free_mp(dev);
 	mlx5_os_free_shared_dr(priv);
 	if (priv->rss_conf.rss_key != NULL)
-		rte_free(priv->rss_conf.rss_key);
+		mlx5_free(priv->rss_conf.rss_key);
 	if (priv->reta_idx != NULL)
-		rte_free(priv->reta_idx);
+		mlx5_free(priv->reta_idx);
 	if (priv->config.vf)
 		mlx5_nl_mac_addr_flush(priv->nl_socket_route, mlx5_ifindex(dev),
 				       dev->data->mac_addrs,
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 6b4efcd..cefb450 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -21,6 +21,8 @@
 #include <rte_rwlock.h>
 #include <rte_cycles.h>
 
+#include <mlx5_malloc.h>
+
 #include "mlx5_rxtx.h"
 #include "mlx5_autoconf.h"
 
@@ -75,8 +77,8 @@
 		return -rte_errno;
 	}
 	priv->rss_conf.rss_key =
-		rte_realloc(priv->rss_conf.rss_key,
-			    MLX5_RSS_HASH_KEY_LEN, 0);
+		mlx5_realloc(priv->rss_conf.rss_key, MLX5_MEM_RTE,
+			    MLX5_RSS_HASH_KEY_LEN, 0, SOCKET_ID_ANY);
 	if (!priv->rss_conf.rss_key) {
 		DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory (%u)",
 			dev->data->port_id, rxqs_n);
@@ -142,7 +144,8 @@
 
 	if (priv->skip_default_rss_reta)
 		return ret;
-	rss_queue_arr = rte_malloc("", rxqs_n * sizeof(unsigned int), 0);
+	rss_queue_arr = mlx5_malloc(0, rxqs_n * sizeof(unsigned int), 0,
+				    SOCKET_ID_ANY);
 	if (!rss_queue_arr) {
 		DRV_LOG(ERR, "port %u cannot allocate RSS queue list (%u)",
 			dev->data->port_id, rxqs_n);
@@ -163,7 +166,7 @@
 		DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)",
 			dev->data->port_id, rss_queue_n);
 		rte_errno = EINVAL;
-		rte_free(rss_queue_arr);
+		mlx5_free(rss_queue_arr);
 		return -rte_errno;
 	}
 	DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u",
@@ -179,7 +182,7 @@
 				rss_queue_n));
 	ret = mlx5_rss_reta_index_resize(dev, reta_idx_n);
 	if (ret) {
-		rte_free(rss_queue_arr);
+		mlx5_free(rss_queue_arr);
 		return ret;
 	}
 	/*
@@ -192,7 +195,7 @@
 		if (++j == rss_queue_n)
 			j = 0;
 	}
-	rte_free(rss_queue_arr);
+	mlx5_free(rss_queue_arr);
 	return ret;
 }
 
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index ae5ccc2..cce6ce5 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -32,6 +32,7 @@
 #include <mlx5_glue.h>
 #include <mlx5_devx_cmds.h>
 #include <mlx5_prm.h>
+#include <mlx5_malloc.h>
 
 #include "mlx5_defs.h"
 #include "mlx5.h"
@@ -4010,7 +4011,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
 			   sizeof(struct rte_flow_action_set_tag) +
 			   sizeof(struct rte_flow_action_jump);
-		ext_actions = rte_zmalloc(__func__, act_size, 0);
+		ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0,
+					  SOCKET_ID_ANY);
 		if (!ext_actions)
 			return rte_flow_error_set(error, ENOMEM,
 						  RTE_FLOW_ERROR_TYPE_ACTION,
@@ -4046,7 +4048,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		 */
 		act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
 			   sizeof(struct mlx5_flow_action_copy_mreg);
-		ext_actions = rte_zmalloc(__func__, act_size, 0);
+		ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0,
+					  SOCKET_ID_ANY);
 		if (!ext_actions)
 			return rte_flow_error_set(error, ENOMEM,
 						  RTE_FLOW_ERROR_TYPE_ACTION,
@@ -4140,7 +4143,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	 * by flow_drv_destroy.
 	 */
 	flow_qrss_free_id(dev, qrss_id);
-	rte_free(ext_actions);
+	mlx5_free(ext_actions);
 	return ret;
 }
 
@@ -4205,7 +4208,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 #define METER_SUFFIX_ITEM 4
 		item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
 			    sizeof(struct mlx5_rte_flow_item_tag) * 2;
-		sfx_actions = rte_zmalloc(__func__, (act_size + item_size), 0);
+		sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size + item_size),
+					  0, SOCKET_ID_ANY);
 		if (!sfx_actions)
 			return rte_flow_error_set(error, ENOMEM,
 						  RTE_FLOW_ERROR_TYPE_ACTION,
@@ -4244,7 +4248,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 					 external, flow_idx, error);
 exit:
 	if (sfx_actions)
-		rte_free(sfx_actions);
+		mlx5_free(sfx_actions);
 	return ret;
 }
 
@@ -4658,8 +4662,8 @@ struct rte_flow *
 		}
 		if (priv_fdir_flow) {
 			LIST_REMOVE(priv_fdir_flow, next);
-			rte_free(priv_fdir_flow->fdir);
-			rte_free(priv_fdir_flow);
+			mlx5_free(priv_fdir_flow->fdir);
+			mlx5_free(priv_fdir_flow);
 		}
 	}
 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
@@ -4799,11 +4803,12 @@ struct rte_flow *
 	struct mlx5_priv *priv = dev->data->dev_private;
 
 	if (!priv->inter_flows) {
-		priv->inter_flows = rte_calloc(__func__, 1,
+		priv->inter_flows = mlx5_malloc(MLX5_MEM_ZERO,
 				    MLX5_NUM_MAX_DEV_FLOWS *
 				    sizeof(struct mlx5_flow) +
 				    (sizeof(struct mlx5_flow_rss_desc) +
-				    sizeof(uint16_t) * UINT16_MAX) * 2, 0);
+				    sizeof(uint16_t) * UINT16_MAX) * 2, 0,
+				    SOCKET_ID_ANY);
 		if (!priv->inter_flows) {
 			DRV_LOG(ERR, "can't allocate intermediate memory.");
 			return;
@@ -4827,7 +4832,7 @@ struct rte_flow *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	rte_free(priv->inter_flows);
+	mlx5_free(priv->inter_flows);
 	priv->inter_flows = NULL;
 }
 
@@ -5467,7 +5472,8 @@ struct rte_flow *
 	uint32_t flow_idx;
 	int ret;
 
-	fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0);
+	fdir_flow = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*fdir_flow), 0,
+				SOCKET_ID_ANY);
 	if (!fdir_flow) {
 		rte_errno = ENOMEM;
 		return -rte_errno;
@@ -5480,8 +5486,9 @@ struct rte_flow *
 		rte_errno = EEXIST;
 		goto error;
 	}
-	priv_fdir_flow = rte_zmalloc(__func__, sizeof(struct mlx5_fdir_flow),
-				     0);
+	priv_fdir_flow = mlx5_malloc(MLX5_MEM_ZERO,
+				     sizeof(struct mlx5_fdir_flow),
+				     0, SOCKET_ID_ANY);
 	if (!priv_fdir_flow) {
 		rte_errno = ENOMEM;
 		goto error;
@@ -5500,8 +5507,8 @@ struct rte_flow *
 		dev->data->port_id, (void *)flow);
 	return 0;
 error:
-	rte_free(priv_fdir_flow);
-	rte_free(fdir_flow);
+	mlx5_free(priv_fdir_flow);
+	mlx5_free(fdir_flow);
 	return -rte_errno;
 }
 
@@ -5541,8 +5548,8 @@ struct rte_flow *
 	LIST_REMOVE(priv_fdir_flow, next);
 	flow_idx = priv_fdir_flow->rix_flow;
 	flow_list_destroy(dev, &priv->flows, flow_idx);
-	rte_free(priv_fdir_flow->fdir);
-	rte_free(priv_fdir_flow);
+	mlx5_free(priv_fdir_flow->fdir);
+	mlx5_free(priv_fdir_flow);
 	DRV_LOG(DEBUG, "port %u deleted FDIR flow %u",
 		dev->data->port_id, flow_idx);
 	return 0;
@@ -5587,8 +5594,8 @@ struct rte_flow *
 		priv_fdir_flow = LIST_FIRST(&priv->fdir_flows);
 		LIST_REMOVE(priv_fdir_flow, next);
 		flow_list_destroy(dev, &priv->flows, priv_fdir_flow->rix_flow);
-		rte_free(priv_fdir_flow->fdir);
-		rte_free(priv_fdir_flow);
+		mlx5_free(priv_fdir_flow->fdir);
+		mlx5_free(priv_fdir_flow);
 	}
 }
 
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 8b5b683..7c121d6 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -32,6 +32,7 @@
 
 #include <mlx5_devx_cmds.h>
 #include <mlx5_prm.h>
+#include <mlx5_malloc.h>
 
 #include "mlx5_defs.h"
 #include "mlx5.h"
@@ -2615,7 +2616,7 @@ struct field_modify_info modify_tcp[] = {
 					(sh->ctx, domain, cache_resource,
 					 &cache_resource->action);
 	if (ret) {
-		rte_free(cache_resource);
+		mlx5_free(cache_resource);
 		return rte_flow_error_set(error, ENOMEM,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL, "cannot create action");
@@ -2772,7 +2773,7 @@ struct field_modify_info modify_tcp[] = {
 				(priv->sh->fdb_domain, resource->port_id,
 				 &cache_resource->action);
 	if (ret) {
-		rte_free(cache_resource);
+		mlx5_free(cache_resource);
 		return rte_flow_error_set(error, ENOMEM,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL, "cannot create action");
@@ -2851,7 +2852,7 @@ struct field_modify_info modify_tcp[] = {
 					(domain, resource->vlan_tag,
 					 &cache_resource->action);
 	if (ret) {
-		rte_free(cache_resource);
+		mlx5_free(cache_resource);
 		return rte_flow_error_set(error, ENOMEM,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL, "cannot create action");
@@ -4024,8 +4025,9 @@ struct field_modify_info modify_tcp[] = {
 		}
 	}
 	/* Register new modify-header resource. */
-	cache_resource = rte_calloc(__func__, 1,
-				    sizeof(*cache_resource) + actions_len, 0);
+	cache_resource = mlx5_malloc(MLX5_MEM_ZERO,
+				    sizeof(*cache_resource) + actions_len, 0,
+				    SOCKET_ID_ANY);
 	if (!cache_resource)
 		return rte_flow_error_set(error, ENOMEM,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -4036,7 +4038,7 @@ struct field_modify_info modify_tcp[] = {
 					(sh->ctx, ns, cache_resource,
 					 actions_len, &cache_resource->action);
 	if (ret) {
-		rte_free(cache_resource);
+		mlx5_free(cache_resource);
 		return rte_flow_error_set(error, ENOMEM,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL, "cannot create action");
@@ -4175,7 +4177,8 @@ struct field_modify_info modify_tcp[] = {
 			MLX5_COUNTERS_PER_POOL +
 			sizeof(struct mlx5_counter_stats_raw)) * raws_n +
 			sizeof(struct mlx5_counter_stats_mem_mng);
-	uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE));
+	uint8_t *mem = mlx5_malloc(MLX5_MEM_ZERO, size, sysconf(_SC_PAGESIZE),
+				  SOCKET_ID_ANY);
 	int i;
 
 	if (!mem) {
@@ -4188,7 +4191,7 @@ struct field_modify_info modify_tcp[] = {
 						 IBV_ACCESS_LOCAL_WRITE);
 	if (!mem_mng->umem) {
 		rte_errno = errno;
-		rte_free(mem);
+		mlx5_free(mem);
 		return NULL;
 	}
 	mkey_attr.addr = (uintptr_t)mem;
@@ -4207,7 +4210,7 @@ struct field_modify_info modify_tcp[] = {
 	if (!mem_mng->dm) {
 		mlx5_glue->devx_umem_dereg(mem_mng->umem);
 		rte_errno = errno;
-		rte_free(mem);
+		mlx5_free(mem);
 		return NULL;
 	}
 	mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
@@ -4244,7 +4247,7 @@ struct field_modify_info modify_tcp[] = {
 	void *old_pools = cont->pools;
 	uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
 	uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
-	void *pools = rte_calloc(__func__, 1, mem_size, 0);
+	void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
 
 	if (!pools) {
 		rte_errno = ENOMEM;
@@ -4263,7 +4266,7 @@ struct field_modify_info modify_tcp[] = {
 		mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
 			  MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
 		if (!mem_mng) {
-			rte_free(pools);
+			mlx5_free(pools);
 			return -ENOMEM;
 		}
 		for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
@@ -4278,7 +4281,7 @@ struct field_modify_info modify_tcp[] = {
 	cont->pools = pools;
 	rte_spinlock_unlock(&cont->resize_sl);
 	if (old_pools)
-		rte_free(old_pools);
+		mlx5_free(old_pools);
 	return 0;
 }
 
@@ -4367,7 +4370,7 @@ struct field_modify_info modify_tcp[] = {
 	size += MLX5_COUNTERS_PER_POOL * CNT_SIZE;
 	size += (batch ? 0 : MLX5_COUNTERS_PER_POOL * CNTEXT_SIZE);
 	size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * AGE_SIZE);
-	pool = rte_calloc(__func__, 1, size, 0);
+	pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
 	if (!pool) {
 		rte_errno = ENOMEM;
 		return NULL;
@@ -7467,7 +7470,8 @@ struct field_modify_info modify_tcp[] = {
 		}
 	}
 	/* Register new matcher. */
-	cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
+	cache_matcher = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache_matcher), 0,
+				    SOCKET_ID_ANY);
 	if (!cache_matcher) {
 		flow_dv_tbl_resource_release(dev, tbl);
 		return rte_flow_error_set(error, ENOMEM,
@@ -7483,7 +7487,7 @@ struct field_modify_info modify_tcp[] = {
 	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
 					       &cache_matcher->matcher_object);
 	if (ret) {
-		rte_free(cache_matcher);
+		mlx5_free(cache_matcher);
 #ifdef HAVE_MLX5DV_DR
 		flow_dv_tbl_resource_release(dev, tbl);
 #endif
@@ -7558,7 +7562,7 @@ struct field_modify_info modify_tcp[] = {
 	ret = mlx5_flow_os_create_flow_action_tag(tag_be24,
 						  &cache_resource->action);
 	if (ret) {
-		rte_free(cache_resource);
+		mlx5_free(cache_resource);
 		return rte_flow_error_set(error, ENOMEM,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL, "cannot create action");
@@ -7567,7 +7571,7 @@ struct field_modify_info modify_tcp[] = {
 	rte_atomic32_inc(&cache_resource->refcnt);
 	if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) {
 		mlx5_flow_os_destroy_flow_action(cache_resource->action);
-		rte_free(cache_resource);
+		mlx5_free(cache_resource);
 		return rte_flow_error_set(error, EEXIST,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL, "cannot insert tag");
@@ -8769,7 +8773,7 @@ struct field_modify_info modify_tcp[] = {
 		LIST_REMOVE(matcher, next);
 		/* table ref-- in release interface. */
 		flow_dv_tbl_resource_release(dev, matcher->tbl);
-		rte_free(matcher);
+		mlx5_free(matcher);
 		DRV_LOG(DEBUG, "port %u matcher %p: removed",
 			dev->data->port_id, (void *)matcher);
 		return 0;
@@ -8911,7 +8915,7 @@ struct field_modify_info modify_tcp[] = {
 		claim_zero(mlx5_flow_os_destroy_flow_action
 						(cache_resource->action));
 		LIST_REMOVE(cache_resource, next);
-		rte_free(cache_resource);
+		mlx5_free(cache_resource);
 		DRV_LOG(DEBUG, "modify-header resource %p: removed",
 			(void *)cache_resource);
 		return 0;
@@ -9284,7 +9288,7 @@ struct field_modify_info modify_tcp[] = {
 		flow_dv_tbl_resource_release(dev, mtd->transfer.sfx_tbl);
 	if (mtd->drop_actn)
 		claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
-	rte_free(mtd);
+	mlx5_free(mtd);
 	return 0;
 }
 
@@ -9417,7 +9421,7 @@ struct field_modify_info modify_tcp[] = {
 		rte_errno = ENOTSUP;
 		return NULL;
 	}
-	mtb = rte_calloc(__func__, 1, sizeof(*mtb), 0);
+	mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
 	if (!mtb) {
 		DRV_LOG(ERR, "Failed to allocate memory for meter.");
 		return NULL;
diff --git a/drivers/net/mlx5/mlx5_flow_meter.c b/drivers/net/mlx5/mlx5_flow_meter.c
index 86c334b..bf34687 100644
--- a/drivers/net/mlx5/mlx5_flow_meter.c
+++ b/drivers/net/mlx5/mlx5_flow_meter.c
@@ -10,6 +10,7 @@
 #include <rte_mtr_driver.h>
 
 #include <mlx5_devx_cmds.h>
+#include <mlx5_malloc.h>
 
 #include "mlx5.h"
 #include "mlx5_flow.h"
@@ -356,8 +357,8 @@
 	if (ret)
 		return ret;
 	/* Meter profile memory allocation. */
-	fmp = rte_calloc(__func__, 1, sizeof(struct mlx5_flow_meter_profile),
-			 RTE_CACHE_LINE_SIZE);
+	fmp = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_flow_meter_profile),
+			 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
 	if (fmp == NULL)
 		return -rte_mtr_error_set(error, ENOMEM,
 					  RTE_MTR_ERROR_TYPE_UNSPECIFIED,
@@ -374,7 +375,7 @@
 	TAILQ_INSERT_TAIL(fmps, fmp, next);
 	return 0;
 error:
-	rte_free(fmp);
+	mlx5_free(fmp);
 	return ret;
 }
 
@@ -417,7 +418,7 @@
 					  NULL, "Meter profile is in use.");
 	/* Remove from list. */
 	TAILQ_REMOVE(&priv->flow_meter_profiles, fmp, next);
-	rte_free(fmp);
+	mlx5_free(fmp);
 	return 0;
 }
 
@@ -1286,7 +1287,7 @@ struct mlx5_flow_meter *
 		MLX5_ASSERT(!fmp->ref_cnt);
 		/* Remove from list. */
 		TAILQ_REMOVE(&priv->flow_meter_profiles, fmp, next);
-		rte_free(fmp);
+		mlx5_free(fmp);
 	}
 	return 0;
 }
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 781c97f..72106b4 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -28,6 +28,7 @@
 
 #include <mlx5_glue.h>
 #include <mlx5_prm.h>
+#include <mlx5_malloc.h>
 
 #include "mlx5_defs.h"
 #include "mlx5.h"
@@ -188,14 +189,15 @@
 			/* Resize the container pool array. */
 			size = sizeof(struct mlx5_flow_counter_pool *) *
 				     (n_valid + MLX5_CNT_CONTAINER_RESIZE);
-			pools = rte_zmalloc(__func__, size, 0);
+			pools = mlx5_malloc(MLX5_MEM_ZERO, size, 0,
+					    SOCKET_ID_ANY);
 			if (!pools)
 				return 0;
 			if (n_valid) {
 				memcpy(pools, cont->pools,
 				       sizeof(struct mlx5_flow_counter_pool *) *
 				       n_valid);
-				rte_free(cont->pools);
+				mlx5_free(cont->pools);
 			}
 			cont->pools = pools;
 			cont->n += MLX5_CNT_CONTAINER_RESIZE;
@@ -203,7 +205,7 @@
 		/* Allocate memory for new pool*/
 		size = sizeof(*pool) + (sizeof(*cnt_ext) + sizeof(*cnt)) *
 		       MLX5_COUNTERS_PER_POOL;
-		pool = rte_calloc(__func__, 1, size, 0);
+		pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
 		if (!pool)
 			return 0;
 		pool->type |= CNT_POOL_TYPE_EXT;
diff --git a/drivers/net/mlx5/mlx5_mp.c b/drivers/net/mlx5/mlx5_mp.c
index a2b5c40..cf6e33b 100644
--- a/drivers/net/mlx5/mlx5_mp.c
+++ b/drivers/net/mlx5/mlx5_mp.c
@@ -12,6 +12,7 @@
 
 #include <mlx5_common_mp.h>
 #include <mlx5_common_mr.h>
+#include <mlx5_malloc.h>
 
 #include "mlx5.h"
 #include "mlx5_rxtx.h"
@@ -181,7 +182,7 @@
 		}
 	}
 exit:
-	free(mp_rep.msgs);
+	mlx5_free(mp_rep.msgs);
 }
 
 /**
diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c
index 653b069..a49edbc 100644
--- a/drivers/net/mlx5/mlx5_rss.c
+++ b/drivers/net/mlx5/mlx5_rss.c
@@ -21,6 +21,8 @@
 #include <rte_malloc.h>
 #include <rte_ethdev_driver.h>
 
+#include <mlx5_malloc.h>
+
 #include "mlx5_defs.h"
 #include "mlx5.h"
 #include "mlx5_rxtx.h"
@@ -57,8 +59,10 @@
 			rte_errno = EINVAL;
 			return -rte_errno;
 		}
-		priv->rss_conf.rss_key = rte_realloc(priv->rss_conf.rss_key,
-						     rss_conf->rss_key_len, 0);
+		priv->rss_conf.rss_key = mlx5_realloc(priv->rss_conf.rss_key,
+						      MLX5_MEM_RTE,
+						      rss_conf->rss_key_len,
+						      0, SOCKET_ID_ANY);
 		if (!priv->rss_conf.rss_key) {
 			rte_errno = ENOMEM;
 			return -rte_errno;
@@ -131,8 +135,9 @@
 	if (priv->reta_idx_n == reta_size)
 		return 0;
 
-	mem = rte_realloc(priv->reta_idx,
-			  reta_size * sizeof((*priv->reta_idx)[0]), 0);
+	mem = mlx5_realloc(priv->reta_idx, MLX5_MEM_RTE,
+			   reta_size * sizeof((*priv->reta_idx)[0]), 0,
+			   SOCKET_ID_ANY);
 	if (!mem) {
 		rte_errno = ENOMEM;
 		return -rte_errno;
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index b436f06..c8e3a82 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -31,6 +31,7 @@
 
 #include <mlx5_glue.h>
 #include <mlx5_devx_cmds.h>
+#include <mlx5_malloc.h>
 
 #include "mlx5_defs.h"
 #include "mlx5.h"
@@ -734,7 +735,9 @@
 	if (!dev->data->dev_conf.intr_conf.rxq)
 		return 0;
 	mlx5_rx_intr_vec_disable(dev);
-	intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
+	intr_handle->intr_vec = mlx5_malloc(0,
+				n * sizeof(intr_handle->intr_vec[0]),
+				0, SOCKET_ID_ANY);
 	if (intr_handle->intr_vec == NULL) {
 		DRV_LOG(ERR,
 			"port %u failed to allocate memory for interrupt"
@@ -831,7 +834,7 @@
 free:
 	rte_intr_free_epoll_fd(intr_handle);
 	if (intr_handle->intr_vec)
-		free(intr_handle->intr_vec);
+		mlx5_free(intr_handle->intr_vec);
 	intr_handle->nb_efd = 0;
 	intr_handle->intr_vec = NULL;
 }
@@ -2187,8 +2190,8 @@ enum mlx5_rxq_type
 	struct mlx5_ind_table_obj *ind_tbl;
 	unsigned int i = 0, j = 0, k = 0;
 
-	ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
-			     queues_n * sizeof(uint16_t), 0);
+	ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
+			      queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
 	if (!ind_tbl) {
 		rte_errno = ENOMEM;
 		return NULL;
@@ -2231,8 +2234,9 @@ enum mlx5_rxq_type
 			      log2above(queues_n) :
 			      log2above(priv->config.ind_table_max_size));
 
-		rqt_attr = rte_calloc(__func__, 1, sizeof(*rqt_attr) +
-				      rqt_n * sizeof(uint32_t), 0);
+		rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
+				      rqt_n * sizeof(uint32_t), 0,
+				      SOCKET_ID_ANY);
 		if (!rqt_attr) {
 			DRV_LOG(ERR, "port %u cannot allocate RQT resources",
 				dev->data->port_id);
@@ -2254,7 +2258,7 @@ enum mlx5_rxq_type
 			rqt_attr->rq_list[k] = rqt_attr->rq_list[j];
 		ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx,
 							rqt_attr);
-		rte_free(rqt_attr);
+		mlx5_free(rqt_attr);
 		if (!ind_tbl->rqt) {
 			DRV_LOG(ERR, "port %u cannot create DevX RQT",
 				dev->data->port_id);
@@ -2269,7 +2273,7 @@ enum mlx5_rxq_type
 error:
 	for (j = 0; j < i; j++)
 		mlx5_rxq_release(dev, ind_tbl->queues[j]);
-	rte_free(ind_tbl);
+	mlx5_free(ind_tbl);
 	DEBUG("port %u cannot create indirection table", dev->data->port_id);
 	return NULL;
 }
@@ -2339,7 +2343,7 @@ enum mlx5_rxq_type
 		claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
 	if (!rte_atomic32_read(&ind_tbl->refcnt)) {
 		LIST_REMOVE(ind_tbl, next);
-		rte_free(ind_tbl);
+		mlx5_free(ind_tbl);
 		return 0;
 	}
 	return 1;
@@ -2761,7 +2765,7 @@ enum mlx5_rxq_type
 		rte_errno = errno;
 		goto error;
 	}
-	rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0);
+	rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, SOCKET_ID_ANY);
 	if (!rxq) {
 		DEBUG("port %u cannot allocate drop Rx queue memory",
 		      dev->data->port_id);
@@ -2799,7 +2803,7 @@ enum mlx5_rxq_type
 		claim_zero(mlx5_glue->destroy_wq(rxq->wq));
 	if (rxq->cq)
 		claim_zero(mlx5_glue->destroy_cq(rxq->cq));
-	rte_free(rxq);
+	mlx5_free(rxq);
 	priv->drop_queue.rxq = NULL;
 }
 
@@ -2837,7 +2841,8 @@ enum mlx5_rxq_type
 		rte_errno = errno;
 		goto error;
 	}
-	ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0);
+	ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl), 0,
+			      SOCKET_ID_ANY);
 	if (!ind_tbl) {
 		rte_errno = ENOMEM;
 		goto error;
@@ -2863,7 +2868,7 @@ enum mlx5_rxq_type
 
 	claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
 	mlx5_rxq_obj_drop_release(dev);
-	rte_free(ind_tbl);
+	mlx5_free(ind_tbl);
 	priv->drop_queue.hrxq->ind_table = NULL;
 }
 
@@ -2888,7 +2893,7 @@ struct mlx5_hrxq *
 		rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
 		return priv->drop_queue.hrxq;
 	}
-	hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
+	hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
 	if (!hrxq) {
 		DRV_LOG(WARNING,
 			"port %u cannot allocate memory for drop queue",
@@ -2945,7 +2950,7 @@ struct mlx5_hrxq *
 		mlx5_ind_table_obj_drop_release(dev);
 	if (hrxq) {
 		priv->drop_queue.hrxq = NULL;
-		rte_free(hrxq);
+		mlx5_free(hrxq);
 	}
 	return NULL;
 }
@@ -2968,7 +2973,7 @@ struct mlx5_hrxq *
 #endif
 		claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
 		mlx5_ind_table_obj_drop_release(dev);
-		rte_free(hrxq);
+		mlx5_free(hrxq);
 		priv->drop_queue.hrxq = NULL;
 	}
 }
diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c
index bf67192..25e8b27 100644
--- a/drivers/net/mlx5/mlx5_utils.c
+++ b/drivers/net/mlx5/mlx5_utils.c
@@ -5,6 +5,8 @@
 #include <rte_malloc.h>
 #include <rte_hash_crc.h>
 
+#include <mlx5_malloc.h>
+
 #include "mlx5_utils.h"
 
 struct mlx5_hlist *
@@ -27,7 +29,8 @@ struct mlx5_hlist *
 	alloc_size = sizeof(struct mlx5_hlist) +
 		     sizeof(struct mlx5_hlist_head) * act_size;
 	/* Using zmalloc, then no need to initialize the heads. */
-	h = rte_zmalloc(name, alloc_size, RTE_CACHE_LINE_SIZE);
+	h = mlx5_malloc(MLX5_MEM_ZERO, alloc_size, RTE_CACHE_LINE_SIZE,
+			SOCKET_ID_ANY);
 	if (!h) {
 		DRV_LOG(ERR, "No memory for hash list %s creation",
 			name ? name : "None");
@@ -112,10 +115,10 @@ struct mlx5_hlist_entry *
 			if (cb)
 				cb(entry, ctx);
 			else
-				rte_free(entry);
+				mlx5_free(entry);
 		}
 	}
-	rte_free(h);
+	mlx5_free(h);
 }
 
 static inline void
@@ -193,16 +196,17 @@ struct mlx5_indexed_pool *
 	    (cfg->trunk_size && ((cfg->trunk_size & (cfg->trunk_size - 1)) ||
 	    ((__builtin_ffs(cfg->trunk_size) + TRUNK_IDX_BITS) > 32))))
 		return NULL;
-	pool = rte_zmalloc("mlx5_ipool", sizeof(*pool) + cfg->grow_trunk *
-				sizeof(pool->grow_tbl[0]), RTE_CACHE_LINE_SIZE);
+	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool) + cfg->grow_trunk *
+			   sizeof(pool->grow_tbl[0]), RTE_CACHE_LINE_SIZE,
+			   SOCKET_ID_ANY);
 	if (!pool)
 		return NULL;
 	pool->cfg = *cfg;
 	if (!pool->cfg.trunk_size)
 		pool->cfg.trunk_size = MLX5_IPOOL_DEFAULT_TRUNK_SIZE;
 	if (!cfg->malloc && !cfg->free) {
-		pool->cfg.malloc = rte_malloc_socket;
-		pool->cfg.free = rte_free;
+		pool->cfg.malloc = mlx5_malloc;
+		pool->cfg.free = mlx5_free;
 	}
 	pool->free_list = TRUNK_INVALID;
 	if (pool->cfg.need_lock)
@@ -237,10 +241,9 @@ struct mlx5_indexed_pool *
 		int n_grow = pool->n_trunk_valid ? pool->n_trunk :
 			     RTE_CACHE_LINE_SIZE / sizeof(void *);
 
-		p = pool->cfg.malloc(pool->cfg.type,
-				 (pool->n_trunk_valid + n_grow) *
-				 sizeof(struct mlx5_indexed_trunk *),
-				 RTE_CACHE_LINE_SIZE, rte_socket_id());
+		p = pool->cfg.malloc(0, (pool->n_trunk_valid + n_grow) *
+				     sizeof(struct mlx5_indexed_trunk *),
+				     RTE_CACHE_LINE_SIZE, rte_socket_id());
 		if (!p)
 			return -ENOMEM;
 		if (pool->trunks)
@@ -268,7 +271,7 @@ struct mlx5_indexed_pool *
 	/* rte_bitmap requires memory cacheline aligned. */
 	trunk_size += RTE_CACHE_LINE_ROUNDUP(data_size * pool->cfg.size);
 	trunk_size += bmp_size;
-	trunk = pool->cfg.malloc(pool->cfg.type, trunk_size,
+	trunk = pool->cfg.malloc(0, trunk_size,
 				 RTE_CACHE_LINE_SIZE, rte_socket_id());
 	if (!trunk)
 		return -ENOMEM;
@@ -464,7 +467,7 @@ struct mlx5_indexed_pool *
 	if (!pool->trunks)
 		pool->cfg.free(pool->trunks);
 	mlx5_ipool_unlock(pool);
-	rte_free(pool);
+	mlx5_free(pool);
 	return 0;
 }
 
@@ -493,15 +496,16 @@ struct mlx5_l3t_tbl *
 		.grow_shift = 1,
 		.need_lock = 0,
 		.release_mem_en = 1,
-		.malloc = rte_malloc_socket,
-		.free = rte_free,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
 	};
 
 	if (type >= MLX5_L3T_TYPE_MAX) {
 		rte_errno = EINVAL;
 		return NULL;
 	}
-	tbl = rte_zmalloc(NULL, sizeof(struct mlx5_l3t_tbl), 1);
+	tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_l3t_tbl), 1,
+			  SOCKET_ID_ANY);
 	if (!tbl) {
 		rte_errno = ENOMEM;
 		return NULL;
@@ -532,7 +536,7 @@ struct mlx5_l3t_tbl *
 	tbl->eip = mlx5_ipool_create(&l3t_ip_cfg);
 	if (!tbl->eip) {
 		rte_errno = ENOMEM;
-		rte_free(tbl);
+		mlx5_free(tbl);
 		tbl = NULL;
 	}
 	return tbl;
@@ -565,17 +569,17 @@ struct mlx5_l3t_tbl *
 					break;
 			}
 			MLX5_ASSERT(!m_tbl->ref_cnt);
-			rte_free(g_tbl->tbl[i]);
+			mlx5_free(g_tbl->tbl[i]);
 			g_tbl->tbl[i] = 0;
 			if (!(--g_tbl->ref_cnt))
 				break;
 		}
 		MLX5_ASSERT(!g_tbl->ref_cnt);
-		rte_free(tbl->tbl);
+		mlx5_free(tbl->tbl);
 		tbl->tbl = 0;
 	}
 	mlx5_ipool_destroy(tbl->eip);
-	rte_free(tbl);
+	mlx5_free(tbl);
 }
 
 uint32_t
@@ -667,11 +671,11 @@ struct mlx5_l3t_tbl *
 		m_tbl->tbl[(idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK] =
 									NULL;
 		if (!(--m_tbl->ref_cnt)) {
-			rte_free(m_tbl);
+			mlx5_free(m_tbl);
 			g_tbl->tbl
 			[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK] = NULL;
 			if (!(--g_tbl->ref_cnt)) {
-				rte_free(g_tbl);
+				mlx5_free(g_tbl);
 				tbl->tbl = 0;
 			}
 		}
@@ -693,8 +697,10 @@ struct mlx5_l3t_tbl *
 	/* Check the global table, create it if empty. */
 	g_tbl = tbl->tbl;
 	if (!g_tbl) {
-		g_tbl = rte_zmalloc(NULL, sizeof(struct mlx5_l3t_level_tbl) +
-				    sizeof(void *) * MLX5_L3T_GT_SIZE, 1);
+		g_tbl = mlx5_malloc(MLX5_MEM_ZERO,
+				    sizeof(struct mlx5_l3t_level_tbl) +
+				    sizeof(void *) * MLX5_L3T_GT_SIZE, 1,
+				    SOCKET_ID_ANY);
 		if (!g_tbl) {
 			rte_errno = ENOMEM;
 			return -1;
@@ -707,8 +713,10 @@ struct mlx5_l3t_tbl *
 	 */
 	m_tbl = g_tbl->tbl[(idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK];
 	if (!m_tbl) {
-		m_tbl = rte_zmalloc(NULL, sizeof(struct mlx5_l3t_level_tbl) +
-				    sizeof(void *) * MLX5_L3T_MT_SIZE, 1);
+		m_tbl = mlx5_malloc(MLX5_MEM_ZERO,
+				    sizeof(struct mlx5_l3t_level_tbl) +
+				    sizeof(void *) * MLX5_L3T_MT_SIZE, 1,
+				    SOCKET_ID_ANY);
 		if (!m_tbl) {
 			rte_errno = ENOMEM;
 			return -1;
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index c4b9063..562b9b1 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -193,7 +193,7 @@ struct mlx5_indexed_pool_config {
 	/* Lock is needed for multiple thread usage. */
 	uint32_t release_mem_en:1; /* Rlease trunk when it is free. */
 	const char *type; /* Memory allocate type name. */
-	void *(*malloc)(const char *type, size_t size, unsigned int align,
+	void *(*malloc)(uint32_t flags, size_t size, unsigned int align,
 			int socket);
 	/* User defined memory allocator. */
 	void (*free)(void *addr); /* User defined memory release. */
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index f65e416..4308b71 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -33,6 +33,7 @@
 #include <mlx5_glue.h>
 #include <mlx5_devx_cmds.h>
 #include <mlx5_nl.h>
+#include <mlx5_malloc.h>
 
 #include "mlx5.h"
 #include "mlx5_autoconf.h"
@@ -288,7 +289,8 @@ struct mlx5_nl_vlan_vmwa_context *
 		 */
 		return NULL;
 	}
-	vmwa = rte_zmalloc(__func__, sizeof(*vmwa), sizeof(uint32_t));
+	vmwa = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*vmwa), sizeof(uint32_t),
+			   SOCKET_ID_ANY);
 	if (!vmwa) {
 		DRV_LOG(WARNING,
 			"Can not allocate memory"
@@ -300,7 +302,7 @@ struct mlx5_nl_vlan_vmwa_context *
 		DRV_LOG(WARNING,
 			"Can not create Netlink socket"
 			" for VLAN workaround context");
-		rte_free(vmwa);
+		mlx5_free(vmwa);
 		return NULL;
 	}
 	vmwa->vf_ifindex = ifindex;
@@ -323,5 +325,5 @@ void mlx5_vlan_vmwa_exit(struct mlx5_nl_vlan_vmwa_context *vmwa)
 	}
 	if (vmwa->nl_socket >= 0)
 		close(vmwa->nl_socket);
-	rte_free(vmwa);
+	mlx5_free(vmwa);
 }
-- 
1.8.3.1



Thread overview: 25+ messages
2020-07-15  3:59 [dpdk-dev] [PATCH 0/7] net/mlx5: add sys_mem_en devarg Suanming Mou
2020-07-15  3:59 ` [dpdk-dev] [PATCH 1/7] common/mlx5: add mlx5 memory management functions Suanming Mou
2020-07-15  3:59 ` [dpdk-dev] [PATCH 2/7] net/mlx5: add allocate memory from system devarg Suanming Mou
2020-07-15  3:59 ` [dpdk-dev] [PATCH 3/7] net/mlx5: convert control path memory to unified malloc Suanming Mou
2020-07-15  4:00 ` [dpdk-dev] [PATCH 4/7] common/mlx5: " Suanming Mou
2020-07-15  4:00 ` [dpdk-dev] [PATCH 5/7] common/mlx5: convert data path objects " Suanming Mou
2020-07-15  4:00 ` [dpdk-dev] [PATCH 6/7] net/mlx5: convert configuration " Suanming Mou
2020-07-15  4:00 ` [dpdk-dev] [PATCH 7/7] net/mlx5: convert Rx/Tx queue " Suanming Mou
2020-07-16  9:20 ` [dpdk-dev] [PATCH v2 0/7] net/mlx5: add sys_mem_en devarg Suanming Mou
2020-07-16  9:20   ` [dpdk-dev] [PATCH v2 1/7] common/mlx5: add mlx5 memory management functions Suanming Mou
2020-07-16  9:20   ` [dpdk-dev] [PATCH v2 2/7] net/mlx5: add allocate memory from system devarg Suanming Mou
2020-07-16  9:20   ` Suanming Mou [this message]
2020-07-16  9:20   ` [dpdk-dev] [PATCH v2 4/7] common/mlx5: convert control path memory to unified malloc Suanming Mou
2020-07-16  9:20   ` [dpdk-dev] [PATCH v2 5/7] common/mlx5: convert data path objects " Suanming Mou
2020-07-16  9:20   ` [dpdk-dev] [PATCH v2 6/7] net/mlx5: convert configuration " Suanming Mou
2020-07-16  9:20   ` [dpdk-dev] [PATCH v2 7/7] net/mlx5: convert Rx/Tx queue " Suanming Mou
2020-07-17 13:50 ` [dpdk-dev] [PATCH v3 0/7] net/mlx5: add sys_mem_en devarg Suanming Mou
2020-07-17 13:50   ` [dpdk-dev] [PATCH v3 1/7] common/mlx5: add mlx5 memory management functions Suanming Mou
2020-07-17 13:51   ` [dpdk-dev] [PATCH v3 2/7] net/mlx5: add allocate memory from system devarg Suanming Mou
2020-07-17 13:51   ` [dpdk-dev] [PATCH v3 3/7] net/mlx5: convert control path memory to unified malloc Suanming Mou
2020-07-17 13:51   ` [dpdk-dev] [PATCH v3 4/7] common/mlx5: " Suanming Mou
2020-07-17 13:51   ` [dpdk-dev] [PATCH v3 5/7] common/mlx5: convert data path objects " Suanming Mou
2020-07-17 13:51   ` [dpdk-dev] [PATCH v3 6/7] net/mlx5: convert configuration " Suanming Mou
2020-07-17 13:51   ` [dpdk-dev] [PATCH v3 7/7] net/mlx5: convert Rx/Tx queue " Suanming Mou
2020-07-17 17:09   ` [dpdk-dev] [PATCH v3 0/7] net/mlx5: add sys_mem_en devarg Raslan Darawsheh
