DPDK patches and discussions
 help / color / mirror / Atom feed
From: Suanming Mou <suanmingm@mellanox.com>
To: viacheslavo@mellanox.com, matan@mellanox.com
Cc: orika@mellanox.com, rasland@mellanox.com, dev@dpdk.org
Subject: [dpdk-dev] [PATCH 7/7] net/mlx5: convert Rx/Tx queue objects to unified malloc
Date: Wed, 15 Jul 2020 12:00:03 +0800	[thread overview]
Message-ID: <1594785603-152773-8-git-send-email-suanmingm@mellanox.com> (raw)
In-Reply-To: <1594785603-152773-1-git-send-email-suanmingm@mellanox.com>

This commit allocates the Rx/Tx queue objects using the unified malloc
function.

Signed-off-by: Suanming Mou <suanmingm@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
---
 drivers/net/mlx5/mlx5_rxq.c | 37 ++++++++++++++++++-------------------
 drivers/net/mlx5/mlx5_txq.c | 44 +++++++++++++++++++++-----------------------
 2 files changed, 39 insertions(+), 42 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index c8e3a82..9c9cc3a 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -641,7 +641,7 @@
 rxq_release_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
 {
 	if (rxq_ctrl->rxq.wqes) {
-		rte_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
+		mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
 		rxq_ctrl->rxq.wqes = NULL;
 	}
 	if (rxq_ctrl->wq_umem) {
@@ -707,7 +707,7 @@
 			claim_zero(mlx5_glue->destroy_comp_channel
 				   (rxq_obj->channel));
 		LIST_REMOVE(rxq_obj, next);
-		rte_free(rxq_obj);
+		mlx5_free(rxq_obj);
 		return 0;
 	}
 	return 1;
@@ -1233,15 +1233,15 @@
 	/* Calculate and allocate WQ memory space. */
 	wqe_size = 1 << log_wqe_size; /* round up power of two.*/
 	wq_size = wqe_n * wqe_size;
-	buf = rte_calloc_socket(__func__, 1, wq_size, MLX5_WQE_BUF_ALIGNMENT,
-				rxq_ctrl->socket);
+	buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size,
+			  MLX5_WQE_BUF_ALIGNMENT, rxq_ctrl->socket);
 	if (!buf)
 		return NULL;
 	rxq_data->wqes = buf;
 	rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
 						     buf, wq_size, 0);
 	if (!rxq_ctrl->wq_umem) {
-		rte_free(buf);
+		mlx5_free(buf);
 		return NULL;
 	}
 	mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
@@ -1275,8 +1275,8 @@
 
 	MLX5_ASSERT(rxq_data);
 	MLX5_ASSERT(!rxq_ctrl->obj);
-	tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
-				 rxq_ctrl->socket);
+	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
+			   rxq_ctrl->socket);
 	if (!tmpl) {
 		DRV_LOG(ERR,
 			"port %u Rx queue %u cannot allocate verbs resources",
@@ -1294,7 +1294,7 @@
 			DRV_LOG(ERR, "total data size %u power of 2 is "
 				"too large for hairpin",
 				priv->config.log_hp_size);
-			rte_free(tmpl);
+			mlx5_free(tmpl);
 			rte_errno = ERANGE;
 			return NULL;
 		}
@@ -1314,7 +1314,7 @@
 		DRV_LOG(ERR,
 			"port %u Rx hairpin queue %u can't create rq object",
 			dev->data->port_id, idx);
-		rte_free(tmpl);
+		mlx5_free(tmpl);
 		rte_errno = errno;
 		return NULL;
 	}
@@ -1362,8 +1362,8 @@ struct mlx5_rxq_obj *
 		return mlx5_rxq_obj_hairpin_new(dev, idx);
 	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
 	priv->verbs_alloc_ctx.obj = rxq_ctrl;
-	tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
-				 rxq_ctrl->socket);
+	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
+			   rxq_ctrl->socket);
 	if (!tmpl) {
 		DRV_LOG(ERR,
 			"port %u Rx queue %u cannot allocate verbs resources",
@@ -1503,7 +1503,7 @@ struct mlx5_rxq_obj *
 		if (tmpl->channel)
 			claim_zero(mlx5_glue->destroy_comp_channel
 							(tmpl->channel));
-		rte_free(tmpl);
+		mlx5_free(tmpl);
 		rte_errno = ret; /* Restore rte_errno. */
 	}
 	if (type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
@@ -1825,10 +1825,8 @@ struct mlx5_rxq_ctrl *
 		rte_errno = ENOSPC;
 		return NULL;
 	}
-	tmpl = rte_calloc_socket("RXQ", 1,
-				 sizeof(*tmpl) +
-				 desc_n * sizeof(struct rte_mbuf *),
-				 0, socket);
+	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
+			   desc_n * sizeof(struct rte_mbuf *), 0, socket);
 	if (!tmpl) {
 		rte_errno = ENOMEM;
 		return NULL;
@@ -2007,7 +2005,7 @@ struct mlx5_rxq_ctrl *
 	LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
 	return tmpl;
 error:
-	rte_free(tmpl);
+	mlx5_free(tmpl);
 	return NULL;
 }
 
@@ -2033,7 +2031,8 @@ struct mlx5_rxq_ctrl *
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_rxq_ctrl *tmpl;
 
-	tmpl = rte_calloc_socket("RXQ", 1, sizeof(*tmpl), 0, SOCKET_ID_ANY);
+	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
+			   SOCKET_ID_ANY);
 	if (!tmpl) {
 		rte_errno = ENOMEM;
 		return NULL;
@@ -2112,7 +2111,7 @@ struct mlx5_rxq_ctrl *
 		if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
 			mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
 		LIST_REMOVE(rxq_ctrl, next);
-		rte_free(rxq_ctrl);
+		mlx5_free(rxq_ctrl);
 		(*priv->rxqs)[idx] = NULL;
 		return 0;
 	}
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 35b3ade..ac9e455 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -31,6 +31,7 @@
 #include <mlx5_devx_cmds.h>
 #include <mlx5_common.h>
 #include <mlx5_common_mr.h>
+#include <mlx5_malloc.h>
 
 #include "mlx5_defs.h"
 #include "mlx5_utils.h"
@@ -521,8 +522,8 @@
 
 	MLX5_ASSERT(txq_data);
 	MLX5_ASSERT(!txq_ctrl->obj);
-	tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
-				 txq_ctrl->socket);
+	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
+			   txq_ctrl->socket);
 	if (!tmpl) {
 		DRV_LOG(ERR,
 			"port %u Tx queue %u cannot allocate memory resources",
@@ -541,7 +542,7 @@
 			DRV_LOG(ERR, "total data size %u power of 2 is "
 				"too large for hairpin",
 				priv->config.log_hp_size);
-			rte_free(tmpl);
+			mlx5_free(tmpl);
 			rte_errno = ERANGE;
 			return NULL;
 		}
@@ -561,7 +562,7 @@
 		DRV_LOG(ERR,
 			"port %u tx hairpin queue %u can't create sq object",
 			dev->data->port_id, idx);
-		rte_free(tmpl);
+		mlx5_free(tmpl);
 		rte_errno = errno;
 		return NULL;
 	}
@@ -715,8 +716,9 @@ struct mlx5_txq_obj *
 		rte_errno = errno;
 		goto error;
 	}
-	txq_obj = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_obj), 0,
-				    txq_ctrl->socket);
+	txq_obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+			      sizeof(struct mlx5_txq_obj), 0,
+			      txq_ctrl->socket);
 	if (!txq_obj) {
 		DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory",
 			dev->data->port_id, idx);
@@ -758,11 +760,9 @@ struct mlx5_txq_obj *
 	txq_data->wqe_pi = 0;
 	txq_data->wqe_comp = 0;
 	txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
-	txq_data->fcqs = rte_calloc_socket(__func__,
-					   txq_data->cqe_s,
-					   sizeof(*txq_data->fcqs),
-					   RTE_CACHE_LINE_SIZE,
-					   txq_ctrl->socket);
+	txq_data->fcqs = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+				     txq_data->cqe_s * sizeof(*txq_data->fcqs),
+				     RTE_CACHE_LINE_SIZE, txq_ctrl->socket);
 	if (!txq_data->fcqs) {
 		DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory (FCQ)",
 			dev->data->port_id, idx);
@@ -818,9 +818,9 @@ struct mlx5_txq_obj *
 	if (tmpl.qp)
 		claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
 	if (txq_data->fcqs)
-		rte_free(txq_data->fcqs);
+		mlx5_free(txq_data->fcqs);
 	if (txq_obj)
-		rte_free(txq_obj);
+		mlx5_free(txq_obj);
 	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
 	rte_errno = ret; /* Restore rte_errno. */
 	return NULL;
@@ -874,10 +874,10 @@ struct mlx5_txq_obj *
 			claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
 			claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
 				if (txq_obj->txq_ctrl->txq.fcqs)
-					rte_free(txq_obj->txq_ctrl->txq.fcqs);
+					mlx5_free(txq_obj->txq_ctrl->txq.fcqs);
 		}
 		LIST_REMOVE(txq_obj, next);
-		rte_free(txq_obj);
+		mlx5_free(txq_obj);
 		return 0;
 	}
 	return 1;
@@ -1293,10 +1293,8 @@ struct mlx5_txq_ctrl *
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_txq_ctrl *tmpl;
 
-	tmpl = rte_calloc_socket("TXQ", 1,
-				 sizeof(*tmpl) +
-				 desc * sizeof(struct rte_mbuf *),
-				 0, socket);
+	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
+			   desc * sizeof(struct rte_mbuf *), 0, socket);
 	if (!tmpl) {
 		rte_errno = ENOMEM;
 		return NULL;
@@ -1336,7 +1334,7 @@ struct mlx5_txq_ctrl *
 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
 	return tmpl;
 error:
-	rte_free(tmpl);
+	mlx5_free(tmpl);
 	return NULL;
 }
 
@@ -1362,8 +1360,8 @@ struct mlx5_txq_ctrl *
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_txq_ctrl *tmpl;
 
-	tmpl = rte_calloc_socket("TXQ", 1,
-				 sizeof(*tmpl), 0, SOCKET_ID_ANY);
+	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
+			   SOCKET_ID_ANY);
 	if (!tmpl) {
 		rte_errno = ENOMEM;
 		return NULL;
@@ -1432,7 +1430,7 @@ struct mlx5_txq_ctrl *
 		txq_free_elts(txq);
 		mlx5_mr_btree_free(&txq->txq.mr_ctrl.cache_bh);
 		LIST_REMOVE(txq, next);
-		rte_free(txq);
+		mlx5_free(txq);
 		(*priv->txqs)[idx] = NULL;
 		return 0;
 	}
-- 
1.8.3.1


  parent reply	other threads:[~2020-07-15  4:01 UTC|newest]

Thread overview: 25+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-07-15  3:59 [dpdk-dev] [PATCH 0/7] net/mlx5: add sys_mem_en devarg Suanming Mou
2020-07-15  3:59 ` [dpdk-dev] [PATCH 1/7] common/mlx5: add mlx5 memory management functions Suanming Mou
2020-07-15  3:59 ` [dpdk-dev] [PATCH 2/7] net/mlx5: add allocate memory from system devarg Suanming Mou
2020-07-15  3:59 ` [dpdk-dev] [PATCH 3/7] net/mlx5: convert control path memory to unified malloc Suanming Mou
2020-07-15  4:00 ` [dpdk-dev] [PATCH 4/7] common/mlx5: " Suanming Mou
2020-07-15  4:00 ` [dpdk-dev] [PATCH 5/7] common/mlx5: convert data path objects " Suanming Mou
2020-07-15  4:00 ` [dpdk-dev] [PATCH 6/7] net/mlx5: convert configuration " Suanming Mou
2020-07-15  4:00 ` Suanming Mou [this message]
2020-07-16  9:20 ` [dpdk-dev] [PATCH v2 0/7] net/mlx5: add sys_mem_en devarg Suanming Mou
2020-07-16  9:20   ` [dpdk-dev] [PATCH v2 1/7] common/mlx5: add mlx5 memory management functions Suanming Mou
2020-07-16  9:20   ` [dpdk-dev] [PATCH v2 2/7] net/mlx5: add allocate memory from system devarg Suanming Mou
2020-07-16  9:20   ` [dpdk-dev] [PATCH v2 3/7] net/mlx5: convert control path memory to unified malloc Suanming Mou
2020-07-16  9:20   ` [dpdk-dev] [PATCH v2 4/7] common/mlx5: " Suanming Mou
2020-07-16  9:20   ` [dpdk-dev] [PATCH v2 5/7] common/mlx5: convert data path objects " Suanming Mou
2020-07-16  9:20   ` [dpdk-dev] [PATCH v2 6/7] net/mlx5: convert configuration " Suanming Mou
2020-07-16  9:20   ` [dpdk-dev] [PATCH v2 7/7] net/mlx5: convert Rx/Tx queue " Suanming Mou
2020-07-17 13:50 ` [dpdk-dev] [PATCH v3 0/7] net/mlx5: add sys_mem_en devarg Suanming Mou
2020-07-17 13:50   ` [dpdk-dev] [PATCH v3 1/7] common/mlx5: add mlx5 memory management functions Suanming Mou
2020-07-17 13:51   ` [dpdk-dev] [PATCH v3 2/7] net/mlx5: add allocate memory from system devarg Suanming Mou
2020-07-17 13:51   ` [dpdk-dev] [PATCH v3 3/7] net/mlx5: convert control path memory to unified malloc Suanming Mou
2020-07-17 13:51   ` [dpdk-dev] [PATCH v3 4/7] common/mlx5: " Suanming Mou
2020-07-17 13:51   ` [dpdk-dev] [PATCH v3 5/7] common/mlx5: convert data path objects " Suanming Mou
2020-07-17 13:51   ` [dpdk-dev] [PATCH v3 6/7] net/mlx5: convert configuration " Suanming Mou
2020-07-17 13:51   ` [dpdk-dev] [PATCH v3 7/7] net/mlx5: convert Rx/Tx queue " Suanming Mou
2020-07-17 17:09   ` [dpdk-dev] [PATCH v3 0/7] net/mlx5: add sys_mem_en devarg Raslan Darawsheh

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1594785603-152773-8-git-send-email-suanmingm@mellanox.com \
    --to=suanmingm@mellanox.com \
    --cc=dev@dpdk.org \
    --cc=matan@mellanox.com \
    --cc=orika@mellanox.com \
    --cc=rasland@mellanox.com \
    --cc=viacheslavo@mellanox.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).