From: Suanming Mou <suanmingm@mellanox.com>
To: viacheslavo@mellanox.com, matan@mellanox.com
Cc: orika@mellanox.com, rasland@mellanox.com, dev@dpdk.org
Subject: [dpdk-dev] [PATCH v3 7/7] net/mlx5: convert Rx/Tx queue objects to unified malloc
Date: Fri, 17 Jul 2020 21:51:05 +0800 [thread overview]
Message-ID: <1594993865-396296-8-git-send-email-suanmingm@mellanox.com> (raw)
In-Reply-To: <1594993865-396296-1-git-send-email-suanmingm@mellanox.com>
This commit converts the Rx/Tx queue object allocations to use the
unified malloc function.
Signed-off-by: Suanming Mou <suanmingm@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
---
drivers/net/mlx5/mlx5_rxq.c | 37 ++++++++++----------
drivers/net/mlx5/mlx5_txpp.c | 30 ++++++++--------
drivers/net/mlx5/mlx5_txq.c | 82 +++++++++++++++++++++-----------------------
3 files changed, 73 insertions(+), 76 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index e8214d4..67d996c 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -641,7 +641,7 @@
rxq_release_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
if (rxq_ctrl->rxq.wqes) {
- rte_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
+ mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
rxq_ctrl->rxq.wqes = NULL;
}
if (rxq_ctrl->wq_umem) {
@@ -707,7 +707,7 @@
claim_zero(mlx5_glue->destroy_comp_channel
(rxq_obj->channel));
LIST_REMOVE(rxq_obj, next);
- rte_free(rxq_obj);
+ mlx5_free(rxq_obj);
return 0;
}
return 1;
@@ -1233,15 +1233,15 @@
/* Calculate and allocate WQ memory space. */
wqe_size = 1 << log_wqe_size; /* round up power of two.*/
wq_size = wqe_n * wqe_size;
- buf = rte_calloc_socket(__func__, 1, wq_size, MLX5_WQE_BUF_ALIGNMENT,
- rxq_ctrl->socket);
+ buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size,
+ MLX5_WQE_BUF_ALIGNMENT, rxq_ctrl->socket);
if (!buf)
return NULL;
rxq_data->wqes = buf;
rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
buf, wq_size, 0);
if (!rxq_ctrl->wq_umem) {
- rte_free(buf);
+ mlx5_free(buf);
return NULL;
}
mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
@@ -1275,8 +1275,8 @@
MLX5_ASSERT(rxq_data);
MLX5_ASSERT(!rxq_ctrl->obj);
- tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
- rxq_ctrl->socket);
+ tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
+ rxq_ctrl->socket);
if (!tmpl) {
DRV_LOG(ERR,
"port %u Rx queue %u cannot allocate verbs resources",
@@ -1294,7 +1294,7 @@
DRV_LOG(ERR, "total data size %u power of 2 is "
"too large for hairpin",
priv->config.log_hp_size);
- rte_free(tmpl);
+ mlx5_free(tmpl);
rte_errno = ERANGE;
return NULL;
}
@@ -1314,7 +1314,7 @@
DRV_LOG(ERR,
"port %u Rx hairpin queue %u can't create rq object",
dev->data->port_id, idx);
- rte_free(tmpl);
+ mlx5_free(tmpl);
rte_errno = errno;
return NULL;
}
@@ -1362,8 +1362,8 @@ struct mlx5_rxq_obj *
return mlx5_rxq_obj_hairpin_new(dev, idx);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
priv->verbs_alloc_ctx.obj = rxq_ctrl;
- tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
- rxq_ctrl->socket);
+ tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
+ rxq_ctrl->socket);
if (!tmpl) {
DRV_LOG(ERR,
"port %u Rx queue %u cannot allocate verbs resources",
@@ -1503,7 +1503,7 @@ struct mlx5_rxq_obj *
if (tmpl->channel)
claim_zero(mlx5_glue->destroy_comp_channel
(tmpl->channel));
- rte_free(tmpl);
+ mlx5_free(tmpl);
rte_errno = ret; /* Restore rte_errno. */
}
if (type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
@@ -1825,10 +1825,8 @@ struct mlx5_rxq_ctrl *
rte_errno = ENOSPC;
return NULL;
}
- tmpl = rte_calloc_socket("RXQ", 1,
- sizeof(*tmpl) +
- desc_n * sizeof(struct rte_mbuf *),
- 0, socket);
+ tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
+ desc_n * sizeof(struct rte_mbuf *), 0, socket);
if (!tmpl) {
rte_errno = ENOMEM;
return NULL;
@@ -2007,7 +2005,7 @@ struct mlx5_rxq_ctrl *
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
return tmpl;
error:
- rte_free(tmpl);
+ mlx5_free(tmpl);
return NULL;
}
@@ -2033,7 +2031,8 @@ struct mlx5_rxq_ctrl *
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *tmpl;
- tmpl = rte_calloc_socket("RXQ", 1, sizeof(*tmpl), 0, SOCKET_ID_ANY);
+ tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
+ SOCKET_ID_ANY);
if (!tmpl) {
rte_errno = ENOMEM;
return NULL;
@@ -2112,7 +2111,7 @@ struct mlx5_rxq_ctrl *
if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
LIST_REMOVE(rxq_ctrl, next);
- rte_free(rxq_ctrl);
+ mlx5_free(rxq_ctrl);
(*priv->rxqs)[idx] = NULL;
return 0;
}
diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
index 15c9a8e..77c1866 100644
--- a/drivers/net/mlx5/mlx5_txpp.c
+++ b/drivers/net/mlx5/mlx5_txpp.c
@@ -11,6 +11,8 @@
#include <rte_malloc.h>
#include <rte_cycles.h>
+#include <mlx5_malloc.h>
+
#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_common_os.h"
@@ -134,13 +136,13 @@
if (wq->sq_umem)
claim_zero(mlx5_glue->devx_umem_dereg(wq->sq_umem));
if (wq->sq_buf)
- rte_free((void *)(uintptr_t)wq->sq_buf);
+ mlx5_free((void *)(uintptr_t)wq->sq_buf);
if (wq->cq)
claim_zero(mlx5_devx_cmd_destroy(wq->cq));
if (wq->cq_umem)
claim_zero(mlx5_glue->devx_umem_dereg(wq->cq_umem));
if (wq->cq_buf)
- rte_free((void *)(uintptr_t)wq->cq_buf);
+ mlx5_free((void *)(uintptr_t)wq->cq_buf);
memset(wq, 0, sizeof(*wq));
}
@@ -159,7 +161,7 @@
mlx5_txpp_destroy_send_queue(wq);
if (sh->txpp.tsa) {
- rte_free(sh->txpp.tsa);
+ mlx5_free(sh->txpp.tsa);
sh->txpp.tsa = NULL;
}
}
@@ -255,8 +257,8 @@
umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_REARM_CQ_SIZE;
umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
umem_size += MLX5_DBR_SIZE;
- wq->cq_buf = rte_zmalloc_socket(__func__, umem_size,
- page_size, sh->numa_node);
+ wq->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
+ page_size, sh->numa_node);
if (!wq->cq_buf) {
DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
return -ENOMEM;
@@ -304,8 +306,8 @@
umem_size = MLX5_WQE_SIZE * wq->sq_size;
umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
umem_size += MLX5_DBR_SIZE;
- wq->sq_buf = rte_zmalloc_socket(__func__, umem_size,
- page_size, sh->numa_node);
+ wq->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
+ page_size, sh->numa_node);
if (!wq->sq_buf) {
DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
rte_errno = ENOMEM;
@@ -474,10 +476,10 @@
uint32_t umem_size, umem_dbrec;
int ret;
- sh->txpp.tsa = rte_zmalloc_socket(__func__,
- MLX5_TXPP_REARM_SQ_SIZE *
- sizeof(struct mlx5_txpp_ts),
- 0, sh->numa_node);
+ sh->txpp.tsa = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+ MLX5_TXPP_REARM_SQ_SIZE *
+ sizeof(struct mlx5_txpp_ts),
+ 0, sh->numa_node);
if (!sh->txpp.tsa) {
DRV_LOG(ERR, "Failed to allocate memory for CQ stats.");
return -ENOMEM;
@@ -488,7 +490,7 @@
umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_CLKQ_SIZE;
umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
umem_size += MLX5_DBR_SIZE;
- wq->cq_buf = rte_zmalloc_socket(__func__, umem_size,
+ wq->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
page_size, sh->numa_node);
if (!wq->cq_buf) {
DRV_LOG(ERR, "Failed to allocate memory for Clock Queue.");
@@ -543,8 +545,8 @@
umem_size = MLX5_WQE_SIZE * wq->sq_size;
umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
umem_size += MLX5_DBR_SIZE;
- wq->sq_buf = rte_zmalloc_socket(__func__, umem_size,
- page_size, sh->numa_node);
+ wq->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
+ page_size, sh->numa_node);
if (!wq->sq_buf) {
DRV_LOG(ERR, "Failed to allocate memory for Clock Queue.");
rte_errno = ENOMEM;
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 4ab6ac1..4a73299 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -32,6 +32,7 @@
#include <mlx5_common.h>
#include <mlx5_common_mr.h>
#include <mlx5_common_os.h>
+#include <mlx5_malloc.h>
#include "mlx5_defs.h"
#include "mlx5_utils.h"
@@ -524,8 +525,8 @@
MLX5_ASSERT(txq_data);
MLX5_ASSERT(!txq_ctrl->obj);
- tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
- txq_ctrl->socket);
+ tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
+ txq_ctrl->socket);
if (!tmpl) {
DRV_LOG(ERR,
"port %u Tx queue %u cannot allocate memory resources",
@@ -544,7 +545,7 @@
DRV_LOG(ERR, "total data size %u power of 2 is "
"too large for hairpin",
priv->config.log_hp_size);
- rte_free(tmpl);
+ mlx5_free(tmpl);
rte_errno = ERANGE;
return NULL;
}
@@ -564,7 +565,7 @@
DRV_LOG(ERR,
"port %u tx hairpin queue %u can't create sq object",
dev->data->port_id, idx);
- rte_free(tmpl);
+ mlx5_free(tmpl);
rte_errno = errno;
return NULL;
}
@@ -597,7 +598,7 @@
if (txq_obj->sq_umem)
claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->sq_umem));
if (txq_obj->sq_buf)
- rte_free(txq_obj->sq_buf);
+ mlx5_free(txq_obj->sq_buf);
if (txq_obj->cq_devx)
claim_zero(mlx5_devx_cmd_destroy(txq_obj->cq_devx));
if (txq_obj->cq_dbrec_page)
@@ -609,7 +610,7 @@
if (txq_obj->cq_umem)
claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->cq_umem));
if (txq_obj->cq_buf)
- rte_free(txq_obj->cq_buf);
+ mlx5_free(txq_obj->cq_buf);
}
/**
@@ -648,9 +649,9 @@
MLX5_ASSERT(txq_data);
MLX5_ASSERT(!txq_ctrl->obj);
- txq_obj = rte_calloc_socket(__func__, 1,
- sizeof(struct mlx5_txq_obj), 0,
- txq_ctrl->socket);
+ txq_obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+ sizeof(struct mlx5_txq_obj), 0,
+ txq_ctrl->socket);
if (!txq_obj) {
DRV_LOG(ERR,
"port %u Tx queue %u cannot allocate memory resources",
@@ -673,10 +674,10 @@
goto error;
}
/* Allocate memory buffer for CQEs. */
- txq_obj->cq_buf = rte_zmalloc_socket(__func__,
- nqe * sizeof(struct mlx5_cqe),
- MLX5_CQE_BUF_ALIGNMENT,
- sh->numa_node);
+ txq_obj->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+ nqe * sizeof(struct mlx5_cqe),
+ MLX5_CQE_BUF_ALIGNMENT,
+ sh->numa_node);
if (!txq_obj->cq_buf) {
DRV_LOG(ERR,
"port %u Tx queue %u cannot allocate memory (CQ)",
@@ -741,10 +742,9 @@
/* Create the Work Queue. */
nqe = RTE_MIN(1UL << txq_data->elts_n,
(uint32_t)sh->device_attr.max_qp_wr);
- txq_obj->sq_buf = rte_zmalloc_socket(__func__,
- nqe * sizeof(struct mlx5_wqe),
- page_size,
- sh->numa_node);
+ txq_obj->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+ nqe * sizeof(struct mlx5_wqe),
+ page_size, sh->numa_node);
if (!txq_obj->sq_buf) {
DRV_LOG(ERR,
"port %u Tx queue %u cannot allocate memory (SQ)",
@@ -825,11 +825,10 @@
dev->data->port_id, idx);
goto error;
}
- txq_data->fcqs = rte_calloc_socket(__func__,
- txq_data->cqe_s,
- sizeof(*txq_data->fcqs),
- RTE_CACHE_LINE_SIZE,
- txq_ctrl->socket);
+ txq_data->fcqs = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+ txq_data->cqe_s * sizeof(*txq_data->fcqs),
+ RTE_CACHE_LINE_SIZE,
+ txq_ctrl->socket);
if (!txq_data->fcqs) {
DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory (FCQ)",
dev->data->port_id, idx);
@@ -857,10 +856,10 @@
ret = rte_errno; /* Save rte_errno before cleanup. */
txq_release_sq_resources(txq_obj);
if (txq_data->fcqs) {
- rte_free(txq_data->fcqs);
+ mlx5_free(txq_data->fcqs);
txq_data->fcqs = NULL;
}
- rte_free(txq_obj);
+ mlx5_free(txq_obj);
rte_errno = ret; /* Restore rte_errno. */
return NULL;
#endif
@@ -1011,8 +1010,9 @@ struct mlx5_txq_obj *
rte_errno = errno;
goto error;
}
- txq_obj = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_obj), 0,
- txq_ctrl->socket);
+ txq_obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+ sizeof(struct mlx5_txq_obj), 0,
+ txq_ctrl->socket);
if (!txq_obj) {
DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory",
dev->data->port_id, idx);
@@ -1054,11 +1054,9 @@ struct mlx5_txq_obj *
txq_data->wqe_pi = 0;
txq_data->wqe_comp = 0;
txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
- txq_data->fcqs = rte_calloc_socket(__func__,
- txq_data->cqe_s,
- sizeof(*txq_data->fcqs),
- RTE_CACHE_LINE_SIZE,
- txq_ctrl->socket);
+ txq_data->fcqs = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+ txq_data->cqe_s * sizeof(*txq_data->fcqs),
+ RTE_CACHE_LINE_SIZE, txq_ctrl->socket);
if (!txq_data->fcqs) {
DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory (FCQ)",
dev->data->port_id, idx);
@@ -1114,11 +1112,11 @@ struct mlx5_txq_obj *
if (tmpl.qp)
claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
if (txq_data && txq_data->fcqs) {
- rte_free(txq_data->fcqs);
+ mlx5_free(txq_data->fcqs);
txq_data->fcqs = NULL;
}
if (txq_obj)
- rte_free(txq_obj);
+ mlx5_free(txq_obj);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
rte_errno = ret; /* Restore rte_errno. */
return NULL;
@@ -1175,11 +1173,11 @@ struct mlx5_txq_obj *
claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
}
if (txq_obj->txq_ctrl->txq.fcqs) {
- rte_free(txq_obj->txq_ctrl->txq.fcqs);
+ mlx5_free(txq_obj->txq_ctrl->txq.fcqs);
txq_obj->txq_ctrl->txq.fcqs = NULL;
}
LIST_REMOVE(txq_obj, next);
- rte_free(txq_obj);
+ mlx5_free(txq_obj);
return 0;
}
return 1;
@@ -1595,10 +1593,8 @@ struct mlx5_txq_ctrl *
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *tmpl;
- tmpl = rte_calloc_socket("TXQ", 1,
- sizeof(*tmpl) +
- desc * sizeof(struct rte_mbuf *),
- 0, socket);
+ tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
+ desc * sizeof(struct rte_mbuf *), 0, socket);
if (!tmpl) {
rte_errno = ENOMEM;
return NULL;
@@ -1638,7 +1634,7 @@ struct mlx5_txq_ctrl *
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
error:
- rte_free(tmpl);
+ mlx5_free(tmpl);
return NULL;
}
@@ -1664,8 +1660,8 @@ struct mlx5_txq_ctrl *
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *tmpl;
- tmpl = rte_calloc_socket("TXQ", 1,
- sizeof(*tmpl), 0, SOCKET_ID_ANY);
+ tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
+ SOCKET_ID_ANY);
if (!tmpl) {
rte_errno = ENOMEM;
return NULL;
@@ -1734,7 +1730,7 @@ struct mlx5_txq_ctrl *
txq_free_elts(txq);
mlx5_mr_btree_free(&txq->txq.mr_ctrl.cache_bh);
LIST_REMOVE(txq, next);
- rte_free(txq);
+ mlx5_free(txq);
(*priv->txqs)[idx] = NULL;
return 0;
}
--
1.8.3.1
next prev parent reply other threads:[~2020-07-17 13:53 UTC|newest]
Thread overview: 25+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-07-15 3:59 [dpdk-dev] [PATCH 0/7] net/mlx5: add sys_mem_en devarg Suanming Mou
2020-07-15 3:59 ` [dpdk-dev] [PATCH 1/7] common/mlx5: add mlx5 memory management functions Suanming Mou
2020-07-15 3:59 ` [dpdk-dev] [PATCH 2/7] net/mlx5: add allocate memory from system devarg Suanming Mou
2020-07-15 3:59 ` [dpdk-dev] [PATCH 3/7] net/mlx5: convert control path memory to unified malloc Suanming Mou
2020-07-15 4:00 ` [dpdk-dev] [PATCH 4/7] common/mlx5: " Suanming Mou
2020-07-15 4:00 ` [dpdk-dev] [PATCH 5/7] common/mlx5: convert data path objects " Suanming Mou
2020-07-15 4:00 ` [dpdk-dev] [PATCH 6/7] net/mlx5: convert configuration " Suanming Mou
2020-07-15 4:00 ` [dpdk-dev] [PATCH 7/7] net/mlx5: convert Rx/Tx queue " Suanming Mou
2020-07-16 9:20 ` [dpdk-dev] [PATCH v2 0/7] net/mlx5: add sys_mem_en devarg Suanming Mou
2020-07-16 9:20 ` [dpdk-dev] [PATCH v2 1/7] common/mlx5: add mlx5 memory management functions Suanming Mou
2020-07-16 9:20 ` [dpdk-dev] [PATCH v2 2/7] net/mlx5: add allocate memory from system devarg Suanming Mou
2020-07-16 9:20 ` [dpdk-dev] [PATCH v2 3/7] net/mlx5: convert control path memory to unified malloc Suanming Mou
2020-07-16 9:20 ` [dpdk-dev] [PATCH v2 4/7] common/mlx5: " Suanming Mou
2020-07-16 9:20 ` [dpdk-dev] [PATCH v2 5/7] common/mlx5: convert data path objects " Suanming Mou
2020-07-16 9:20 ` [dpdk-dev] [PATCH v2 6/7] net/mlx5: convert configuration " Suanming Mou
2020-07-16 9:20 ` [dpdk-dev] [PATCH v2 7/7] net/mlx5: convert Rx/Tx queue " Suanming Mou
2020-07-17 13:50 ` [dpdk-dev] [PATCH v3 0/7] net/mlx5: add sys_mem_en devarg Suanming Mou
2020-07-17 13:50 ` [dpdk-dev] [PATCH v3 1/7] common/mlx5: add mlx5 memory management functions Suanming Mou
2020-07-17 13:51 ` [dpdk-dev] [PATCH v3 2/7] net/mlx5: add allocate memory from system devarg Suanming Mou
2020-07-17 13:51 ` [dpdk-dev] [PATCH v3 3/7] net/mlx5: convert control path memory to unified malloc Suanming Mou
2020-07-17 13:51 ` [dpdk-dev] [PATCH v3 4/7] common/mlx5: " Suanming Mou
2020-07-17 13:51 ` [dpdk-dev] [PATCH v3 5/7] common/mlx5: convert data path objects " Suanming Mou
2020-07-17 13:51 ` [dpdk-dev] [PATCH v3 6/7] net/mlx5: convert configuration " Suanming Mou
2020-07-17 13:51 ` Suanming Mou [this message]
2020-07-17 17:09 ` [dpdk-dev] [PATCH v3 0/7] net/mlx5: add sys_mem_en devarg Raslan Darawsheh
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1594993865-396296-8-git-send-email-suanmingm@mellanox.com \
--to=suanmingm@mellanox.com \
--cc=dev@dpdk.org \
--cc=matan@mellanox.com \
--cc=orika@mellanox.com \
--cc=rasland@mellanox.com \
--cc=viacheslavo@mellanox.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).