From: Michael Baum <michaelba@nvidia.com>
To: <stable@dpdk.org>
Cc: Matan Azrad <matan@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Subject: [PATCH 20.11 4/5] net/mlx5: fix memory socket selection in ASO management
Date: Mon, 21 Feb 2022 21:46:34 +0200
Message-ID: <20220221194635.2458173-5-michaelba@nvidia.com>
In-Reply-To: <20220221194635.2458173-1-michaelba@nvidia.com>
[ upstream commit 147f6fb42bd7637b37a9180b0774275531c05f9b ]
When the ASO objects (WQE, CQE and MR) are created, the socket number is
given as a parameter.

The socket was wrongly hardcoded to 0, even when the user had not configured
any memory on that socket.

This patch replaces the hardcoded socket with the default socket
(SOCKET_ID_ANY).
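
For illustration only (not part of the applied diff), a minimal sketch of the
difference, using the public rte_malloc_socket() API rather than the
driver-internal mlx5_malloc(); the driver helper takes the socket as its last
argument in the same way. The helper name below is hypothetical:

    #include <rte_malloc.h>
    #include <rte_memory.h> /* SOCKET_ID_ANY */

    static void *
    alloc_queue_buf(size_t size)
    {
            /*
             * Hardcoding socket 0 fails with ENOMEM when hugepage memory
             * was reserved only on other NUMA sockets:
             *
             *     buf = rte_malloc_socket("aso", size, 4096, 0);
             *
             * SOCKET_ID_ANY lets the allocator pick any socket that
             * actually has memory configured.
             */
            return rte_malloc_socket("aso", size, 4096, SOCKET_ID_ANY);
    }
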
Fixes: f935ed4b645a ("net/mlx5: support flow hit action for aging")
Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
drivers/net/mlx5/mlx5_flow_age.c | 27 +++++++++++----------------
1 file changed, 11 insertions(+), 16 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow_age.c b/drivers/net/mlx5/mlx5_flow_age.c
index 6c4ee0d33c..aae7a3758a 100644
--- a/drivers/net/mlx5/mlx5_flow_age.c
+++ b/drivers/net/mlx5/mlx5_flow_age.c
@@ -38,8 +38,6 @@ mlx5_aso_cq_destroy(struct mlx5_aso_cq *cq)
* Pointer to CQ to create.
* @param[in] log_desc_n
* Log of number of descriptors in queue.
- * @param[in] socket
- * Socket to use for allocation.
* @param[in] uar_page_id
* UAR page ID to use.
* @param[in] eqn
@@ -50,7 +48,7 @@ mlx5_aso_cq_destroy(struct mlx5_aso_cq *cq)
*/
static int
mlx5_aso_cq_create(void *ctx, struct mlx5_aso_cq *cq, uint16_t log_desc_n,
- int socket, int uar_page_id, uint32_t eqn)
+ int uar_page_id, uint32_t eqn)
{
struct mlx5_devx_cq_attr attr = { 0 };
size_t pgsize = sysconf(_SC_PAGESIZE);
@@ -60,7 +58,7 @@ mlx5_aso_cq_create(void *ctx, struct mlx5_aso_cq *cq, uint16_t log_desc_n,
cq->log_desc_n = log_desc_n;
umem_size = sizeof(struct mlx5_cqe) * cq_size + sizeof(*cq->db_rec) * 2;
cq->umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
- 4096, socket);
+ 4096, SOCKET_ID_ANY);
if (!cq->umem_buf) {
DRV_LOG(ERR, "Failed to allocate memory for CQ.");
rte_errno = ENOMEM;
@@ -123,8 +121,6 @@ mlx5_aso_devx_dereg_mr(struct mlx5_aso_devx_mr *mr)
* Size of MR buffer.
* @param[in/out] mr
* Pointer to MR to create.
- * @param[in] socket
- * Socket to use for allocation.
* @param[in] pdn
* Protection Domain number to use.
*
@@ -133,12 +129,12 @@ mlx5_aso_devx_dereg_mr(struct mlx5_aso_devx_mr *mr)
*/
static int
mlx5_aso_devx_reg_mr(void *ctx, size_t length, struct mlx5_aso_devx_mr *mr,
- int socket, int pdn)
+ int pdn)
{
struct mlx5_devx_mkey_attr mkey_attr;
mr->buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, length, 4096,
- socket);
+ SOCKET_ID_ANY);
if (!mr->buf) {
DRV_LOG(ERR, "Failed to create ASO bits mem for MR by Devx.");
return -1;
@@ -240,8 +236,6 @@ mlx5_aso_init_sq(struct mlx5_aso_sq *sq)
* Context returned from mlx5 open_device() glue function.
* @param[in/out] sq
* Pointer to SQ to create.
- * @param[in] socket
- * Socket to use for allocation.
* @param[in] uar
* User Access Region object.
* @param[in] pdn
@@ -255,7 +249,7 @@ mlx5_aso_init_sq(struct mlx5_aso_sq *sq)
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
+mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq,
struct mlx5dv_devx_uar *uar, uint32_t pdn,
uint32_t eqn, uint16_t log_desc_n, uint32_t ts_format)
{
@@ -268,14 +262,15 @@ mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
int ret;
if (mlx5_aso_devx_reg_mr(ctx, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
- sq_desc_n, &sq->mr, socket, pdn))
+ sq_desc_n, &sq->mr, pdn))
return -1;
- if (mlx5_aso_cq_create(ctx, &sq->cq, log_desc_n, socket,
+ if (mlx5_aso_cq_create(ctx, &sq->cq, log_desc_n,
mlx5_os_get_devx_uar_page_id(uar), eqn))
goto error;
sq->log_desc_n = log_desc_n;
- sq->umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size +
- sizeof(*sq->db_rec) * 2, 4096, socket);
+ sq->umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+ wq_size + sizeof(*sq->db_rec) * 2,
+ 4096, SOCKET_ID_ANY);
if (!sq->umem_buf) {
DRV_LOG(ERR, "Can't allocate wqe buffer.");
rte_errno = ENOMEM;
@@ -347,7 +342,7 @@ mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int socket,
int
mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh)
{
- return mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq, 0,
+ return mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq,
sh->tx_uar, sh->pdn, sh->eqn,
MLX5_ASO_QUEUE_LOG_DESC, sh->sq_ts_format);
}
--
2.25.1
Thread overview: 8+ messages
2022-02-21 19:46 [PATCH 20.11 0/5] mlx5: some fixes Michael Baum
2022-02-21 19:46 ` [PATCH 20.11 1/5] common/mlx5: add minimum WQE size for striding RQ Michael Baum
2022-02-21 19:46 ` [PATCH 20.11 2/5] net/mlx5: improve stride parameter names Michael Baum
2022-02-21 19:46 ` [PATCH 20.11 3/5] net/mlx5: fix MPRQ stride devargs adjustment Michael Baum
2022-02-21 19:46 ` Michael Baum [this message]
2022-02-21 19:46 ` [PATCH 20.11 5/5] common/mlx5: fix error handling in multi-class probe Michael Baum
2022-02-22 14:32 ` [PATCH 20.11 0/5] mlx5: some fixes Luca Boccassi
2022-02-23 16:06 ` Michael Baum