From: Michael Baum <michaelba@nvidia.com>
To: dev@dpdk.org
Cc: Matan Azrad <matan@nvidia.com>,
Raslan Darawsheh <rasland@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>,
stable@dpdk.org
Subject: [dpdk-dev] [PATCH 2/2] net/mlx5: workaround counter memory region creation
Date: Thu, 18 Mar 2021 09:56:08 +0000
Message-ID: <1616061368-29768-3-git-send-email-michaelba@nvidia.com>
In-Reply-To: <1616061368-29768-1-git-send-email-michaelba@nvidia.com>
Due to a kernel issue in direct MKEY creation using the DevX API for
physical memory, this patch switches the counter MR creation to the
Verbs API.
Fixes: 3aa279157fa0 ("net/mlx5: synchronize flow counter pool creation")
Cc: stable@dpdk.org
Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
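Note (not part of the applied diff): a minimal sketch of the Verbs-based
registration this patch moves to, assuming a standalone libibverbs program
with an already-allocated protection domain. The driver itself goes through
sh->share_cache.reg_mr_cb() rather than calling ibv_reg_mr() directly; the
helper name below is purely illustrative.

/*
 * Illustrative only -- not mlx5 driver code. Registers the counter
 * statistics buffer through the Verbs API so the HCA can DMA query
 * results into it, analogous to what reg_mr_cb() does in this patch.
 */
#include <stddef.h>
#include <infiniband/verbs.h>

static struct ibv_mr *
counter_mem_register(struct ibv_pd *pd, void *mem, size_t size)
{
	/* LOCAL_WRITE is enough: the device only writes counter values. */
	return ibv_reg_mr(pd, mem, size, IBV_ACCESS_LOCAL_WRITE);
}

/*
 * The returned MR's lkey is what the flow counter query command takes,
 * which is why the diff switches from mem_mng->dm->id (DevX mkey id)
 * to mem_mng->dm.lkey.
 */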
drivers/net/mlx5/linux/mlx5_os.c | 10 ----------
drivers/net/mlx5/mlx5.c | 11 +++++++----
drivers/net/mlx5/mlx5.h | 5 +----
drivers/net/mlx5/mlx5_flow.c | 27 +++++----------------------
drivers/net/mlx5/windows/mlx5_os.c | 9 ---------
5 files changed, 13 insertions(+), 49 deletions(-)
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 5e3ae9f..5740214 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1163,16 +1163,6 @@
err = -err;
goto error;
}
- /* Check relax ordering support. */
- if (!haswell_broadwell_cpu) {
- sh->cmng.relaxed_ordering_write =
- config->hca_attr.relaxed_ordering_write;
- sh->cmng.relaxed_ordering_read =
- config->hca_attr.relaxed_ordering_read;
- } else {
- sh->cmng.relaxed_ordering_read = 0;
- sh->cmng.relaxed_ordering_write = 0;
- }
sh->rq_ts_format = config->hca_attr.rq_ts_format;
sh->sq_ts_format = config->hca_attr.sq_ts_format;
sh->qp_ts_format = config->hca_attr.qp_ts_format;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index abd7ff7..fb58631 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -469,17 +469,20 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
/**
* Destroy all the resources allocated for a counter memory management.
*
+ * @param[in] sh
+ * Pointer to mlx5_dev_ctx_shared object to free.
* @param[in] mng
* Pointer to the memory management structure.
*/
static void
-mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng)
+mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh,
+ struct mlx5_counter_stats_mem_mng *mng)
{
uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data;
LIST_REMOVE(mng, next);
- claim_zero(mlx5_devx_cmd_destroy(mng->dm));
- claim_zero(mlx5_os_umem_dereg(mng->umem));
+ sh->share_cache.dereg_mr_cb(&mng->dm);
+ memset(&mng->dm, 0, sizeof(mng->dm));
mlx5_free(mem);
}
@@ -533,7 +536,7 @@ static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
}
mng = LIST_FIRST(&sh->cmng.mem_mngs);
while (mng) {
- mlx5_flow_destroy_counter_stat_mem_mng(mng);
+ mlx5_flow_destroy_counter_stat_mem_mng(sh, mng);
mng = LIST_FIRST(&sh->cmng.mem_mngs);
}
memset(&sh->cmng, 0, sizeof(sh->cmng));
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index e2eb4db..8e8727a 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -422,8 +422,7 @@ struct mlx5_flow_counter_pool {
struct mlx5_counter_stats_mem_mng {
LIST_ENTRY(mlx5_counter_stats_mem_mng) next;
struct mlx5_counter_stats_raw *raws;
- struct mlx5_devx_obj *dm;
- void *umem;
+ struct mlx5_pmd_mr dm;
};
/* Raw memory structure for the counter statistics values of a pool. */
@@ -454,8 +453,6 @@ struct mlx5_flow_counter_mng {
uint8_t pending_queries;
uint16_t pool_index;
uint8_t query_thread_on;
- bool relaxed_ordering_read;
- bool relaxed_ordering_write;
bool counter_fallback; /* Use counter fallback management. */
LIST_HEAD(mem_mngs, mlx5_counter_stats_mem_mng) mem_mngs;
LIST_HEAD(stat_raws, mlx5_counter_stats_raw) free_stat_raws;
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index d46fc33..afa8ab4 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -6717,7 +6717,6 @@ struct mlx5_meter_domains_infos *
static int
mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
{
- struct mlx5_devx_mkey_attr mkey_attr;
struct mlx5_counter_stats_mem_mng *mem_mng;
volatile struct flow_counter_stats *raw_data;
int raws_n = MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES;
@@ -6727,6 +6726,7 @@ struct mlx5_meter_domains_infos *
sizeof(struct mlx5_counter_stats_mem_mng);
size_t pgsize = rte_mem_page_size();
uint8_t *mem;
+ int ret;
int i;
if (pgsize == (size_t)-1) {
@@ -6741,26 +6741,9 @@ struct mlx5_meter_domains_infos *
}
mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
- mem_mng->umem = mlx5_os_umem_reg(sh->ctx, mem, size,
- IBV_ACCESS_LOCAL_WRITE);
- if (!mem_mng->umem) {
- rte_errno = errno;
- mlx5_free(mem);
- return -rte_errno;
- }
- mkey_attr.addr = (uintptr_t)mem;
- mkey_attr.size = size;
- mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
- mkey_attr.pd = sh->pdn;
- mkey_attr.log_entity_size = 0;
- mkey_attr.pg_access = 0;
- mkey_attr.klm_array = NULL;
- mkey_attr.klm_num = 0;
- mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write;
- mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read;
- mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
- if (!mem_mng->dm) {
- mlx5_os_umem_dereg(mem_mng->umem);
+ ret = sh->share_cache.reg_mr_cb(sh->pd, (void *)mem, size,
+ &mem_mng->dm);
+ if (ret) {
rte_errno = errno;
mlx5_free(mem);
return -rte_errno;
@@ -6879,7 +6862,7 @@ struct mlx5_meter_domains_infos *
ret = mlx5_devx_cmd_flow_counter_query(pool->min_dcs, 0,
MLX5_COUNTERS_PER_POOL,
NULL, NULL,
- pool->raw_hw->mem_mng->dm->id,
+ pool->raw_hw->mem_mng->dm.lkey,
(void *)(uintptr_t)
pool->raw_hw->data,
sh->devx_comp,
diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
index 6f39276..3e72e36 100644
--- a/drivers/net/mlx5/windows/mlx5_os.c
+++ b/drivers/net/mlx5/windows/mlx5_os.c
@@ -466,15 +466,6 @@
err = -err;
goto error;
}
- /* Check relax ordering support. */
- sh->cmng.relaxed_ordering_read = 0;
- sh->cmng.relaxed_ordering_write = 0;
- if (!haswell_broadwell_cpu) {
- sh->cmng.relaxed_ordering_write =
- config->hca_attr.relaxed_ordering_write;
- sh->cmng.relaxed_ordering_read =
- config->hca_attr.relaxed_ordering_read;
- }
}
if (config->devx) {
uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];
--
1.8.3.1
Thread overview: 7+ messages
2021-03-18 9:56 [dpdk-dev] [PATCH 0/2] adjusting mkey creations Michael Baum
2021-03-18 9:56 ` [dpdk-dev] [PATCH 1/2] net/mlx5: workaround ASO memory region creation Michael Baum
2021-03-18 9:56 ` Michael Baum [this message]
2021-03-25 8:52 ` [dpdk-dev] [PATCH 0/2] adjusting mkey creations Raslan Darawsheh
2021-03-29 8:14 ` Raslan Darawsheh
2021-04-26 12:48 ` [dpdk-dev] [PATCH v2] net/mlx5: workaround ASO memory region creation Michael Baum
2021-05-03 10:11 ` Raslan Darawsheh