From: Alexander Kozyrev <akozyrev@nvidia.com>
To: dev@dpdk.org
Cc: rasland@nvidia.com, matan@nvidia.com, viacheslavo@nvidia.com
Subject: [dpdk-dev] [PATCH 1/4] common/mlx5: use C11 atomics for memory allocation
Date: Tue, 27 Oct 2020 15:28:21 +0000
Message-ID: <20201027152824.15232-2-akozyrev@nvidia.com>
In-Reply-To: <20201027152824.15232-1-akozyrev@nvidia.com>
The rte_atomic API is deprecated and needs to be replaced with
C11 atomic builtins. Use relaxed memory ordering for the mlx5
malloc statistics counters and the cached memseg list pointer.
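For reference, the conversion follows this pattern (a minimal sketch
with a hypothetical uint64_t counter "ctr", not a field from this
patch):

    /* Before: deprecated rte_atomic API. */
    rte_atomic64_t ctr = RTE_ATOMIC64_INIT(0);
    rte_atomic64_inc(&ctr);
    int64_t val = rte_atomic64_read(&ctr);

    /* After: C11 atomic builtins on a plain integer. Relaxed ordering
     * is sufficient because the statistics counters carry no
     * synchronization or ordering dependency with other memory
     * accesses. */
    uint64_t ctr = 0;
    __atomic_add_fetch(&ctr, 1, __ATOMIC_RELAXED);
    uint64_t val = __atomic_load_n(&ctr, __ATOMIC_RELAXED);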
Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
drivers/common/mlx5/mlx5_malloc.c | 91 ++++++++++++++++---------------
1 file changed, 47 insertions(+), 44 deletions(-)
diff --git a/drivers/common/mlx5/mlx5_malloc.c b/drivers/common/mlx5/mlx5_malloc.c
index 44899717e0..f64c15fceb 100644
--- a/drivers/common/mlx5/mlx5_malloc.c
+++ b/drivers/common/mlx5/mlx5_malloc.c
@@ -8,8 +8,6 @@
#include <stdbool.h>
#include <string.h>
-#include <rte_atomic.h>
-
#include "mlx5_common_utils.h"
#include "mlx5_malloc.h"
@@ -17,27 +15,24 @@ struct mlx5_sys_mem {
uint32_t init:1; /* Memory allocator initialized. */
uint32_t enable:1; /* System memory select. */
uint32_t reserve:30; /* Reserve. */
- union {
- struct rte_memseg_list *last_msl;
- rte_atomic64_t a64_last_msl;
- };
+ struct rte_memseg_list *last_msl;
/* last allocated rte memory memseg list. */
#ifdef RTE_LIBRTE_MLX5_DEBUG
- rte_atomic64_t malloc_sys;
+ uint64_t malloc_sys;
/* Memory allocated from system count. */
- rte_atomic64_t malloc_rte;
+ uint64_t malloc_rte;
/* Memory allocated from hugepage count. */
- rte_atomic64_t realloc_sys;
+ uint64_t realloc_sys;
/* Memory reallocate from system count. */
- rte_atomic64_t realloc_rte;
+ uint64_t realloc_rte;
/* Memory reallocate from hugepage count. */
- rte_atomic64_t free_sys;
+ uint64_t free_sys;
/* Memory free to system count. */
- rte_atomic64_t free_rte;
+ uint64_t free_rte;
/* Memory free to hugepage count. */
- rte_atomic64_t msl_miss;
+ uint64_t msl_miss;
/* MSL miss count. */
- rte_atomic64_t msl_update;
+ uint64_t msl_update;
/* MSL update count. */
#endif
};
@@ -47,14 +42,14 @@ static struct mlx5_sys_mem mlx5_sys_mem = {
.init = 0,
.enable = 0,
#ifdef RTE_LIBRTE_MLX5_DEBUG
- .malloc_sys = RTE_ATOMIC64_INIT(0),
- .malloc_rte = RTE_ATOMIC64_INIT(0),
- .realloc_sys = RTE_ATOMIC64_INIT(0),
- .realloc_rte = RTE_ATOMIC64_INIT(0),
- .free_sys = RTE_ATOMIC64_INIT(0),
- .free_rte = RTE_ATOMIC64_INIT(0),
- .msl_miss = RTE_ATOMIC64_INIT(0),
- .msl_update = RTE_ATOMIC64_INIT(0),
+ .malloc_sys = 0,
+ .malloc_rte = 0,
+ .realloc_sys = 0,
+ .realloc_rte = 0,
+ .free_sys = 0,
+ .free_rte = 0,
+ .msl_miss = 0,
+ .msl_update = 0,
#endif
};
@@ -97,12 +92,14 @@ mlx5_mem_update_msl(void *addr)
* different with the cached msl.
*/
if (addr && !mlx5_mem_check_msl(addr,
- (struct rte_memseg_list *)(uintptr_t)rte_atomic64_read
- (&mlx5_sys_mem.a64_last_msl))) {
- rte_atomic64_set(&mlx5_sys_mem.a64_last_msl,
- (int64_t)(uintptr_t)rte_mem_virt2memseg_list(addr));
+ (struct rte_memseg_list *)__atomic_load_n
+ (&mlx5_sys_mem.last_msl, __ATOMIC_RELAXED))) {
+ __atomic_store_n(&mlx5_sys_mem.last_msl,
+ rte_mem_virt2memseg_list(addr),
+ __ATOMIC_RELAXED);
#ifdef RTE_LIBRTE_MLX5_DEBUG
- rte_atomic64_inc(&mlx5_sys_mem.msl_update);
+ __atomic_add_fetch(&mlx5_sys_mem.msl_update, 1,
+ __ATOMIC_RELAXED);
#endif
}
}
@@ -123,12 +120,12 @@ mlx5_mem_is_rte(void *addr)
* Check if the last cache msl matches. Drop to slow path
* to check if the memory belongs to rte memory.
*/
- if (!mlx5_mem_check_msl(addr, (struct rte_memseg_list *)(uintptr_t)
- rte_atomic64_read(&mlx5_sys_mem.a64_last_msl))) {
+ if (!mlx5_mem_check_msl(addr, (struct rte_memseg_list *)
+ __atomic_load_n(&mlx5_sys_mem.last_msl, __ATOMIC_RELAXED))) {
if (!rte_mem_virt2memseg_list(addr))
return false;
#ifdef RTE_LIBRTE_MLX5_DEBUG
- rte_atomic64_inc(&mlx5_sys_mem.msl_miss);
+ __atomic_add_fetch(&mlx5_sys_mem.msl_miss, 1, __ATOMIC_RELAXED);
#endif
}
return true;
@@ -190,7 +187,8 @@ mlx5_malloc(uint32_t flags, size_t size, unsigned int align, int socket)
mlx5_mem_update_msl(addr);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (addr)
- rte_atomic64_inc(&mlx5_sys_mem.malloc_rte);
+ __atomic_add_fetch(&mlx5_sys_mem.malloc_rte, 1,
+ __ATOMIC_RELAXED);
#endif
return addr;
}
@@ -203,7 +201,8 @@ mlx5_malloc(uint32_t flags, size_t size, unsigned int align, int socket)
addr = malloc(size);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (addr)
- rte_atomic64_inc(&mlx5_sys_mem.malloc_sys);
+ __atomic_add_fetch(&mlx5_sys_mem.malloc_sys, 1,
+ __ATOMIC_RELAXED);
#endif
return addr;
}
@@ -236,7 +235,8 @@ mlx5_realloc(void *addr, uint32_t flags, size_t size, unsigned int align,
mlx5_mem_update_msl(new_addr);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (new_addr)
- rte_atomic64_inc(&mlx5_sys_mem.realloc_rte);
+ __atomic_add_fetch(&mlx5_sys_mem.realloc_rte, 1,
+ __ATOMIC_RELAXED);
#endif
return new_addr;
}
@@ -248,7 +248,8 @@ mlx5_realloc(void *addr, uint32_t flags, size_t size, unsigned int align,
new_addr = realloc(addr, size);
#ifdef RTE_LIBRTE_MLX5_DEBUG
if (new_addr)
- rte_atomic64_inc(&mlx5_sys_mem.realloc_sys);
+ __atomic_add_fetch(&mlx5_sys_mem.realloc_sys, 1,
+ __ATOMIC_RELAXED);
#endif
return new_addr;
}
@@ -260,12 +261,14 @@ mlx5_free(void *addr)
return;
if (!mlx5_mem_is_rte(addr)) {
#ifdef RTE_LIBRTE_MLX5_DEBUG
- rte_atomic64_inc(&mlx5_sys_mem.free_sys);
+ __atomic_add_fetch(&mlx5_sys_mem.free_sys, 1,
+ __ATOMIC_RELAXED);
#endif
free(addr);
} else {
#ifdef RTE_LIBRTE_MLX5_DEBUG
- rte_atomic64_inc(&mlx5_sys_mem.free_rte);
+ __atomic_add_fetch(&mlx5_sys_mem.free_rte, 1,
+ __ATOMIC_RELAXED);
#endif
rte_free(addr);
}
@@ -279,14 +282,14 @@ mlx5_memory_stat_dump(void)
" free:%"PRIi64"\nRTE memory malloc:%"PRIi64","
" realloc:%"PRIi64", free:%"PRIi64"\nMSL miss:%"PRIi64","
" update:%"PRIi64"",
- rte_atomic64_read(&mlx5_sys_mem.malloc_sys),
- rte_atomic64_read(&mlx5_sys_mem.realloc_sys),
- rte_atomic64_read(&mlx5_sys_mem.free_sys),
- rte_atomic64_read(&mlx5_sys_mem.malloc_rte),
- rte_atomic64_read(&mlx5_sys_mem.realloc_rte),
- rte_atomic64_read(&mlx5_sys_mem.free_rte),
- rte_atomic64_read(&mlx5_sys_mem.msl_miss),
- rte_atomic64_read(&mlx5_sys_mem.msl_update));
+ __atomic_load_n(&mlx5_sys_mem.malloc_sys, __ATOMIC_RELAXED),
+ __atomic_load_n(&mlx5_sys_mem.realloc_sys, __ATOMIC_RELAXED),
+ __atomic_load_n(&mlx5_sys_mem.free_sys, __ATOMIC_RELAXED),
+ __atomic_load_n(&mlx5_sys_mem.malloc_rte, __ATOMIC_RELAXED),
+ __atomic_load_n(&mlx5_sys_mem.realloc_rte, __ATOMIC_RELAXED),
+ __atomic_load_n(&mlx5_sys_mem.free_rte, __ATOMIC_RELAXED),
+ __atomic_load_n(&mlx5_sys_mem.msl_miss, __ATOMIC_RELAXED),
+ __atomic_load_n(&mlx5_sys_mem.msl_update, __ATOMIC_RELAXED));
#endif
}
--
2.24.1