From mboxrd@z Thu Jan  1 00:00:00 1970
From: Paul Szczepanek
To: dev@dpdk.org
Cc: mb@smartsharesystems.com, Paul Szczepanek,
	Jack Bond-Preston, Nathan Brown
Subject: [PATCH v13 2/6] mempool: add functions to get extra mempool info
Date: Thu, 30 May 2024 09:40:38 +0000
Message-Id: <20240530094042.1960212-3-paul.szczepanek@arm.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20240530094042.1960212-1-paul.szczepanek@arm.com>
References: <20230927150854.3670391-2-paul.szczepanek@arm.com>
 <20240530094042.1960212-1-paul.szczepanek@arm.com>
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add two functions:
- rte_mempool_get_mem_range - get the virtual memory range
  of the objects in the mempool,
- rte_mempool_get_obj_alignment - get the alignment of
  objects in the mempool.

Add tests that exercise both new functions.
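For context, a minimal usage sketch of the new API (not part of this
patch); the helper name and output format below are hypothetical and
only illustrate how a caller might consume the two functions:

/*
 * Hypothetical helper, for illustration only: query the memory range
 * and object alignment of an already created mempool and print them.
 */
#include <stdbool.h>
#include <stdio.h>

#include <rte_mempool.h>

static void
print_mempool_range_info(const char *tag, struct rte_mempool *mp)
{
	void *start = NULL;
	size_t len = 0;
	bool contig = false;
	size_t align;

	/* returns -EINVAL if mp is NULL or has not been populated yet */
	if (rte_mempool_get_mem_range(mp, &start, &len, &contig) != 0) {
		printf("%s: no memory range available\n", tag);
		return;
	}

	align = rte_mempool_get_obj_alignment(mp);
	printf("%s: start=%p length=%zu contiguous=%s alignment=%zu\n",
			tag, start, len, contig ? "yes" : "no", align);
}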
Signed-off-by: Paul Szczepanek
Reviewed-by: Jack Bond-Preston
Reviewed-by: Nathan Brown
Acked-by: Morten Brørup
---
 app/test/test_mempool.c   | 71 +++++++++++++++++++++++++++++++++++++++
 lib/mempool/rte_mempool.c | 48 ++++++++++++++++++++++++++
 lib/mempool/rte_mempool.h | 41 ++++++++++++++++++++++
 lib/mempool/version.map   |  3 ++
 4 files changed, 163 insertions(+)

diff --git a/app/test/test_mempool.c b/app/test/test_mempool.c
index ad7ebd6363..f32d4a3bb9 100644
--- a/app/test/test_mempool.c
+++ b/app/test/test_mempool.c
@@ -843,12 +843,17 @@ test_mempool(void)
 	int ret = -1;
 	uint32_t nb_objs = 0;
 	uint32_t nb_mem_chunks = 0;
+	void *start = NULL;
+	size_t length = 0;
+	size_t alignment = 0;
+	bool ret_bool = false;
 	struct rte_mempool *mp_cache = NULL;
 	struct rte_mempool *mp_nocache = NULL;
 	struct rte_mempool *mp_stack_anon = NULL;
 	struct rte_mempool *mp_stack_mempool_iter = NULL;
 	struct rte_mempool *mp_stack = NULL;
 	struct rte_mempool *default_pool = NULL;
+	struct rte_mempool *mp_alignment = NULL;
 	struct mp_data cb_arg = {
 		.ret = -1
 	};
@@ -967,6 +972,71 @@ test_mempool(void)
 	}
 	rte_mempool_obj_iter(default_pool, my_obj_init, NULL);
 
+	if (rte_mempool_get_mem_range(default_pool, &start, &length, NULL)) {
+		printf("cannot get mem range from default mempool\n");
+		GOTO_ERR(ret, err);
+	}
+
+	if (rte_mempool_get_mem_range(NULL, NULL, NULL, NULL) != -EINVAL) {
+		printf("rte_mempool_get_mem_range failed to return -EINVAL "
+				"when passed invalid arguments\n");
+		GOTO_ERR(ret, err);
+	}
+
+	if (start == NULL || length < (MEMPOOL_SIZE * MEMPOOL_ELT_SIZE)) {
+		printf("mem range of default mempool is invalid\n");
+		GOTO_ERR(ret, err);
+	}
+
+	/* by default mempool objects are aligned by RTE_MEMPOOL_ALIGN */
+	alignment = rte_mempool_get_obj_alignment(default_pool);
+	if (alignment != RTE_MEMPOOL_ALIGN) {
+		printf("rte_mempool_get_obj_alignment returned wrong value, "
+				"expected %zu, returned %zu\n",
+				(size_t)RTE_MEMPOOL_ALIGN, alignment);
+		GOTO_ERR(ret, err);
+	}
+
+	/* create a mempool with a RTE_MEMPOOL_F_NO_CACHE_ALIGN flag */
+	mp_alignment = rte_mempool_create("test_alignment",
+			1, 8, /* the small size guarantees single memory chunk */
+			0, 0, NULL, NULL, my_obj_init, NULL,
+			SOCKET_ID_ANY, RTE_MEMPOOL_F_NO_CACHE_ALIGN);
+
+	if (mp_alignment == NULL) {
+		printf("cannot allocate mempool with "
+				"RTE_MEMPOOL_F_NO_CACHE_ALIGN flag\n");
+		GOTO_ERR(ret, err);
+	}
+
+	/* mempool was created with RTE_MEMPOOL_F_NO_CACHE_ALIGN
+	 * and minimum alignment is expected which is sizeof(uint64_t)
+	 */
+	alignment = rte_mempool_get_obj_alignment(mp_alignment);
+	if (alignment != sizeof(uint64_t)) {
+		printf("rte_mempool_get_obj_alignment returned wrong value, "
+				"expected %zu, returned %zu\n",
+				(size_t)sizeof(uint64_t), alignment);
+		GOTO_ERR(ret, err);
+	}
+
+	alignment = rte_mempool_get_obj_alignment(NULL);
+	if (alignment != 0) {
+		printf("rte_mempool_get_obj_alignment failed to return 0 for "
+				"an invalid mempool\n");
+		GOTO_ERR(ret, err);
+	}
+
+	if (rte_mempool_get_mem_range(mp_alignment, NULL, NULL, &ret_bool)) {
+		printf("cannot get mem range from mempool\n");
+		GOTO_ERR(ret, err);
+	}
+
+	if (!ret_bool) {
+		printf("mempool not contiguous\n");
+		GOTO_ERR(ret, err);
+	}
+
 	/* retrieve the mempool from its name */
 	if (rte_mempool_lookup("test_nocache") != mp_nocache) {
 		printf("Cannot lookup mempool from its name\n");
@@ -1039,6 +1109,7 @@ test_mempool(void)
 	rte_mempool_free(mp_stack_mempool_iter);
 	rte_mempool_free(mp_stack);
 	rte_mempool_free(default_pool);
+	rte_mempool_free(mp_alignment);
 
 	return ret;
 }
diff --git a/lib/mempool/rte_mempool.c b/lib/mempool/rte_mempool.c
index 12390a2c81..b2551572ed 100644
--- a/lib/mempool/rte_mempool.c
+++ b/lib/mempool/rte_mempool.c
@@ -1386,6 +1386,54 @@ void rte_mempool_walk(void (*func)(struct rte_mempool *, void *),
 	rte_mcfg_mempool_read_unlock();
 }
 
+int rte_mempool_get_mem_range(struct rte_mempool *mp,
+		void **mem_range_start, size_t *mem_range_length,
+		bool *contiguous)
+{
+	if (mp == NULL)
+		return -EINVAL;
+
+	void *address_low = (void *)UINTPTR_MAX;
+	void *address_high = 0;
+	size_t address_diff = 0;
+	size_t mem_total_size = 0;
+	struct rte_mempool_memhdr *hdr;
+
+	/* go through memory chunks and find the lowest and highest addresses */
+	STAILQ_FOREACH(hdr, &mp->mem_list, next) {
+		if (address_low > hdr->addr)
+			address_low = hdr->addr;
+		if (address_high < RTE_PTR_ADD(hdr->addr, hdr->len))
+			address_high = RTE_PTR_ADD(hdr->addr, hdr->len);
+		mem_total_size += hdr->len;
+	}
+
+	/* check if mempool was not populated yet (no memory chunks) */
+	if (address_low == (void *)UINTPTR_MAX)
+		return -EINVAL;
+
+	address_diff = (size_t)RTE_PTR_DIFF(address_high, address_low);
+	if (mem_range_start != NULL)
+		*mem_range_start = address_low;
+	if (mem_range_length != NULL)
+		*mem_range_length = address_diff;
+	if (contiguous != NULL)
+		*contiguous = (mem_total_size == address_diff) ? true : false;
+
+	return 0;
+}
+
+size_t rte_mempool_get_obj_alignment(struct rte_mempool *mp)
+{
+	if (mp == NULL)
+		return 0;
+
+	if (mp->flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN)
+		return sizeof(uint64_t);
+	else
+		return RTE_MEMPOOL_ALIGN;
+}
+
 struct mempool_callback_data {
 	TAILQ_ENTRY(mempool_callback_data) callbacks;
 	rte_mempool_event_callback *func;
diff --git a/lib/mempool/rte_mempool.h b/lib/mempool/rte_mempool.h
index 23fd5c8465..8a97814b39 100644
--- a/lib/mempool/rte_mempool.h
+++ b/lib/mempool/rte_mempool.h
@@ -1917,6 +1917,47 @@ uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
 void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
 		      void *arg);
 
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get information about the memory range used by the mempool.
+ *
+ * @param[in] mp
+ *   Pointer to an initialized mempool.
+ * @param[out] mem_range_start
+ *   Returns lowest address in mempool. May be NULL.
+ * @param[out] mem_range_length
+ *   Returns the length of the memory range containing all the
+ *   virtual addresses in the memory pool. May be NULL.
+ * @param[out] contiguous
+ *   Returns true if virtual addresses in the memory allocated for the
+ *   mempool are contiguous. May be NULL.
+ * @return
+ *   0 on success, -EINVAL if mempool is not valid.
+ *
+ **/
+__rte_experimental
+int rte_mempool_get_mem_range(struct rte_mempool *mp,
+		void **mem_range_start, size_t *mem_range_length,
+		bool *contiguous);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Return object alignment.
+ *
+ * @param[in] mp
+ *   Pointer to a mempool.
+ *
+ * @return
+ *   Object alignment if mp is valid. 0 if mp is NULL.
+ *
+ **/
+__rte_experimental
+size_t rte_mempool_get_obj_alignment(struct rte_mempool *mp);
+
 /**
  * @internal Get page size used for mempool object allocation.
  * This function is internal to mempool library and mempool drivers.
diff --git a/lib/mempool/version.map b/lib/mempool/version.map
index 473277400c..02df634b2a 100644
--- a/lib/mempool/version.map
+++ b/lib/mempool/version.map
@@ -50,6 +50,9 @@ EXPERIMENTAL {
 	__rte_mempool_trace_get_contig_blocks;
 	__rte_mempool_trace_default_cache;
 	__rte_mempool_trace_cache_flush;
+	# added in 24.07
+	rte_mempool_get_mem_range;
+	rte_mempool_get_obj_alignment;
 };
 
 INTERNAL {
-- 
2.25.1