From: Anatoly Burakov <anatoly.burakov@intel.com>
To: dev@dpdk.org
Cc: stephen@networkplumber.org, thomas@monjalon.net, david.marchand@redhat.com
Date: Wed, 29 May 2019 17:30:52 +0100
Message-Id: <9c7c20340a166f9a40e6ed84ce0f73b7a944c802.1559147228.git.anatoly.burakov@intel.com>
X-Mailer: git-send-email 2.17.1
Subject: [dpdk-dev] [PATCH 06/25] mem: use new memory locking API

Replace usages of direct access to the shared memory config with
calls to the new memory locking API.

Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
---
 lib/librte_eal/common/eal_common_memory.c | 43 ++++++++++-------------
 1 file changed, 18 insertions(+), 25 deletions(-)

diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
index 3baa2f23a..db44078fd 100644
--- a/lib/librte_eal/common/eal_common_memory.c
+++ b/lib/librte_eal/common/eal_common_memory.c
@@ -596,13 +596,12 @@ rte_memseg_contig_walk_thread_unsafe(rte_memseg_contig_walk_t func, void *arg)
 int __rte_experimental
 rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg)
 {
-	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 	int ret = 0;
 
 	/* do not allow allocations/frees/init while we iterate */
-	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+	rte_eal_mcfg_mem_read_lock();
 	ret = rte_memseg_contig_walk_thread_unsafe(func, arg);
-	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+	rte_eal_mcfg_mem_read_unlock();
 
 	return ret;
 }
@@ -638,13 +637,12 @@ rte_memseg_walk_thread_unsafe(rte_memseg_walk_t func, void *arg)
 int __rte_experimental
 rte_memseg_walk(rte_memseg_walk_t func, void *arg)
 {
-	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 	int ret = 0;
 
 	/* do not allow allocations/frees/init while we iterate */
-	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+	rte_eal_mcfg_mem_read_lock();
 	ret = rte_memseg_walk_thread_unsafe(func, arg);
-	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+	rte_eal_mcfg_mem_read_unlock();
 
 	return ret;
 }
@@ -671,13 +669,12 @@ rte_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg)
 int __rte_experimental
 rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg)
 {
-	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 	int ret = 0;
 
 	/* do not allow allocations/frees/init while we iterate */
-	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+	rte_eal_mcfg_mem_read_lock();
 	ret = rte_memseg_list_walk_thread_unsafe(func, arg);
-	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+	rte_eal_mcfg_mem_read_unlock();
 
 	return ret;
 }
@@ -755,12 +752,11 @@ rte_memseg_get_fd_thread_unsafe(const struct rte_memseg *ms)
 int __rte_experimental
 rte_memseg_get_fd(const struct rte_memseg *ms)
 {
-	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 	int ret;
 
-	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+	rte_eal_mcfg_mem_read_lock();
 	ret = rte_memseg_get_fd_thread_unsafe(ms);
-	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+	rte_eal_mcfg_mem_read_unlock();
 
 	return ret;
 }
@@ -811,12 +807,11 @@ rte_memseg_get_fd_offset_thread_unsafe(const struct rte_memseg *ms,
 int __rte_experimental
 rte_memseg_get_fd_offset(const struct rte_memseg *ms, size_t *offset)
 {
-	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 	int ret;
 
-	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+	rte_eal_mcfg_mem_read_lock();
 	ret = rte_memseg_get_fd_offset_thread_unsafe(ms, offset);
-	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+	rte_eal_mcfg_mem_read_unlock();
 
 	return ret;
 }
@@ -837,7 +832,7 @@ rte_extmem_register(void *va_addr, size_t len, rte_iova_t iova_addrs[],
 		rte_errno = EINVAL;
 		return -1;
 	}
-	rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+	rte_eal_mcfg_mem_write_lock();
 
 	/* make sure the segment doesn't already exist */
 	if (malloc_heap_find_external_seg(va_addr, len) != NULL) {
@@ -866,14 +861,13 @@ rte_extmem_register(void *va_addr, size_t len, rte_iova_t iova_addrs[],
 	/* memseg list successfully created - increment next socket ID */
 	mcfg->next_socket_id++;
 unlock:
-	rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+	rte_eal_mcfg_mem_write_unlock();
 
 	return ret;
 }
 
 int __rte_experimental
 rte_extmem_unregister(void *va_addr, size_t len)
 {
-	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 	struct rte_memseg_list *msl;
 	int ret = 0;
 
@@ -881,7 +875,7 @@ rte_extmem_unregister(void *va_addr, size_t len)
 		rte_errno = EINVAL;
 		return -1;
 	}
-	rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+	rte_eal_mcfg_mem_write_lock();
 
 	/* find our segment */
 	msl = malloc_heap_find_external_seg(va_addr, len);
@@ -893,14 +887,13 @@ rte_extmem_unregister(void *va_addr, size_t len)
 
 	ret = malloc_heap_destroy_external_seg(msl);
 unlock:
-	rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+	rte_eal_mcfg_mem_write_unlock();
 
 	return ret;
 }
 
 static int
 sync_memory(void *va_addr, size_t len, bool attach)
 {
-	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 	struct rte_memseg_list *msl;
 	int ret = 0;
 
@@ -908,7 +901,7 @@ sync_memory(void *va_addr, size_t len, bool attach)
 		rte_errno = EINVAL;
 		return -1;
 	}
-	rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+	rte_eal_mcfg_mem_write_lock();
 
 	/* find our segment */
 	msl = malloc_heap_find_external_seg(va_addr, len);
@@ -923,7 +916,7 @@ sync_memory(void *va_addr, size_t len, bool attach)
 		ret = rte_fbarray_detach(&msl->memseg_arr);
 unlock:
-	rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+	rte_eal_mcfg_mem_write_unlock();
 
 	return ret;
 }
@@ -951,7 +944,7 @@ rte_eal_memory_init(void)
 		return -1;
 
 	/* lock mem hotplug here, to prevent races while we init */
-	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+	rte_eal_mcfg_mem_read_lock();
 
 	if (rte_eal_memseg_init() < 0)
 		goto fail;
@@ -970,6 +963,6 @@ rte_eal_memory_init(void)
 	return 0;
 
 fail:
-	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+	rte_eal_mcfg_mem_read_unlock();
 	return -1;
 }
-- 
2.17.1
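
A note on the new calls used above: the rte_eal_mcfg_mem_read_lock()/unlock()
and rte_eal_mcfg_mem_write_lock()/unlock() helpers are introduced earlier in
this series, so their definitions are not part of this patch. Judging purely
from the code they replace, they presumably just wrap the memory hotplug
rwlock held in the shared memory config, roughly along the lines of the
minimal sketch below. The function names are taken from the calls in this
patch; the header choices and placement are assumptions, not a copy of the
actual implementation.

/*
 * Hedged sketch of the assumed wrappers: each helper fetches the shared
 * mem config and takes or releases its memory hotplug rwlock, exactly the
 * sequence that the removed lines above used to open-code at every call
 * site.
 */
#include <rte_eal.h>            /* rte_eal_get_configuration() */
#include <rte_eal_memconfig.h>  /* struct rte_mem_config */
#include <rte_rwlock.h>

void
rte_eal_mcfg_mem_read_lock(void)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
}

void
rte_eal_mcfg_mem_read_unlock(void)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
}

void
rte_eal_mcfg_mem_write_lock(void)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
}

void
rte_eal_mcfg_mem_write_unlock(void)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
}

Centralising the lock in helpers like these is what lets callers such as
rte_memseg_walk() drop their local mcfg pointer in the hunks above when all
they needed it for was the hotplug lock.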