From mboxrd@z Thu Jan 1 00:00:00 1970
Received: from foss.arm.com (usa-sjc-mx-foss1.foss.arm.com [217.140.101.70])
	by dpdk.org (Postfix) with ESMTP id C9FB45A
	for ; Wed, 20 Mar 2019 07:25:32 +0100 (CET)
Received: from usa-sjc-imap-foss1.foss.arm.com (unknown [10.72.51.249])
	by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id 0DE3CEBD;
	Tue, 19 Mar 2019 23:25:32 -0700 (PDT)
Received: from net-arm-thunderx2.shanghai.arm.com (net-arm-thunderx2.shanghai.arm.com [10.169.40.121])
	by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id 5AD663F575;
	Tue, 19 Mar 2019 23:25:30 -0700 (PDT)
From: Joyce Kong
To: dev@dpdk.org
Cc: nd@arm.com, jerinj@marvell.com, konstantin.ananyev@intel.com,
	chaozhu@linux.vnet.ibm.com, bruce.richardson@intel.com, thomas@monjalon.net,
	hemant.agrawal@nxp.com, honnappa.nagarahalli@arm.com, gavin.hu@arm.com
Date: Wed, 20 Mar 2019 14:25:07 +0800
Message-Id: <1553063109-57574-2-git-send-email-joyce.kong@arm.com>
X-Mailer: git-send-email 2.7.4
In-Reply-To: <1553063109-57574-1-git-send-email-joyce.kong@arm.com>
References: <1553063109-57574-1-git-send-email-joyce.kong@arm.com>
In-Reply-To: <1544672265-219262-2-git-send-email-joyce.kong@arm.com>
References: <1544672265-219262-2-git-send-email-joyce.kong@arm.com>
Subject: [dpdk-dev] [PATCH v4 1/3] rwlock: reimplement with atomic builtins
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions
X-List-Received-Date: Wed, 20 Mar 2019 06:25:33 -0000

The __sync builtin based implementation generates full memory barriers
('dmb ish') on Arm platforms. Use C11 atomic builtins instead to generate
one-way barriers.

Here is the assembly code of the __sync_bool_compare_and_swap builtin.
__sync_bool_compare_and_swap(dst, exp, src);
   0x000000000090f1b0 <+16>:  e0 07 40 f9  ldr    x0, [sp, #8]
   0x000000000090f1b4 <+20>:  e1 0f 40 79  ldrh   w1, [sp, #6]
   0x000000000090f1b8 <+24>:  e2 0b 40 79  ldrh   w2, [sp, #4]
   0x000000000090f1bc <+28>:  21 3c 00 12  and    w1, w1, #0xffff
   0x000000000090f1c0 <+32>:  03 7c 5f 48  ldxrh  w3, [x0]
   0x000000000090f1c4 <+36>:  7f 00 01 6b  cmp    w3, w1
   0x000000000090f1c8 <+40>:  61 00 00 54  b.ne   0x90f1d4  // b.any
   0x000000000090f1cc <+44>:  02 fc 04 48  stlxrh w4, w2, [x0]
   0x000000000090f1d0 <+48>:  84 ff ff 35  cbnz   w4, 0x90f1c0
   0x000000000090f1d4 <+52>:  bf 3b 03 d5  dmb    ish
   0x000000000090f1d8 <+56>:  e0 17 9f 1a  cset   w0, eq  // eq = none

Signed-off-by: Gavin Hu
Signed-off-by: Joyce Kong
Tested-by: Joyce Kong
Acked-by: Jerin Jacob
---
 lib/librte_eal/common/include/generic/rte_rwlock.h | 29 +++++++++++-----------
 1 file changed, 15 insertions(+), 14 deletions(-)

diff --git a/lib/librte_eal/common/include/generic/rte_rwlock.h b/lib/librte_eal/common/include/generic/rte_rwlock.h
index b05d85a..de94ca9 100644
--- a/lib/librte_eal/common/include/generic/rte_rwlock.h
+++ b/lib/librte_eal/common/include/generic/rte_rwlock.h
@@ -64,14 +64,14 @@ rte_rwlock_read_lock(rte_rwlock_t *rwl)
 	int success = 0;
 
 	while (success == 0) {
-		x = rwl->cnt;
+		x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
 		/* write lock is held */
 		if (x < 0) {
 			rte_pause();
 			continue;
 		}
-		success = rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt,
-					(uint32_t)x, (uint32_t)(x + 1));
+		success = __atomic_compare_exchange_n(&rwl->cnt, &x, x+1, 1,
+					__ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
 	}
 }
 
@@ -95,13 +95,14 @@ rte_rwlock_read_trylock(rte_rwlock_t *rwl)
 	int success = 0;
 
 	while (success == 0) {
-		x = rwl->cnt;
+		x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
 		/* write lock is held */
 		if (x < 0)
 			return -EBUSY;
-		success = rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt,
-					(uint32_t)x, (uint32_t)(x + 1));
+		success = __atomic_compare_exchange_n(&rwl->cnt, &x, x+1, 1,
+					__ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
 	}
+
 	return 0;
 }
 
@@ -114,7 +115,7 @@ rte_rwlock_read_trylock(rte_rwlock_t *rwl)
 static inline void
 rte_rwlock_read_unlock(rte_rwlock_t *rwl)
 {
-	rte_atomic32_dec((rte_atomic32_t *)(intptr_t)&rwl->cnt);
+	__atomic_fetch_sub(&rwl->cnt, 1, __ATOMIC_RELEASE);
 }
 
 /**
@@ -135,9 +136,9 @@ rte_rwlock_write_trylock(rte_rwlock_t *rwl)
 {
 	int32_t x;
 
-	x = rwl->cnt;
-	if (x != 0 || rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt,
-				0, (uint32_t)-1) == 0)
+	x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
+	if (x != 0 || __atomic_compare_exchange_n(&rwl->cnt, &x, -1, 1,
+				__ATOMIC_ACQUIRE, __ATOMIC_RELAXED) == 0)
 		return -EBUSY;
 
 	return 0;
@@ -156,14 +157,14 @@ rte_rwlock_write_lock(rte_rwlock_t *rwl)
 	int success = 0;
 
 	while (success == 0) {
-		x = rwl->cnt;
+		x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
 		/* a lock is held */
 		if (x != 0) {
 			rte_pause();
 			continue;
 		}
-		success = rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt,
-					0, (uint32_t)-1);
+		success = __atomic_compare_exchange_n(&rwl->cnt, &x, -1, 1,
+					__ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
 	}
 }
 
@@ -176,7 +177,7 @@ rte_rwlock_write_lock(rte_rwlock_t *rwl)
 static inline void
 rte_rwlock_write_unlock(rte_rwlock_t *rwl)
 {
-	rte_atomic32_inc((rte_atomic32_t *)(intptr_t)&rwl->cnt);
+	__atomic_store_n(&rwl->cnt, 0, __ATOMIC_RELEASE);
 }
 
 /**
-- 
2.7.4
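
For reference, the acquire/release pattern used by the new read-lock path can
be exercised outside DPDK with the minimal standalone sketch below. The demo_*
names are hypothetical and the snippet only illustrates the builtins the patch
relies on; it is not part of the patch. Built with GCC or Clang at -O2 on
AArch64, the CAS is expected to compile to an exclusive load-acquire/store
pair (or an LSE CAS) with no trailing 'dmb ish'.

/* Hypothetical standalone demo (not part of the patch). */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	volatile int32_t cnt; /* -1: write locked, >0: number of readers */
} demo_rwlock_t;

static inline void
demo_read_lock(demo_rwlock_t *rwl)
{
	int32_t x;

	for (;;) {
		/* Relaxed load; the CAS below provides the ordering. */
		x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
		if (x < 0)
			continue; /* writer holds the lock, keep spinning */
		/* Weak CAS: acquire on success, relaxed on failure. */
		if (__atomic_compare_exchange_n(&rwl->cnt, &x, x + 1, 1,
				__ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
			return;
	}
}

static inline void
demo_read_unlock(demo_rwlock_t *rwl)
{
	/* Release pairs with the acquire taken in demo_read_lock(). */
	__atomic_fetch_sub(&rwl->cnt, 1, __ATOMIC_RELEASE);
}

int
main(void)
{
	demo_rwlock_t lock = { 0 };

	demo_read_lock(&lock);
	printf("readers after lock:   %" PRId32 "\n", lock.cnt);
	demo_read_unlock(&lock);
	printf("readers after unlock: %" PRId32 "\n", lock.cnt);
	return 0;
}

The failure memory order is relaxed because a failed CAS simply loops and
re-reads the counter; the release in the unlock pairs with the acquire in the
lock, which is what lets the compiler drop the full barrier.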