From: Min Zhou <zhoumin@loongson.cn>
To: thomas@monjalon.net, david.marchand@redhat.com, bruce.richardson@intel.com, anatoly.burakov@intel.com, qiming.yang@intel.com, Yuying.Zhang@intel.com, jgrajcia@cisco.com, konstantin.v.ananyev@yandex.ru
Cc: dev@dpdk.org, maobibo@loongson.cn
Subject: [v3 01/24] eal/loongarch: add atomic operations for LoongArch
Date: Mon, 6 Jun 2022 21:10:31 +0800
Message-Id: <20220606131054.2097526-2-zhoumin@loongson.cn>
X-Mailer: git-send-email 2.31.1
In-Reply-To: <20220606131054.2097526-1-zhoumin@loongson.cn>
References: <20220606131054.2097526-1-zhoumin@loongson.cn>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit

This patch adds architecture-specific atomic operations for the
LoongArch architecture. The implementations use the toolchain's
standard atomic built-ins and closely follow the generic atomics
code.

Signed-off-by: Min Zhou <zhoumin@loongson.cn>
---
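Usage note (not part of the diff): all three barrier macros added
below expand to the full-barrier "dbar 0" instruction, so on this
port rte_wmb() and rte_rmb() are as strong as rte_mb(). As a sketch
of how the cmpset() primitive added here is conventionally used,
a retry loop built on the 32-bit API; the set_max() helper is
hypothetical, for illustration only:

        /* Atomically raise *max to val using compare-and-set. */
        static inline void
        set_max(volatile uint32_t *max, uint32_t val)
        {
                uint32_t cur;

                do {
                        cur = *max;
                        if (val <= cur)
                                return;
                        /* cmpset() returns nonzero on success */
                } while (rte_atomic32_cmpset(max, cur, val) == 0);
        }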
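A second sketch, showing typical use of the 64-bit counter API; the
pkts_seen counter and the three helpers are hypothetical. On this
port rte_atomic64_inc() resolves to the __sync_fetch_and_add() call
defined in the file below:

        #include <rte_atomic.h>

        static rte_atomic64_t pkts_seen;

        void stats_init(void)
        {
                rte_atomic64_init(&pkts_seen);  /* cnt = 0 */
        }

        void on_rx(void)
        {
                rte_atomic64_inc(&pkts_seen);   /* atomic increment */
        }

        int64_t stats_read(void)
        {
                return rte_atomic64_read(&pkts_seen);
        }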
 lib/eal/loongarch/include/rte_atomic.h | 253 +++++++++++++++++++++++++
 1 file changed, 253 insertions(+)
 create mode 100644 lib/eal/loongarch/include/rte_atomic.h

diff --git a/lib/eal/loongarch/include/rte_atomic.h b/lib/eal/loongarch/include/rte_atomic.h
new file mode 100644
index 0000000000..8e007e7f76
--- /dev/null
+++ b/lib/eal/loongarch/include/rte_atomic.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef _RTE_ATOMIC_LOONGARCH_H_
+#define _RTE_ATOMIC_LOONGARCH_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include "generic/rte_atomic.h"
+
+/**
+ * LoongArch Synchronize
+ */
+static inline void synchronize(void)
+{
+        __asm__ __volatile__("dbar 0":::"memory");
+}
+
+/**
+ * General memory barrier.
+ *
+ * Guarantees that the LOAD and STORE operations generated before the
+ * barrier occur before the LOAD and STORE operations generated after.
+ * This function is architecture dependent.
+ */
+#define rte_mb() synchronize()
+
+/**
+ * Write memory barrier.
+ *
+ * Guarantees that the STORE operations generated before the barrier
+ * occur before the STORE operations generated after.
+ * This function is architecture dependent.
+ */
+#define rte_wmb() synchronize()
+
+/**
+ * Read memory barrier.
+ *
+ * Guarantees that the LOAD operations generated before the barrier
+ * occur before the LOAD operations generated after.
+ * This function is architecture dependent.
+ */
+#define rte_rmb() synchronize()
+
+#define rte_smp_mb() rte_mb()
+
+#define rte_smp_wmb() rte_mb()
+
+#define rte_smp_rmb() rte_mb()
+
+#define rte_io_mb() rte_mb()
+
+#define rte_io_wmb() rte_mb()
+
+#define rte_io_rmb() rte_mb()
+
+static __rte_always_inline void
+rte_atomic_thread_fence(int memorder)
+{
+        __atomic_thread_fence(memorder);
+}
+
+#ifndef RTE_FORCE_INTRINSICS
+/*------------------------- 16 bit atomic operations -------------------------*/
+static inline int
+rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
+{
+        return __sync_bool_compare_and_swap(dst, exp, src);
+}
+
+static inline uint16_t
+rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
+{
+#if defined(__clang__)
+        return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
+#else
+        return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST);
+#endif
+}
+
+static inline void
+rte_atomic16_inc(rte_atomic16_t *v)
+{
+        rte_atomic16_add(v, 1);
+}
+
+static inline void
+rte_atomic16_dec(rte_atomic16_t *v)
+{
+        rte_atomic16_sub(v, 1);
+}
+
+static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
+{
+        return __sync_add_and_fetch(&v->cnt, 1) == 0;
+}
+
+static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
+{
+        return __sync_sub_and_fetch(&v->cnt, 1) == 0;
+}
+
+static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
+{
+        return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
+}
+
+/*------------------------- 32 bit atomic operations -------------------------*/
+static inline int
+rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
+{
+        return __sync_bool_compare_and_swap(dst, exp, src);
+}
+
+static inline uint32_t
+rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
+{
+#if defined(__clang__)
+        return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
+#else
+        return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST);
+#endif
+}
+
+static inline void
+rte_atomic32_inc(rte_atomic32_t *v)
+{
+        rte_atomic32_add(v, 1);
+}
+
+static inline void
+rte_atomic32_dec(rte_atomic32_t *v)
+{
+        rte_atomic32_sub(v, 1);
+}
+
+static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
+{
+        return __sync_add_and_fetch(&v->cnt, 1) == 0;
+}
+
+static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
+{
+        return __sync_sub_and_fetch(&v->cnt, 1) == 0;
+}
+
+static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
+{
+        return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
+}
+
+/*------------------------- 64 bit atomic operations -------------------------*/
+static inline int
+rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
+{
+        return __sync_bool_compare_and_swap(dst, exp, src);
+}
+
+static inline uint64_t
+rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
+{
+#if defined(__clang__)
+        return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
+#else
+        return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST);
+#endif
+}
+
+static inline void
+rte_atomic64_init(rte_atomic64_t *v)
+{
+        v->cnt = 0;
+}
+
+static inline int64_t
+rte_atomic64_read(rte_atomic64_t *v)
+{
+        return v->cnt;
+}
+
+static inline void
+rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
+{
+        v->cnt = new_value;
+}
+
+static inline void
+rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
+{
+        __sync_fetch_and_add(&v->cnt, inc);
+}
+
+static inline void
+rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
+{
+        __sync_fetch_and_sub(&v->cnt, dec);
+}
+
+static inline void
+rte_atomic64_inc(rte_atomic64_t *v)
+{
+        rte_atomic64_add(v, 1);
+}
+
+static inline void
+rte_atomic64_dec(rte_atomic64_t *v)
+{
+        rte_atomic64_sub(v, 1);
+}
+
+static inline int64_t
+rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
+{
+        return __sync_add_and_fetch(&v->cnt, inc);
+}
+
+static inline int64_t
+rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
+{
+        return __sync_sub_and_fetch(&v->cnt, dec);
+}
+
+static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
+{
+        return rte_atomic64_add_return(v, 1) == 0;
+}
+
+static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
+{
+        return rte_atomic64_sub_return(v, 1) == 0;
+}
+
+static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
+{
+        return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
+}
+
+static inline void rte_atomic64_clear(rte_atomic64_t *v)
+{
+        rte_atomic64_set(v, 0);
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_ATOMIC_LOONGARCH_H_ */
-- 
2.31.1