From: Min Zhou <zhoumin@loongson.cn>
To: thomas@monjalon.net, david.marchand@redhat.com,
bruce.richardson@intel.com, anatoly.burakov@intel.com,
qiming.yang@intel.com, Yuying.Zhang@intel.com,
jgrajcia@cisco.com, konstantin.v.ananyev@yandex.ru
Cc: dev@dpdk.org, maobibo@loongson.cn
Subject: [PATCH v4 01/24] eal/loongarch: add atomic operations for LoongArch
Date: Thu, 21 Jul 2022 20:51:21 +0800
Message-ID: <20220721125144.4028113-2-zhoumin@loongson.cn>
In-Reply-To: <20220721125144.4028113-1-zhoumin@loongson.cn>

This patch adds architecture-specific atomic operations for the
LoongArch architecture. The implementations use the toolchain's
standard atomic built-ins and are modelled closely on the generic
atomics code.

Signed-off-by: Min Zhou <zhoumin@loongson.cn>
---
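Notes:
For reviewers less familiar with the rte_atomic API that this header
implements, a minimal usage sketch follows. It is illustrative only
and not part of the patch; the counter name and the three helper
functions are hypothetical.

	#include <rte_atomic.h>

	static rte_atomic64_t pkt_count;

	static void stats_init(void)
	{
		rte_atomic64_init(&pkt_count);  /* sets cnt to 0 */
	}

	static void on_packet(void)
	{
		/* atomic increment; maps to __sync_fetch_and_add here */
		rte_atomic64_inc(&pkt_count);
	}

	static int64_t stats_read(void)
	{
		rte_smp_rmb();  /* expands to dbar 0 on LoongArch */
		return rte_atomic64_read(&pkt_count);
	}
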
lib/eal/loongarch/include/rte_atomic.h | 253 +++++++++++++++++++++++++
1 file changed, 253 insertions(+)
create mode 100644 lib/eal/loongarch/include/rte_atomic.h
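
The *_test_and_set helpers in the diff are built on the *_cmpset
helpers. For comparison only (this sketch is not used by the patch),
the same contract expressed with C11 atomics:

	#include <stdatomic.h>
	#include <stdint.h>

	/* Returns non-zero iff *v was 0 and has been set to 1, i.e. the
	 * same contract as rte_atomic64_test_and_set() in the diff. */
	static inline int test_and_set_c11(_Atomic uint64_t *v)
	{
		uint64_t expected = 0;
		return atomic_compare_exchange_strong(v, &expected, 1);
	}
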
diff --git a/lib/eal/loongarch/include/rte_atomic.h b/lib/eal/loongarch/include/rte_atomic.h
new file mode 100644
index 0000000000..8e007e7f76
--- /dev/null
+++ b/lib/eal/loongarch/include/rte_atomic.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef _RTE_ATOMIC_LOONGARCH_H_
+#define _RTE_ATOMIC_LOONGARCH_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include "generic/rte_atomic.h"
+
+/**
+ * LoongArch synchronize: full memory barrier via the DBAR instruction.
+ */
+static inline void synchronize(void)
+{
+ __asm__ __volatile__("dbar 0":::"memory");
+}
+
+/**
+ * General memory barrier.
+ *
+ * Guarantees that the LOAD and STORE operations generated before the
+ * barrier occur before the LOAD and STORE operations generated after.
+ * This function is architecture dependent.
+ */
+#define rte_mb() synchronize()
+
+/**
+ * Write memory barrier.
+ *
+ * Guarantees that the STORE operations generated before the barrier
+ * occur before the STORE operations generated after.
+ * This function is architecture dependent.
+ */
+#define rte_wmb() synchronize()
+
+/**
+ * Read memory barrier.
+ *
+ * Guarantees that the LOAD operations generated before the barrier
+ * occur before the LOAD operations generated after.
+ * This function is architecture dependent.
+ */
+#define rte_rmb() synchronize()
+
+#define rte_smp_mb() rte_mb()
+
+#define rte_smp_wmb() rte_mb()
+
+#define rte_smp_rmb() rte_mb()
+
+#define rte_io_mb() rte_mb()
+
+#define rte_io_wmb() rte_mb()
+
+#define rte_io_rmb() rte_mb()
+
+static __rte_always_inline void
+rte_atomic_thread_fence(int memorder)
+{
+ __atomic_thread_fence(memorder);
+}
+
+#ifndef RTE_FORCE_INTRINSICS
+/*------------------------- 16 bit atomic operations -------------------------*/
+static inline int
+rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
+{
+ return __sync_bool_compare_and_swap(dst, exp, src);
+}
+
+static inline uint16_t
+rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
+{
+#if defined(__clang__)
+ return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
+#else
+ return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST);
+#endif
+}
+
+static inline void
+rte_atomic16_inc(rte_atomic16_t *v)
+{
+ rte_atomic16_add(v, 1);
+}
+
+static inline void
+rte_atomic16_dec(rte_atomic16_t *v)
+{
+ rte_atomic16_sub(v, 1);
+}
+
+static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
+{
+ return __sync_add_and_fetch(&v->cnt, 1) == 0;
+}
+
+static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
+{
+ return __sync_sub_and_fetch(&v->cnt, 1) == 0;
+}
+
+static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
+{
+ return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
+}
+
+/*------------------------- 32 bit atomic operations -------------------------*/
+static inline int
+rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
+{
+ return __sync_bool_compare_and_swap(dst, exp, src);
+}
+
+static inline uint32_t
+rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
+{
+#if defined(__clang__)
+ return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
+#else
+ return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST);
+#endif
+}
+
+static inline void
+rte_atomic32_inc(rte_atomic32_t *v)
+{
+ rte_atomic32_add(v, 1);
+}
+
+static inline void
+rte_atomic32_dec(rte_atomic32_t *v)
+{
+ rte_atomic32_sub(v, 1);
+}
+
+static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
+{
+ return __sync_add_and_fetch(&v->cnt, 1) == 0;
+}
+
+static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
+{
+ return __sync_sub_and_fetch(&v->cnt, 1) == 0;
+}
+
+static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
+{
+ return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
+}
+
+/*------------------------- 64 bit atomic operations -------------------------*/
+static inline int
+rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
+{
+ return __sync_bool_compare_and_swap(dst, exp, src);
+}
+
+static inline uint64_t
+rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
+{
+#if defined(__clang__)
+ return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
+#else
+ return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST);
+#endif
+}
+
+static inline void
+rte_atomic64_init(rte_atomic64_t *v)
+{
+ v->cnt = 0;
+}
+
+static inline int64_t
+rte_atomic64_read(rte_atomic64_t *v)
+{
+ return v->cnt;
+}
+
+static inline void
+rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
+{
+ v->cnt = new_value;
+}
+
+static inline void
+rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
+{
+ __sync_fetch_and_add(&v->cnt, inc);
+}
+
+static inline void
+rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
+{
+ __sync_fetch_and_sub(&v->cnt, dec);
+}
+
+static inline void
+rte_atomic64_inc(rte_atomic64_t *v)
+{
+ rte_atomic64_add(v, 1);
+}
+
+static inline void
+rte_atomic64_dec(rte_atomic64_t *v)
+{
+ rte_atomic64_sub(v, 1);
+}
+
+static inline int64_t
+rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
+{
+ return __sync_add_and_fetch(&v->cnt, inc);
+}
+
+static inline int64_t
+rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
+{
+ return __sync_sub_and_fetch(&v->cnt, dec);
+}
+
+static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
+{
+ return rte_atomic64_add_return(v, 1) == 0;
+}
+
+static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
+{
+ return rte_atomic64_sub_return(v, 1) == 0;
+}
+
+static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
+{
+ return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
+}
+
+static inline void rte_atomic64_clear(rte_atomic64_t *v)
+{
+ rte_atomic64_set(v, 0);
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_ATOMIC_LOONGARCH_H_ */
--
2.31.1