From: Chao Zhu <bjzhuc@cn.ibm.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH 1/7] Split atomic operations to architecture specific
Date: Fri, 26 Sep 2014 05:33:32 -0400 [thread overview]
Message-ID: <1411724018-7738-2-git-send-email-bjzhuc@cn.ibm.com> (raw)
In-Reply-To: <1411724018-7738-1-git-send-email-bjzhuc@cn.ibm.com>
This patch splits the atomic operations out of the common DPDK code and
moves them into architecture-specific arch directories, so that support
for additional processor architectures can be added easily.
Signed-off-by: Chao Zhu <bjzhuc@cn.ibm.com>
---
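Note (illustrative, not part of this patch): after the split, a port to a
new CPU architecture only needs to supply its own arch/rte_atomic_arch.h
exporting the rte_arch_* names consumed by the generic rte_atomic.h. A
minimal sketch of such a file, assuming the hypothetical port simply falls
back on the GCC __sync builtins instead of hand-written assembly, could
look like the following (only the 32-bit counter is shown; the 16-bit
operations would follow the same pattern):

    /* Hypothetical arch/rte_atomic_arch.h for a new architecture port. */
    #ifndef _RTE_ATOMIC_ARCH_H_
    #define _RTE_ATOMIC_ARCH_H_

    #include <stdint.h>

    /* Full hardware barriers via the GCC builtin; a real port would use
     * the architecture's own (possibly lighter) barrier instructions. */
    #define rte_arch_mb()  __sync_synchronize()
    #define rte_arch_wmb() __sync_synchronize()
    #define rte_arch_rmb() __sync_synchronize()

    #define rte_arch_compiler_barrier() do { \
            asm volatile ("" : : : "memory"); \
    } while (0)

    typedef struct {
            volatile int32_t cnt; /**< An internal counter value. */
    } rte_atomic32_t;

    static inline int
    rte_arch_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
    {
            return __sync_bool_compare_and_swap(dst, exp, src);
    }

    static inline void
    rte_arch_atomic32_inc(rte_atomic32_t *v)
    {
            __sync_add_and_fetch(&v->cnt, 1);
    }

    static inline int
    rte_arch_atomic32_inc_and_test(rte_atomic32_t *v)
    {
            return (__sync_add_and_fetch(&v->cnt, 1) == 0);
    }

    #endif /* _RTE_ATOMIC_ARCH_H_ */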
lib/librte_eal/common/Makefile | 2 +-
.../common/include/i686/arch/rte_atomic_arch.h | 378 ++++++++++++++++++++
lib/librte_eal/common/include/rte_atomic.h | 172 +--------
.../common/include/x86_64/arch/rte_atomic_arch.h | 378 ++++++++++++++++++++
4 files changed, 772 insertions(+), 158 deletions(-)
create mode 100644 lib/librte_eal/common/include/i686/arch/rte_atomic_arch.h
create mode 100644 lib/librte_eal/common/include/x86_64/arch/rte_atomic_arch.h
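Existing callers are unaffected by the split: rte_atomic.h keeps the same
public names and types and now simply forwards to the rte_arch_* layer.
As an illustration (not part of the patch), reference-counting code written
against the public API keeps working unchanged on any architecture that
provides an arch header:

    #include <rte_atomic.h>

    struct obj {
            rte_atomic32_t refcnt; /* initialised with RTE_ATOMIC32_INIT(1) */
    };

    static void obj_get(struct obj *o)
    {
            rte_atomic32_inc(&o->refcnt);  /* forwards to rte_arch_atomic32_inc() */
    }

    /* Returns 1 when the last reference has just been dropped. */
    static int obj_put(struct obj *o)
    {
            return rte_atomic32_dec_and_test(&o->refcnt);
    }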
diff --git a/lib/librte_eal/common/Makefile b/lib/librte_eal/common/Makefile
index 7f27966..d730de5 100644
--- a/lib/librte_eal/common/Makefile
+++ b/lib/librte_eal/common/Makefile
@@ -46,7 +46,7 @@ ifeq ($(CONFIG_RTE_INSECURE_FUNCTION_WARNING),y)
INC += rte_warnings.h
endif
-ARCH_INC := rte_atomic.h
+ARCH_INC := rte_atomic.h rte_atomic_arch.h
SYMLINK-$(CONFIG_RTE_LIBRTE_EAL)-include := $(addprefix include/,$(INC))
SYMLINK-$(CONFIG_RTE_LIBRTE_EAL)-include/arch := \
diff --git a/lib/librte_eal/common/include/i686/arch/rte_atomic_arch.h b/lib/librte_eal/common/include/i686/arch/rte_atomic_arch.h
new file mode 100644
index 0000000..cb2d91d
--- /dev/null
+++ b/lib/librte_eal/common/include/i686/arch/rte_atomic_arch.h
@@ -0,0 +1,378 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ATOMIC_H_
+#error "don't include this file directly, please include generic <rte_atomic.h>"
+#endif
+
+#ifndef _RTE_ATOMIC_ARCH_H_
+#define _RTE_ATOMIC_ARCH_H_
+
+#include <stdint.h>
+
+#if RTE_MAX_LCORE == 1
+#define MPLOCKED /**< No need to insert MP lock prefix. */
+#else
+#define MPLOCKED "lock ; " /**< Insert MP lock prefix. */
+#endif
+
+/**
+ * General memory barrier.
+ *
+ * Guarantees that the LOAD and STORE operations generated before the
+ * barrier occur before the LOAD and STORE operations generated after.
+ */
+#define rte_arch_mb() _mm_mfence()
+
+/**
+ * Write memory barrier.
+ *
+ * Guarantees that the STORE operations generated before the barrier
+ * occur before the STORE operations generated after.
+ */
+#define rte_arch_wmb() _mm_sfence()
+
+/**
+ * Read memory barrier.
+ *
+ * Guarantees that the LOAD operations generated before the barrier
+ * occur before the LOAD operations generated after.
+ */
+#define rte_arch_rmb() _mm_lfence()
+
+/**
+ * Compiler barrier.
+ *
+ * Guarantees that operation reordering does not occur at compile time
+ * for operations directly before and after the barrier.
+ */
+#define rte_arch_compiler_barrier() do { \
+ asm volatile ("" : : : "memory"); \
+} while(0)
+
+#include <emmintrin.h>
+
+/*------------------------- 16 bit atomic operations -------------------------*/
+
+/**
+ * The atomic counter structure.
+ */
+typedef struct {
+ volatile int16_t cnt; /**< An internal counter value. */
+} rte_atomic16_t;
+
+/**
+ * Atomic compare and set.
+ *
+ * (atomic) equivalent to:
+ * if (*dst == exp)
+ * *dst = src (all 16-bit words)
+ *
+ * @param dst
+ * The destination location into which the value will be written.
+ * @param exp
+ * The expected value.
+ * @param src
+ * The new value.
+ * @return
+ * Non-zero on success; 0 on failure.
+ */
+static inline int
+rte_arch_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
+{
+#ifndef RTE_FORCE_INTRINSICS
+ uint8_t res;
+
+ asm volatile(
+ MPLOCKED
+ "cmpxchgw %[src], %[dst];"
+ "sete %[res];"
+ : [res] "=a" (res), /* output */
+ [dst] "=m" (*dst)
+ : [src] "r" (src), /* input */
+ "a" (exp),
+ "m" (*dst)
+ : "memory"); /* no-clobber list */
+ return res;
+#else
+ return __sync_bool_compare_and_swap(dst, exp, src);
+#endif
+}
+
+/**
+ * Atomically increment a counter by one.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void
+rte_arch_atomic16_inc(rte_atomic16_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+ asm volatile(
+ MPLOCKED
+ "incw %[cnt]"
+ : [cnt] "=m" (v->cnt) /* output */
+ : "m" (v->cnt) /* input */
+ );
+#else
+ rte_atomic16_add(v, 1);
+#endif
+}
+
+/**
+ * Atomically decrement a counter by one.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void
+rte_arch_atomic16_dec(rte_atomic16_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+ asm volatile(
+ MPLOCKED
+ "decw %[cnt]"
+ : [cnt] "=m" (v->cnt) /* output */
+ : "m" (v->cnt) /* input */
+ );
+#else
+ rte_atomic16_sub(v, 1);
+#endif
+}
+
+/**
+ * Atomically increment a 16-bit counter by one and test.
+ *
+ * Atomically increments the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * True if the result after the increment operation is 0; false otherwise.
+ */
+static inline int rte_arch_atomic16_inc_and_test(rte_atomic16_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+ uint8_t ret;
+
+ asm volatile(
+ MPLOCKED
+ "incw %[cnt] ; "
+ "sete %[ret]"
+ : [cnt] "+m" (v->cnt), /* output */
+ [ret] "=qm" (ret)
+ );
+ return (ret != 0);
+#else
+ return (__sync_add_and_fetch(&v->cnt, 1) == 0);
+#endif
+}
+
+/**
+ * Atomically decrement a 16-bit counter by one and test.
+ *
+ * Atomically decrements the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * True if the result after the decrement operation is 0; false otherwise.
+ */
+static inline int rte_arch_atomic16_dec_and_test(rte_atomic16_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+ uint8_t ret;
+
+ asm volatile(MPLOCKED
+ "decw %[cnt] ; "
+ "sete %[ret]"
+ : [cnt] "+m" (v->cnt), /* output */
+ [ret] "=qm" (ret)
+ );
+ return (ret != 0);
+#else
+ return (__sync_sub_and_fetch(&v->cnt, 1) == 0);
+#endif
+}
+
+/*------------------------- 32 bit atomic operations -------------------------*/
+
+/**
+ * The atomic counter structure.
+ */
+typedef struct {
+ volatile int32_t cnt; /**< An internal counter value. */
+} rte_atomic32_t;
+
+/**
+ * Atomic compare and set.
+ *
+ * (atomic) equivalent to:
+ * if (*dst == exp)
+ * *dst = src (all 32-bit words)
+ *
+ * @param dst
+ * The destination location into which the value will be written.
+ * @param exp
+ * The expected value.
+ * @param src
+ * The new value.
+ * @return
+ * Non-zero on success; 0 on failure.
+ */
+static inline int
+rte_arch_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
+{
+#ifndef RTE_FORCE_INTRINSICS
+ uint8_t res;
+
+ asm volatile(
+ MPLOCKED
+ "cmpxchgl %[src], %[dst];"
+ "sete %[res];"
+ : [res] "=a" (res), /* output */
+ [dst] "=m" (*dst)
+ : [src] "r" (src), /* input */
+ "a" (exp),
+ "m" (*dst)
+ : "memory"); /* no-clobber list */
+ return res;
+#else
+ return __sync_bool_compare_and_swap(dst, exp, src);
+#endif
+}
+
+/**
+ * Atomically increment a counter by one.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void
+rte_arch_atomic32_inc(rte_atomic32_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+ asm volatile(
+ MPLOCKED
+ "incl %[cnt]"
+ : [cnt] "=m" (v->cnt) /* output */
+ : "m" (v->cnt) /* input */
+ );
+#else
+ rte_atomic32_add(v, 1);
+#endif
+}
+
+/**
+ * Atomically decrement a counter by one.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void
+rte_arch_atomic32_dec(rte_atomic32_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+ asm volatile(
+ MPLOCKED
+ "decl %[cnt]"
+ : [cnt] "=m" (v->cnt) /* output */
+ : "m" (v->cnt) /* input */
+ );
+#else
+ rte_atomic32_sub(v,1);
+#endif
+}
+
+/**
+ * Atomically increment a 32-bit counter by one and test.
+ *
+ * Atomically increments the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * True if the result after the increment operation is 0; false otherwise.
+ */
+static inline int rte_arch_atomic32_inc_and_test(rte_atomic32_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+ uint8_t ret;
+
+ asm volatile(
+ MPLOCKED
+ "incl %[cnt] ; "
+ "sete %[ret]"
+ : [cnt] "+m" (v->cnt), /* output */
+ [ret] "=qm" (ret)
+ );
+ return (ret != 0);
+#else
+ return (__sync_add_and_fetch(&v->cnt, 1) == 0);
+#endif
+}
+
+/**
+ * Atomically decrement a 32-bit counter by one and test.
+ *
+ * Atomically decrements the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * True if the result after the decrement operation is 0; false otherwise.
+ */
+static inline int rte_arch_atomic32_dec_and_test(rte_atomic32_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+ uint8_t ret;
+
+ asm volatile(MPLOCKED
+ "decl %[cnt] ; "
+ "sete %[ret]"
+ : [cnt] "+m" (v->cnt), /* output */
+ [ret] "=qm" (ret)
+ );
+ return (ret != 0);
+#else
+ return (__sync_sub_and_fetch(&v->cnt, 1) == 0);
+#endif
+}
+
+#endif /* _RTE_ATOMIC_ARCH_H_ */
+
diff --git a/lib/librte_eal/common/include/rte_atomic.h b/lib/librte_eal/common/include/rte_atomic.h
index a5b6eec..24ba5d0 100644
--- a/lib/librte_eal/common/include/rte_atomic.h
+++ b/lib/librte_eal/common/include/rte_atomic.h
@@ -49,13 +49,7 @@
extern "C" {
#endif
-#include <stdint.h>
-
-#if RTE_MAX_LCORE == 1
-#define MPLOCKED /**< No need to insert MP lock prefix. */
-#else
-#define MPLOCKED "lock ; " /**< Insert MP lock prefix. */
-#endif
+#include "arch/rte_atomic_arch.h"
/**
* General memory barrier.
@@ -63,7 +57,7 @@ extern "C" {
* Guarantees that the LOAD and STORE operations generated before the
* barrier occur before the LOAD and STORE operations generated after.
*/
-#define rte_mb() _mm_mfence()
+#define rte_mb() rte_arch_mb()
/**
* Write memory barrier.
@@ -71,7 +65,7 @@ extern "C" {
* Guarantees that the STORE operations generated before the barrier
* occur before the STORE operations generated after.
*/
-#define rte_wmb() _mm_sfence()
+#define rte_wmb() rte_arch_wmb()
/**
* Read memory barrier.
@@ -79,7 +73,7 @@ extern "C" {
* Guarantees that the LOAD operations generated before the barrier
* occur before the LOAD operations generated after.
*/
-#define rte_rmb() _mm_lfence()
+#define rte_rmb() rte_arch_rmb()
/**
* Compiler barrier.
@@ -87,11 +81,7 @@ extern "C" {
* Guarantees that operation reordering does not occur at compile time
* for operations directly before and after the barrier.
*/
-#define rte_compiler_barrier() do { \
- asm volatile ("" : : : "memory"); \
-} while(0)
-
-#include <emmintrin.h>
+#define rte_compiler_barrier() rte_arch_compiler_barrier()
/**
* @file
@@ -119,33 +109,10 @@ extern "C" {
static inline int
rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
{
-#ifndef RTE_FORCE_INTRINSICS
- uint8_t res;
-
- asm volatile(
- MPLOCKED
- "cmpxchgw %[src], %[dst];"
- "sete %[res];"
- : [res] "=a" (res), /* output */
- [dst] "=m" (*dst)
- : [src] "r" (src), /* input */
- "a" (exp),
- "m" (*dst)
- : "memory"); /* no-clobber list */
- return res;
-#else
- return __sync_bool_compare_and_swap(dst, exp, src);
-#endif
+ return rte_arch_atomic16_cmpset(dst, exp, src);
}
/**
- * The atomic counter structure.
- */
-typedef struct {
- volatile int16_t cnt; /**< An internal counter value. */
-} rte_atomic16_t;
-
-/**
* Static initializer for an atomic counter.
*/
#define RTE_ATOMIC16_INIT(val) { (val) }
@@ -227,16 +194,7 @@ rte_atomic16_sub(rte_atomic16_t *v, int16_t dec)
static inline void
rte_atomic16_inc(rte_atomic16_t *v)
{
-#ifndef RTE_FORCE_INTRINSICS
- asm volatile(
- MPLOCKED
- "incw %[cnt]"
- : [cnt] "=m" (v->cnt) /* output */
- : "m" (v->cnt) /* input */
- );
-#else
- rte_atomic16_add(v, 1);
-#endif
+ rte_arch_atomic16_inc(v);
}
/**
@@ -248,16 +206,7 @@ rte_atomic16_inc(rte_atomic16_t *v)
static inline void
rte_atomic16_dec(rte_atomic16_t *v)
{
-#ifndef RTE_FORCE_INTRINSICS
- asm volatile(
- MPLOCKED
- "decw %[cnt]"
- : [cnt] "=m" (v->cnt) /* output */
- : "m" (v->cnt) /* input */
- );
-#else
- rte_atomic16_sub(v, 1);
-#endif
+ rte_arch_atomic16_dec(v);
}
/**
@@ -312,20 +261,7 @@ rte_atomic16_sub_return(rte_atomic16_t *v, int16_t dec)
*/
static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
{
-#ifndef RTE_FORCE_INTRINSICS
- uint8_t ret;
-
- asm volatile(
- MPLOCKED
- "incw %[cnt] ; "
- "sete %[ret]"
- : [cnt] "+m" (v->cnt), /* output */
- [ret] "=qm" (ret)
- );
- return (ret != 0);
-#else
- return (__sync_add_and_fetch(&v->cnt, 1) == 0);
-#endif
+ return rte_arch_atomic16_inc_and_test(v);
}
/**
@@ -341,19 +277,7 @@ static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
*/
static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
{
-#ifndef RTE_FORCE_INTRINSICS
- uint8_t ret;
-
- asm volatile(MPLOCKED
- "decw %[cnt] ; "
- "sete %[ret]"
- : [cnt] "+m" (v->cnt), /* output */
- [ret] "=qm" (ret)
- );
- return (ret != 0);
-#else
- return (__sync_sub_and_fetch(&v->cnt, 1) == 0);
-#endif
+ return rte_arch_atomic16_dec_and_test(v);
}
/**
@@ -404,33 +328,10 @@ static inline void rte_atomic16_clear(rte_atomic16_t *v)
static inline int
rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
{
-#ifndef RTE_FORCE_INTRINSICS
- uint8_t res;
-
- asm volatile(
- MPLOCKED
- "cmpxchgl %[src], %[dst];"
- "sete %[res];"
- : [res] "=a" (res), /* output */
- [dst] "=m" (*dst)
- : [src] "r" (src), /* input */
- "a" (exp),
- "m" (*dst)
- : "memory"); /* no-clobber list */
- return res;
-#else
- return __sync_bool_compare_and_swap(dst, exp, src);
-#endif
+ return rte_arch_atomic32_cmpset(dst, exp, src);
}
/**
- * The atomic counter structure.
- */
-typedef struct {
- volatile int32_t cnt; /**< An internal counter value. */
-} rte_atomic32_t;
-
-/**
* Static initializer for an atomic counter.
*/
#define RTE_ATOMIC32_INIT(val) { (val) }
@@ -512,16 +413,7 @@ rte_atomic32_sub(rte_atomic32_t *v, int32_t dec)
static inline void
rte_atomic32_inc(rte_atomic32_t *v)
{
-#ifndef RTE_FORCE_INTRINSICS
- asm volatile(
- MPLOCKED
- "incl %[cnt]"
- : [cnt] "=m" (v->cnt) /* output */
- : "m" (v->cnt) /* input */
- );
-#else
- rte_atomic32_add(v, 1);
-#endif
+ rte_arch_atomic32_inc(v);
}
/**
@@ -533,16 +425,7 @@ rte_atomic32_inc(rte_atomic32_t *v)
static inline void
rte_atomic32_dec(rte_atomic32_t *v)
{
-#ifndef RTE_FORCE_INTRINSICS
- asm volatile(
- MPLOCKED
- "decl %[cnt]"
- : [cnt] "=m" (v->cnt) /* output */
- : "m" (v->cnt) /* input */
- );
-#else
- rte_atomic32_sub(v,1);
-#endif
+ rte_arch_atomic32_dec(v);
}
/**
@@ -597,20 +480,7 @@ rte_atomic32_sub_return(rte_atomic32_t *v, int32_t dec)
*/
static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
{
-#ifndef RTE_FORCE_INTRINSICS
- uint8_t ret;
-
- asm volatile(
- MPLOCKED
- "incl %[cnt] ; "
- "sete %[ret]"
- : [cnt] "+m" (v->cnt), /* output */
- [ret] "=qm" (ret)
- );
- return (ret != 0);
-#else
- return (__sync_add_and_fetch(&v->cnt, 1) == 0);
-#endif
+ return rte_arch_atomic32_inc_and_test(v);
}
/**
@@ -626,19 +496,7 @@ static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
*/
static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
{
-#ifndef RTE_FORCE_INTRINSICS
- uint8_t ret;
-
- asm volatile(MPLOCKED
- "decl %[cnt] ; "
- "sete %[ret]"
- : [cnt] "+m" (v->cnt), /* output */
- [ret] "=qm" (ret)
- );
- return (ret != 0);
-#else
- return (__sync_sub_and_fetch(&v->cnt, 1) == 0);
-#endif
+ return rte_arch_atomic32_dec_and_test(v);
}
/**
diff --git a/lib/librte_eal/common/include/x86_64/arch/rte_atomic_arch.h b/lib/librte_eal/common/include/x86_64/arch/rte_atomic_arch.h
new file mode 100644
index 0000000..cb2d91d
--- /dev/null
+++ b/lib/librte_eal/common/include/x86_64/arch/rte_atomic_arch.h
@@ -0,0 +1,378 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ATOMIC_H_
+#error "don't include this file directly, please include generic <rte_atomic.h>"
+#endif
+
+#ifndef _RTE_ATOMIC_ARCH_H_
+#define _RTE_ATOMIC_ARCH_H_
+
+#include <stdint.h>
+
+#if RTE_MAX_LCORE == 1
+#define MPLOCKED /**< No need to insert MP lock prefix. */
+#else
+#define MPLOCKED "lock ; " /**< Insert MP lock prefix. */
+#endif
+
+/**
+ * General memory barrier.
+ *
+ * Guarantees that the LOAD and STORE operations generated before the
+ * barrier occur before the LOAD and STORE operations generated after.
+ */
+#define rte_arch_mb() _mm_mfence()
+
+/**
+ * Write memory barrier.
+ *
+ * Guarantees that the STORE operations generated before the barrier
+ * occur before the STORE operations generated after.
+ */
+#define rte_arch_wmb() _mm_sfence()
+
+/**
+ * Read memory barrier.
+ *
+ * Guarantees that the LOAD operations generated before the barrier
+ * occur before the LOAD operations generated after.
+ */
+#define rte_arch_rmb() _mm_lfence()
+
+/**
+ * Compiler barrier.
+ *
+ * Guarantees that operation reordering does not occur at compile time
+ * for operations directly before and after the barrier.
+ */
+#define rte_arch_compiler_barrier() do { \
+ asm volatile ("" : : : "memory"); \
+} while(0)
+
+#include <emmintrin.h>
+
+/*------------------------- 16 bit atomic operations -------------------------*/
+
+/**
+ * The atomic counter structure.
+ */
+typedef struct {
+ volatile int16_t cnt; /**< An internal counter value. */
+} rte_atomic16_t;
+
+/**
+ * Atomic compare and set.
+ *
+ * (atomic) equivalent to:
+ * if (*dst == exp)
+ * *dst = src (all 16-bit words)
+ *
+ * @param dst
+ * The destination location into which the value will be written.
+ * @param exp
+ * The expected value.
+ * @param src
+ * The new value.
+ * @return
+ * Non-zero on success; 0 on failure.
+ */
+static inline int
+rte_arch_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
+{
+#ifndef RTE_FORCE_INTRINSICS
+ uint8_t res;
+
+ asm volatile(
+ MPLOCKED
+ "cmpxchgw %[src], %[dst];"
+ "sete %[res];"
+ : [res] "=a" (res), /* output */
+ [dst] "=m" (*dst)
+ : [src] "r" (src), /* input */
+ "a" (exp),
+ "m" (*dst)
+ : "memory"); /* no-clobber list */
+ return res;
+#else
+ return __sync_bool_compare_and_swap(dst, exp, src);
+#endif
+}
+
+/**
+ * Atomically increment a counter by one.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void
+rte_arch_atomic16_inc(rte_atomic16_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+ asm volatile(
+ MPLOCKED
+ "incw %[cnt]"
+ : [cnt] "=m" (v->cnt) /* output */
+ : "m" (v->cnt) /* input */
+ );
+#else
+ rte_atomic16_add(v, 1);
+#endif
+}
+
+/**
+ * Atomically decrement a counter by one.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void
+rte_arch_atomic16_dec(rte_atomic16_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+ asm volatile(
+ MPLOCKED
+ "decw %[cnt]"
+ : [cnt] "=m" (v->cnt) /* output */
+ : "m" (v->cnt) /* input */
+ );
+#else
+ rte_atomic16_sub(v, 1);
+#endif
+}
+
+/**
+ * Atomically increment a 16-bit counter by one and test.
+ *
+ * Atomically increments the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * True if the result after the increment operation is 0; false otherwise.
+ */
+static inline int rte_arch_atomic16_inc_and_test(rte_atomic16_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+ uint8_t ret;
+
+ asm volatile(
+ MPLOCKED
+ "incw %[cnt] ; "
+ "sete %[ret]"
+ : [cnt] "+m" (v->cnt), /* output */
+ [ret] "=qm" (ret)
+ );
+ return (ret != 0);
+#else
+ return (__sync_add_and_fetch(&v->cnt, 1) == 0);
+#endif
+}
+
+/**
+ * Atomically decrement a 16-bit counter by one and test.
+ *
+ * Atomically decrements the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * True if the result after the decrement operation is 0; false otherwise.
+ */
+static inline int rte_arch_atomic16_dec_and_test(rte_atomic16_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+ uint8_t ret;
+
+ asm volatile(MPLOCKED
+ "decw %[cnt] ; "
+ "sete %[ret]"
+ : [cnt] "+m" (v->cnt), /* output */
+ [ret] "=qm" (ret)
+ );
+ return (ret != 0);
+#else
+ return (__sync_sub_and_fetch(&v->cnt, 1) == 0);
+#endif
+}
+
+/*------------------------- 32 bit atomic operations -------------------------*/
+
+/**
+ * The atomic counter structure.
+ */
+typedef struct {
+ volatile int32_t cnt; /**< An internal counter value. */
+} rte_atomic32_t;
+
+/**
+ * Atomic compare and set.
+ *
+ * (atomic) equivalent to:
+ * if (*dst == exp)
+ * *dst = src (all 32-bit words)
+ *
+ * @param dst
+ * The destination location into which the value will be written.
+ * @param exp
+ * The expected value.
+ * @param src
+ * The new value.
+ * @return
+ * Non-zero on success; 0 on failure.
+ */
+static inline int
+rte_arch_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
+{
+#ifndef RTE_FORCE_INTRINSICS
+ uint8_t res;
+
+ asm volatile(
+ MPLOCKED
+ "cmpxchgl %[src], %[dst];"
+ "sete %[res];"
+ : [res] "=a" (res), /* output */
+ [dst] "=m" (*dst)
+ : [src] "r" (src), /* input */
+ "a" (exp),
+ "m" (*dst)
+ : "memory"); /* no-clobber list */
+ return res;
+#else
+ return __sync_bool_compare_and_swap(dst, exp, src);
+#endif
+}
+
+/**
+ * Atomically increment a counter by one.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void
+rte_arch_atomic32_inc(rte_atomic32_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+ asm volatile(
+ MPLOCKED
+ "incl %[cnt]"
+ : [cnt] "=m" (v->cnt) /* output */
+ : "m" (v->cnt) /* input */
+ );
+#else
+ rte_atomic32_add(v, 1);
+#endif
+}
+
+/**
+ * Atomically decrement a counter by one.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ */
+static inline void
+rte_arch_atomic32_dec(rte_atomic32_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+ asm volatile(
+ MPLOCKED
+ "decl %[cnt]"
+ : [cnt] "=m" (v->cnt) /* output */
+ : "m" (v->cnt) /* input */
+ );
+#else
+ rte_atomic32_sub(v,1);
+#endif
+}
+
+/**
+ * Atomically increment a 32-bit counter by one and test.
+ *
+ * Atomically increments the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * True if the result after the increment operation is 0; false otherwise.
+ */
+static inline int rte_arch_atomic32_inc_and_test(rte_atomic32_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+ uint8_t ret;
+
+ asm volatile(
+ MPLOCKED
+ "incl %[cnt] ; "
+ "sete %[ret]"
+ : [cnt] "+m" (v->cnt), /* output */
+ [ret] "=qm" (ret)
+ );
+ return (ret != 0);
+#else
+ return (__sync_add_and_fetch(&v->cnt, 1) == 0);
+#endif
+}
+
+/**
+ * Atomically decrement a 32-bit counter by one and test.
+ *
+ * Atomically decrements the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ * A pointer to the atomic counter.
+ * @return
+ * True if the result after the decrement operation is 0; false otherwise.
+ */
+static inline int rte_arch_atomic32_dec_and_test(rte_atomic32_t *v)
+{
+#ifndef RTE_FORCE_INTRINSICS
+ uint8_t ret;
+
+ asm volatile(MPLOCKED
+ "decl %[cnt] ; "
+ "sete %[ret]"
+ : [cnt] "+m" (v->cnt), /* output */
+ [ret] "=qm" (ret)
+ );
+ return (ret != 0);
+#else
+ return (__sync_sub_and_fetch(&v->cnt, 1) == 0);
+#endif
+}
+
+#endif /* _RTE_ATOMIC_ARCH_H_ */
+
--
1.7.1