DPDK patches and discussions
From: Feifei Wang <feifei.wang2@arm.com>
To: Ruifeng Wang <ruifeng.wang@arm.com>
Cc: dev@dpdk.org, nd@arm.com, Feifei Wang <feifei.wang2@arm.com>
Subject: [dpdk-dev] [RFC PATCH v1 1/5] eal: add new API for wait until scheme
Date: Thu,  2 Sep 2021 13:32:49 +0800	[thread overview]
Message-ID: <20210902053253.3017858-2-feifei.wang2@arm.com> (raw)
In-Reply-To: <20210902053253.3017858-1-feifei.wang2@arm.com>

For the 'wait until' scheme, add new APIs to cover more cases:
1. add wait_until_unequal APIs
2. add wait_until_part_equal APIs
3. add wait_until_part_unequal APIs
A usage sketch of the new helpers is shown below.
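
The sketch below shows one way a consumer core could use the new helpers to
block on a masked status field and on a changing sequence counter. The
'status'/'seq' variables and the DONE_MASK/DONE_VAL constants are hypothetical
examples for illustration, not code from this patch:

	#include <stdint.h>
	#include <rte_pause.h>

	/* Hypothetical layout: bits [3:0] of 'status' carry a completion code. */
	#define DONE_MASK 0x000fu
	#define DONE_VAL  0x0003u

	static volatile uint32_t status;	/* written by a producer core */
	static volatile uint16_t seq;		/* bumped by a producer core */

	static void
	consumer_wait(void)
	{
		/* Sleep (WFE-based on aarch64 when enabled, rte_pause()
		 * otherwise) until the masked bits of 'status' read back
		 * as DONE_VAL.
		 */
		rte_wait_until_part_equal_32(&status, DONE_MASK, DONE_VAL,
				__ATOMIC_ACQUIRE);

		/* Sleep until 'seq' changes from the last observed value. */
		uint16_t last = __atomic_load_n(&seq, __ATOMIC_RELAXED);
		rte_wait_until_unequal_16(&seq, last, __ATOMIC_RELAXED);
	}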

Signed-off-by: Feifei Wang <feifei.wang2@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
---
 lib/eal/arm/include/rte_pause_64.h  | 271 +++++++++++++++++++-----
 lib/eal/include/generic/rte_pause.h | 309 ++++++++++++++++++++++++++++
 2 files changed, 526 insertions(+), 54 deletions(-)

diff --git a/lib/eal/arm/include/rte_pause_64.h b/lib/eal/arm/include/rte_pause_64.h
index e87d10b8cc..19716276fc 100644
--- a/lib/eal/arm/include/rte_pause_64.h
+++ b/lib/eal/arm/include/rte_pause_64.h
@@ -31,20 +31,12 @@ static inline void rte_pause(void)
 /* Put processor into low power WFE(Wait For Event) state. */
 #define __WFE() { asm volatile("wfe" : : : "memory"); }
 
-static __rte_always_inline void
-rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
-		int memorder)
-{
-	uint16_t value;
-
-	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
-
-	/*
-	 * Atomic exclusive load from addr, it returns the 16-bit content of
-	 * *addr while making it 'monitored',when it is written by someone
-	 * else, the 'monitored' state is cleared and a event is generated
-	 * implicitly to exit WFE.
-	 */
+/*
+ * Atomic exclusive load from addr; it returns the 16-bit content of
+ * *addr while making it 'monitored'. When it is written by someone
+ * else, the 'monitored' state is cleared and an event is generated
+ * implicitly to exit WFE.
+ */
 #define __LOAD_EXC_16(src, dst, memorder) {               \
 	if (memorder == __ATOMIC_RELAXED) {               \
 		asm volatile("ldxrh %w[tmp], [%x[addr]]"  \
@@ -58,6 +50,52 @@ rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
 			: "memory");                      \
 	} }
 
+/*
+ * Atomic exclusive load from addr; it returns the 32-bit content of
+ * *addr while making it 'monitored'. When it is written by someone
+ * else, the 'monitored' state is cleared and an event is generated
+ * implicitly to exit WFE.
+ */
+#define __LOAD_EXC_32(src, dst, memorder) {              \
+	if (memorder == __ATOMIC_RELAXED) {              \
+		asm volatile("ldxr %w[tmp], [%x[addr]]"  \
+			: [tmp] "=&r" (dst)              \
+			: [addr] "r"(src)                \
+			: "memory");                     \
+	} else {                                         \
+		asm volatile("ldaxr %w[tmp], [%x[addr]]" \
+			: [tmp] "=&r" (dst)              \
+			: [addr] "r"(src)                \
+			: "memory");                     \
+	} }
+
+/*
+ * Atomic exclusive load from addr; it returns the 64-bit content of
+ * *addr while making it 'monitored'. When it is written by someone
+ * else, the 'monitored' state is cleared and an event is generated
+ * implicitly to exit WFE.
+ */
+#define __LOAD_EXC_64(src, dst, memorder) {              \
+	if (memorder == __ATOMIC_RELAXED) {              \
+		asm volatile("ldxr %x[tmp], [%x[addr]]"  \
+			: [tmp] "=&r" (dst)              \
+			: [addr] "r"(src)                \
+			: "memory");                     \
+	} else {                                         \
+		asm volatile("ldaxr %x[tmp], [%x[addr]]" \
+			: [tmp] "=&r" (dst)              \
+			: [addr] "r"(src)                \
+			: "memory");                     \
+	} }
+
+static __rte_always_inline void
+rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
+		int memorder)
+{
+	uint16_t value;
+
+	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
 	__LOAD_EXC_16(addr, value, memorder)
 	if (value != expected) {
 		__SEVL()
@@ -66,7 +104,6 @@ rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
 			__LOAD_EXC_16(addr, value, memorder)
 		} while (value != expected);
 	}
-#undef __LOAD_EXC_16
 }
 
 static __rte_always_inline void
@@ -77,25 +114,6 @@ rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
 
 	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
 
-	/*
-	 * Atomic exclusive load from addr, it returns the 32-bit content of
-	 * *addr while making it 'monitored',when it is written by someone
-	 * else, the 'monitored' state is cleared and a event is generated
-	 * implicitly to exit WFE.
-	 */
-#define __LOAD_EXC_32(src, dst, memorder) {              \
-	if (memorder == __ATOMIC_RELAXED) {              \
-		asm volatile("ldxr %w[tmp], [%x[addr]]"  \
-			: [tmp] "=&r" (dst)              \
-			: [addr] "r"(src)                \
-			: "memory");                     \
-	} else {                                         \
-		asm volatile("ldaxr %w[tmp], [%x[addr]]" \
-			: [tmp] "=&r" (dst)              \
-			: [addr] "r"(src)                \
-			: "memory");                     \
-	} }
-
 	__LOAD_EXC_32(addr, value, memorder)
 	if (value != expected) {
 		__SEVL()
@@ -104,7 +122,6 @@ rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
 			__LOAD_EXC_32(addr, value, memorder)
 		} while (value != expected);
 	}
-#undef __LOAD_EXC_32
 }
 
 static __rte_always_inline void
@@ -115,25 +132,6 @@ rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
 
 	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
 
-	/*
-	 * Atomic exclusive load from addr, it returns the 64-bit content of
-	 * *addr while making it 'monitored',when it is written by someone
-	 * else, the 'monitored' state is cleared and a event is generated
-	 * implicitly to exit WFE.
-	 */
-#define __LOAD_EXC_64(src, dst, memorder) {              \
-	if (memorder == __ATOMIC_RELAXED) {              \
-		asm volatile("ldxr %x[tmp], [%x[addr]]"  \
-			: [tmp] "=&r" (dst)              \
-			: [addr] "r"(src)                \
-			: "memory");                     \
-	} else {                                         \
-		asm volatile("ldaxr %x[tmp], [%x[addr]]" \
-			: [tmp] "=&r" (dst)              \
-			: [addr] "r"(src)                \
-			: "memory");                     \
-	} }
-
 	__LOAD_EXC_64(addr, value, memorder)
 	if (value != expected) {
 		__SEVL()
@@ -143,6 +141,171 @@ rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
 		} while (value != expected);
 	}
 }
+
+static __rte_always_inline void
+rte_wait_until_part_equal_16(volatile uint16_t *addr, uint16_t mask,
+		uint16_t expected, int memorder)
+{
+	uint16_t value;
+
+	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+	__LOAD_EXC_16(addr, value, memorder)
+	if ((value & mask) != expected) {
+		__SEVL()
+		do {
+			__WFE()
+			__LOAD_EXC_16(addr, value, memorder)
+		} while ((value & mask) != expected);
+	}
+}
+
+static __rte_always_inline void
+rte_wait_until_part_equal_32(volatile uint32_t *addr, uint32_t mask,
+		uint32_t expected, int memorder)
+{
+	uint32_t value;
+
+	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+	__LOAD_EXC_32(addr, value, memorder)
+	if ((value & mask) != expected) {
+		__SEVL()
+		do {
+			__WFE()
+			__LOAD_EXC_32(addr, value, memorder)
+		} while ((value & mask) != expected);
+	}
+}
+
+static __rte_always_inline void
+rte_wait_until_part_equal_64(volatile uint64_t *addr, uint64_t mask,
+		uint64_t expected, int memorder)
+{
+	uint64_t value;
+
+	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+	__LOAD_EXC_64(addr, value, memorder)
+	if ((value & mask) != expected) {
+		__SEVL()
+		do {
+			__WFE()
+			__LOAD_EXC_64(addr, value, memorder)
+		} while ((value & mask) != expected);
+	}
+}
+
+static __rte_always_inline void
+rte_wait_until_unequal_16(volatile uint16_t *addr, uint16_t original,
+		int memorder)
+{
+	uint16_t value;
+
+	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+	__LOAD_EXC_16(addr, value, memorder)
+	if (value == original) {
+		__SEVL()
+		do {
+			__WFE()
+			__LOAD_EXC_16(addr, value, memorder)
+		} while (value == original);
+	}
+}
+
+static __rte_always_inline void
+rte_wait_until_unequal_32(volatile uint32_t *addr, uint32_t original,
+		int memorder)
+{
+	uint32_t value;
+
+	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+	__LOAD_EXC_32(addr, value, memorder)
+	if (value == original) {
+		__SEVL()
+		do {
+			__WFE()
+			__LOAD_EXC_32(addr, value, memorder)
+		} while (value == original);
+	}
+}
+
+static __rte_always_inline void
+rte_wait_until_unequal_64(volatile uint64_t *addr, uint64_t original,
+		int memorder)
+{
+	uint64_t value;
+
+	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+	__LOAD_EXC_64(addr, value, memorder)
+	if (value == original) {
+		__SEVL()
+		do {
+			__WFE()
+			__LOAD_EXC_64(addr, value, memorder)
+		} while (value == original);
+	}
+}
+
+static __rte_always_inline void
+rte_wait_until_part_unequal_16(volatile uint16_t *addr, uint16_t mask,
+		uint16_t original, int memorder)
+{
+	uint16_t value;
+
+	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+	__LOAD_EXC_16(addr, value, memorder)
+	if ((value & mask) == original) {
+		__SEVL()
+		do {
+			__WFE()
+			__LOAD_EXC_16(addr, value, memorder)
+		} while ((value & mask) == original);
+	}
+}
+
+static __rte_always_inline void
+rte_wait_until_part_unequal_32(volatile uint32_t *addr, uint32_t mask,
+		uint32_t original, int memorder)
+{
+	uint32_t value;
+
+	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+	__LOAD_EXC_32(addr, value, memorder)
+	if ((value & mask) == original) {
+		__SEVL()
+		do {
+			__WFE()
+			__LOAD_EXC_32(addr, value, memorder)
+		} while ((value & mask) == original);
+	}
+}
+
+static __rte_always_inline void
+rte_wait_until_part_unequal_64(volatile uint64_t *addr, uint64_t mask,
+		uint64_t original, int memorder)
+{
+	uint64_t value;
+
+	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+	__LOAD_EXC_64(addr, value, memorder)
+	if ((value & mask) == original) {
+		__SEVL()
+		do {
+			__WFE()
+			__LOAD_EXC_64(addr, value, memorder)
+		} while ((value & mask) == original);
+	}
+}
+
+#undef __LOAD_EXC_16
+#undef __LOAD_EXC_32
 #undef __LOAD_EXC_64
 
 #undef __SEVL
diff --git a/lib/eal/include/generic/rte_pause.h b/lib/eal/include/generic/rte_pause.h
index 668ee4a184..943a886f01 100644
--- a/lib/eal/include/generic/rte_pause.h
+++ b/lib/eal/include/generic/rte_pause.h
@@ -81,6 +81,222 @@ static __rte_always_inline void
 rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
 		int memorder);
 
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Wait for the masked bits of *addr to be equal to a 16-bit expected value,
+ * with a relaxed memory ordering model meaning the loads around this API can
+ * be reordered.
+ *
+ * @param addr
+ *  A pointer to the memory location.
+ * @param mask
+ *  A mask of the bits in *addr to compare with the expected value.
+ * @param expected
+ *  A 16-bit expected value to be in the memory location.
+ * @param memorder
+ *  Two different memory orders that can be specified:
+ *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
+ *  C++11 memory orders with the same names, see the C++11 standard or
+ *  the GCC wiki on atomic synchronization for detailed definition.
+ */
+__rte_experimental
+static __rte_always_inline void
+rte_wait_until_part_equal_16(volatile uint16_t *addr, uint16_t mask,
+		uint16_t expected, int memorder);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Wait for the masked bits of *addr to be equal to a 32-bit expected value,
+ * with a relaxed memory ordering model meaning the loads around this API can
+ * be reordered.
+ *
+ * @param addr
+ *  A pointer to the memory location.
+ * @param mask
+ *  A mask of the bits in *addr to compare with the expected value.
+ * @param expected
+ *  A 32-bit expected value to be in the memory location.
+ * @param memorder
+ *  Two different memory orders that can be specified:
+ *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
+ *  C++11 memory orders with the same names, see the C++11 standard or
+ *  the GCC wiki on atomic synchronization for detailed definition.
+ */
+__rte_experimental
+static __rte_always_inline void
+rte_wait_until_part_equal_32(volatile uint32_t *addr, uint32_t mask,
+		uint32_t expected, int memorder);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Wait for the masked bits of *addr to be equal to a 64-bit expected value,
+ * with a relaxed memory ordering model meaning the loads around this API can
+ * be reordered.
+ *
+ * @param addr
+ *  A pointer to the memory location.
+ * @param mask
+ *  A mask of the bits in *addr to compare with the expected value.
+ * @param expected
+ *  A 64-bit expected value to be in the memory location.
+ * @param memorder
+ *  Two different memory orders that can be specified:
+ *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
+ *  C++11 memory orders with the same names, see the C++11 standard or
+ *  the GCC wiki on atomic synchronization for detailed definition.
+ */
+__rte_experimental
+static __rte_always_inline void
+rte_wait_until_part_equal_64(volatile uint64_t *addr, uint64_t mask,
+		uint64_t expected, int memorder);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Wait for *addr to become unequal to a 16-bit original value, with a relaxed
+ * memory ordering model meaning the loads around this API can be reordered.
+ *
+ * @param addr
+ *  A pointer to the memory location.
+ * @param original
+ *  A 16-bit original value to be in the memory location.
+ * @param memorder
+ *  Two different memory orders that can be specified:
+ *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
+ *  C++11 memory orders with the same names, see the C++11 standard or
+ *  the GCC wiki on atomic synchronization for detailed definition.
+ */
+__rte_experimental
+static __rte_always_inline void
+rte_wait_until_unequal_16(volatile uint16_t *addr, uint16_t original,
+		int memorder);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Wait for *addr to become unequal to a 32-bit original value, with a relaxed
+ * memory ordering model meaning the loads around this API can be reordered.
+ *
+ * @param addr
+ *  A pointer to the memory location.
+ * @param original
+ *  A 32-bit original value to be in the memory location.
+ * @param memorder
+ *  Two different memory orders that can be specified:
+ *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
+ *  C++11 memory orders with the same names, see the C++11 standard or
+ *  the GCC wiki on atomic synchronization for detailed definition.
+ */
+__rte_experimental
+static __rte_always_inline void
+rte_wait_until_unequal_32(volatile uint32_t *addr, uint32_t original,
+		int memorder);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Wait for *addr to become unequal to a 64-bit original value, with a relaxed
+ * memory ordering model meaning the loads around this API can be reordered.
+ *
+ * @param addr
+ *  A pointer to the memory location.
+ * @param original
+ *  A 64-bit original value to be in the memory location.
+ * @param memorder
+ *  Two different memory orders that can be specified:
+ *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
+ *  C++11 memory orders with the same names, see the C++11 standard or
+ *  the GCC wiki on atomic synchronization for detailed definition.
+ */
+__rte_experimental
+static __rte_always_inline void
+rte_wait_until_unequal_64(volatile uint64_t *addr, uint64_t original,
+		int memorder);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Wait for the masked bits of *addr to become unequal to a 16-bit original
+ * value, with a relaxed memory ordering model meaning the loads around this
+ * API can be reordered.
+ *
+ * @param addr
+ *  A pointer to the memory location.
+ * @param mask
+ *  A mask of the bits in *addr to compare with the original value.
+ * @param original
+ *  A 16-bit original value to be in the memory location.
+ * @param memorder
+ *  Two different memory orders that can be specified:
+ *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
+ *  C++11 memory orders with the same names, see the C++11 standard or
+ *  the GCC wiki on atomic synchronization for detailed definition.
+ */
+__rte_experimental
+static __rte_always_inline void
+rte_wait_until_part_unequal_16(volatile uint16_t *addr, uint16_t mask,
+		uint16_t original, int memorder);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Wait for the masked bits of *addr to become unequal to a 32-bit original
+ * value, with a relaxed memory ordering model meaning the loads around this
+ * API can be reordered.
+ *
+ * @param addr
+ *  A pointer to the memory location.
+ * @param mask
+ *  A mask of the bits in *addr to compare with the original value.
+ * @param original
+ *  A 32-bit original value to be in the memory location.
+ * @param memorder
+ *  Two different memory orders that can be specified:
+ *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
+ *  C++11 memory orders with the same names, see the C++11 standard or
+ *  the GCC wiki on atomic synchronization for detailed definition.
+ */
+__rte_experimental
+static __rte_always_inline void
+rte_wait_until_part_unequal_32(volatile uint32_t *addr, uint32_t mask,
+		uint32_t original, int memorder);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Wait for the masked bits of *addr to become unequal to a 64-bit original
+ * value, with a relaxed memory ordering model meaning the loads around this
+ * API can be reordered.
+ *
+ * @param addr
+ *  A pointer to the memory location.
+ * @param mask
+ *  A mask of the bits in *addr to compare with the original value.
+ * @param original
+ *  A 64-bit original value to be in the memory location.
+ * @param memorder
+ *  Two different memory orders that can be specified:
+ *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
+ *  C++11 memory orders with the same names, see the C++11 standard or
+ *  the GCC wiki on atomic synchronization for detailed definition.
+ */
+__rte_experimental
+static __rte_always_inline void
+rte_wait_until_part_unequal_64(volatile uint64_t *addr, uint64_t mask,
+		uint64_t original, int memorder);
+
 #ifndef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
 static __rte_always_inline void
 rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
@@ -111,6 +327,99 @@ rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
 	while (__atomic_load_n(addr, memorder) != expected)
 		rte_pause();
 }
+
+static __rte_always_inline void
+rte_wait_until_part_equal_16(volatile uint16_t *addr, uint16_t mask,
+		uint16_t expected, int memorder)
+{
+	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+	while ((__atomic_load_n(addr, memorder) & mask) != expected)
+		rte_pause();
+}
+
+static __rte_always_inline void
+rte_wait_until_part_equal_32(volatile uint32_t *addr, uint32_t mask,
+		uint32_t expected, int memorder)
+{
+	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+	while ((__atomic_load_n(addr, memorder) & mask) != expected)
+		rte_pause();
+}
+
+static __rte_always_inline void
+rte_wait_until_part_equal_64(volatile uint64_t *addr, uint64_t mask,
+		uint64_t expected, int memorder)
+{
+	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+	while ((__atomic_load_n(addr, memorder) & mask) != expected)
+		rte_pause();
+}
+
+static __rte_always_inline void
+rte_wait_until_unequal_16(volatile uint16_t *addr, uint16_t original,
+		int memorder)
+{
+	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+	while (__atomic_load_n(addr, memorder) == original)
+		rte_pause();
+}
+
+static __rte_always_inline void
+rte_wait_until_unequal_32(volatile uint32_t *addr, uint32_t original,
+		int memorder)
+{
+	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+	while (__atomic_load_n(addr, memorder) == original)
+		rte_pause();
+}
+
+static __rte_always_inline void
+rte_wait_until_unequal_64(volatile uint64_t *addr, uint64_t original,
+		int memorder)
+{
+	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+	while (__atomic_load_n(addr, memorder) == original)
+		rte_pause();
+}
+
+static __rte_always_inline void
+rte_wait_until_part_unequal_16(volatile uint16_t *addr, uint16_t mask,
+		uint16_t original, int memorder)
+{
+	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+	while ((__atomic_load_n(addr, memorder) & mask) == original)
+		rte_pause();
+}
+
+static __rte_always_inline void
+rte_wait_until_part_unequal_32(volatile uint32_t *addr, uint32_t mask,
+		uint32_t original, int memorder)
+{
+	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+	while ((__atomic_load_n(addr, memorder) & mask) == original)
+		rte_pause();
+}
+
+static __rte_always_inline void
+rte_wait_until_part_unequal_64(volatile uint64_t *addr, uint64_t mask,
+		uint64_t original, int memorder)
+{
+	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+	while ((__atomic_load_n(addr, memorder) & mask) == original)
+		rte_pause();
+}
 #endif
 
 #endif /* _RTE_PAUSE_H_ */
-- 
2.25.1



Thread overview: 113+ messages
2021-09-02  5:32 [dpdk-dev] [RFC PATCH v1 0/5] " Feifei Wang
2021-09-02  5:32 ` Feifei Wang [this message]
2021-09-02  5:32 ` [dpdk-dev] [RFC PATCH v1 2/5] eal: use wait until scheme for read pflock Feifei Wang
2021-09-02  5:32 ` [dpdk-dev] [RFC PATCH v1 3/5] eal: use wait until scheme for mcslock Feifei Wang
2021-09-02  5:32 ` [dpdk-dev] [RFC PATCH v1 4/5] lib/bpf: use wait until scheme for Rx/Tx iteration Feifei Wang
2021-09-02  5:32 ` [dpdk-dev] [RFC PATCH v1 5/5] lib/distributor: use wait until scheme Feifei Wang
2021-09-02 15:22 ` [dpdk-dev] [RFC PATCH v1 0/5] add new API for " Stephen Hemminger
2021-09-03  7:02   ` [dpdk-dev] 回复: " Feifei Wang
2021-09-23  9:58 ` [dpdk-dev] [RFC PATCH v2 0/5] add new definitions for wait scheme Feifei Wang
2021-09-23  9:58   ` [dpdk-dev] [RFC PATCH v2 1/5] eal: " Feifei Wang
2021-09-23  9:58   ` [dpdk-dev] [RFC PATCH v2 2/5] eal: use wait event for read pflock Feifei Wang
2021-09-23  9:59   ` [dpdk-dev] [RFC PATCH v2 3/5] eal: use wait event scheme for mcslock Feifei Wang
2021-09-23  9:59   ` [dpdk-dev] [RFC PATCH v2 4/5] lib/bpf: use wait event scheme for Rx/Tx iteration Feifei Wang
2021-09-24 18:07     ` Ananyev, Konstantin
2021-09-26  2:19       ` [dpdk-dev] 回复: " Feifei Wang
2021-09-23  9:59   ` [dpdk-dev] [RFC PATCH v2 5/5] lib/distributor: use wait event scheme Feifei Wang
2021-09-26  6:32 ` [dpdk-dev] [RFC PATCH v3 0/5] add new definitions for wait scheme Feifei Wang
2021-09-26  6:32   ` [dpdk-dev] [RFC PATCH v3 1/5] eal: " Feifei Wang
2021-10-07 16:18     ` Ananyev, Konstantin
2021-10-12  8:09       ` [dpdk-dev] 回复: " Feifei Wang
2021-10-13 15:03         ` [dpdk-dev] " Ananyev, Konstantin
2021-10-13 17:00           ` Stephen Hemminger
2021-10-14  3:14             ` [dpdk-dev] 回复: " Feifei Wang
2021-10-14  3:08           ` Feifei Wang
2021-09-26  6:32   ` [dpdk-dev] [RFC PATCH v3 2/5] eal: use wait event for read pflock Feifei Wang
2021-09-26  6:33   ` [dpdk-dev] [RFC PATCH v3 3/5] eal: use wait event scheme for mcslock Feifei Wang
2021-09-26  6:33   ` [dpdk-dev] [RFC PATCH v3 4/5] lib/bpf: use wait event scheme for Rx/Tx iteration Feifei Wang
2021-10-07 15:50     ` Ananyev, Konstantin
2021-10-07 17:40       ` Ananyev, Konstantin
2021-10-20  6:20         ` [dpdk-dev] 回复: " Feifei Wang
2021-09-26  6:33   ` [dpdk-dev] [RFC PATCH v3 5/5] lib/distributor: use wait event scheme Feifei Wang
2021-10-20  8:45   ` [dpdk-dev] [PATCH v4 0/5] add new definitions for wait scheme Feifei Wang
2021-10-20  8:45     ` [dpdk-dev] [PATCH v4 1/5] eal: " Feifei Wang
2021-10-21 16:24       ` Ananyev, Konstantin
2021-10-25  9:20         ` [dpdk-dev] 回复: " Feifei Wang
2021-10-25 14:28           ` [dpdk-dev] " Ananyev, Konstantin
2021-10-26  1:08             ` [dpdk-dev] 回复: " Feifei Wang
2021-10-22  0:10       ` [dpdk-dev] " Jerin Jacob
2021-10-25  9:30         ` [dpdk-dev] 回复: " Feifei Wang
2021-10-25  9:43           ` [dpdk-dev] " Jerin Jacob
2021-10-26  1:11             ` [dpdk-dev] 回复: " Feifei Wang
2021-10-20  8:45     ` [dpdk-dev] [PATCH v4 2/5] eal: use wait event for read pflock Feifei Wang
2021-10-20  8:45     ` [dpdk-dev] [PATCH v4 3/5] eal: use wait event scheme for mcslock Feifei Wang
2021-10-20  8:45     ` [dpdk-dev] [PATCH v4 4/5] lib/bpf: use wait event scheme for Rx/Tx iteration Feifei Wang
2021-10-20  8:45     ` [dpdk-dev] [PATCH v4 5/5] lib/distributor: use wait event scheme Feifei Wang
2021-10-26  8:01 ` [dpdk-dev] [PATCH v5 0/5] add new definitions for wait scheme Feifei Wang
2021-10-26  8:02   ` [dpdk-dev] [PATCH v5 1/5] eal: " Feifei Wang
2021-10-26  8:08     ` [dpdk-dev] 回复: " Feifei Wang
2021-10-26  9:46       ` [dpdk-dev] " Ananyev, Konstantin
2021-10-26  9:59         ` Ananyev, Konstantin
2021-10-27  6:56           ` [dpdk-dev] 回复: " Feifei Wang
2021-10-26  8:02   ` [dpdk-dev] [PATCH v5 2/5] eal: use wait event for read pflock Feifei Wang
2021-10-26  8:02   ` [dpdk-dev] [PATCH v5 3/5] eal: use wait event scheme for mcslock Feifei Wang
2021-10-26  8:02   ` [dpdk-dev] [PATCH v5 4/5] lib/bpf: use wait event scheme for Rx/Tx iteration Feifei Wang
2021-10-26  8:18     ` [dpdk-dev] 回复: " Feifei Wang
2021-10-26  9:43       ` [dpdk-dev] " Ananyev, Konstantin
2021-10-26 12:56         ` Ananyev, Konstantin
2021-10-27  7:04           ` [dpdk-dev] 回复: " Feifei Wang
2021-10-27  7:31             ` Feifei Wang
2021-10-27 14:47             ` [dpdk-dev] " Ananyev, Konstantin
2021-10-28  6:24               ` [dpdk-dev] 回复: " Feifei Wang
2021-10-26  8:02   ` [dpdk-dev] [PATCH v5 5/5] lib/distributor: use wait event scheme Feifei Wang
2021-10-27  8:10 ` [dpdk-dev] [PATCH v6 0/4] add new definitions for wait scheme Feifei Wang
2021-10-27  8:10   ` [dpdk-dev] [PATCH v6 1/4] eal: " Feifei Wang
2021-10-27  8:10   ` [dpdk-dev] [PATCH v6 2/4] eal: use wait event for read pflock Feifei Wang
2021-10-27  8:10   ` [dpdk-dev] [PATCH v6 3/4] eal: use wait event scheme for mcslock Feifei Wang
2021-10-27 11:16     ` Mattias Rönnblom
2021-10-28  6:32       ` [dpdk-dev] 回复: " Feifei Wang
2021-10-27  8:10   ` [dpdk-dev] [PATCH v6 4/4] lib/distributor: use wait event scheme Feifei Wang
2021-10-27 10:57   ` [dpdk-dev] [PATCH v6 0/4] add new definitions for wait scheme Jerin Jacob
2021-10-28  6:33     ` [dpdk-dev] 回复: " Feifei Wang
2021-10-28  6:56 ` [dpdk-dev] [PATCH v7 0/5] " Feifei Wang
2021-10-28  6:56   ` [dpdk-dev] [PATCH v7 1/5] eal: " Feifei Wang
2021-10-28  7:15     ` Jerin Jacob
2021-10-28  7:40       ` [dpdk-dev] 回复: " Feifei Wang
2021-10-28  7:51         ` [dpdk-dev] " Jerin Jacob
2021-10-28  9:27           ` [dpdk-dev] 回复: " Feifei Wang
2021-10-28 13:14     ` [dpdk-dev] " Ananyev, Konstantin
2021-10-28  6:56   ` [dpdk-dev] [PATCH v7 2/5] eal: use wait event for read pflock Feifei Wang
2021-10-28  6:56   ` [dpdk-dev] [PATCH v7 3/5] eal: use wait event scheme for mcslock Feifei Wang
2021-10-28  7:02     ` Jerin Jacob
2021-10-28  7:14       ` [dpdk-dev] 回复: " Feifei Wang
2021-10-28  6:56   ` [dpdk-dev] [PATCH v7 4/5] lib/bpf: use wait event scheme for Rx/Tx iteration Feifei Wang
2021-10-28 13:15     ` Ananyev, Konstantin
2021-10-28  6:56   ` [dpdk-dev] [PATCH v7 5/5] lib/distributor: use wait event scheme Feifei Wang
2021-10-29  8:20 ` [dpdk-dev] [PATCH v8 0/5] add new definitions for wait scheme Feifei Wang
2021-10-29  8:20   ` [dpdk-dev] [PATCH v8 1/5] eal: " Feifei Wang
2021-10-29 13:54     ` Jerin Jacob
2021-10-31  8:38     ` David Marchand
2021-11-01  2:29       ` [dpdk-dev] 回复: " Feifei Wang
2021-10-29  8:20   ` [dpdk-dev] [PATCH v8 2/5] eal: use wait event for read pflock Feifei Wang
2021-10-29 13:55     ` Jerin Jacob
2021-10-31  8:37     ` David Marchand
2021-10-29  8:20   ` [dpdk-dev] [PATCH v8 3/5] eal: use wait event scheme for mcslock Feifei Wang
2021-10-29 13:55     ` Jerin Jacob
2021-10-31  8:37     ` David Marchand
2021-10-29  8:20   ` [dpdk-dev] [PATCH v8 4/5] lib/bpf: use wait event scheme for Rx/Tx iteration Feifei Wang
2021-10-31  8:37     ` David Marchand
2021-10-29  8:20   ` [dpdk-dev] [PATCH v8 5/5] lib/distributor: use wait event scheme Feifei Wang
2021-10-29 13:58     ` Jerin Jacob
2021-10-31  8:38       ` David Marchand
2021-11-01 12:44       ` David Hunt
2021-11-01  6:00 ` [dpdk-dev] [PATCH v9 0/5] add new helper for wait scheme Feifei Wang
2021-11-01  6:00   ` [dpdk-dev] [PATCH v9 1/5] eal: add a new generic " Feifei Wang
2021-11-01  6:00   ` [dpdk-dev] [PATCH v9 2/5] pflock: use wait until scheme for read pflock Feifei Wang
2021-11-03 14:46     ` David Marchand
2021-11-04  1:24       ` [dpdk-dev] 回复: " Feifei Wang
2021-11-01  6:00   ` [dpdk-dev] [PATCH v9 3/5] mcslock: use wait until scheme for mcslock Feifei Wang
2021-11-01  6:00   ` [dpdk-dev] [PATCH v9 4/5] bpf: use wait until scheme for Rx/Tx iteration Feifei Wang
2021-11-01  6:00   ` [dpdk-dev] [PATCH v9 5/5] distributor: use wait until scheme Feifei Wang
2021-11-01 16:05     ` Pattan, Reshma
2021-11-02  2:00       ` [dpdk-dev] 回复: " Feifei Wang
2021-11-03 14:55   ` [dpdk-dev] [PATCH v9 0/5] add new helper for wait scheme David Marchand
