From: Chao Zhu <bjzhuc@cn.ibm.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH 5/7] Split spinlock operations to architecture specific
Date: Fri, 26 Sep 2014 05:33:36 -0400
Message-ID: <1411724018-7738-6-git-send-email-bjzhuc@cn.ibm.com>
In-Reply-To: <1411724018-7738-1-git-send-email-bjzhuc@cn.ibm.com>
This patch splits the spinlock operations out of the common DPDK code and
pushes them into architecture-specific arch directories, so that other
processor architectures can be supported by DPDK easily.
Signed-off-by: Chao Zhu <bjzhuc@cn.ibm.com>
---
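Note for porters: with this split, bringing up spinlocks on a new
architecture only requires supplying the small arch header. As a rough,
hypothetical sketch (not part of this patch), a port that relies purely
on the same GCC __sync builtins already used in the RTE_FORCE_INTRINSICS
path could look like:

/* Hypothetical <arch/rte_spinlock_arch.h> for a new architecture.
 * A minimal sketch using only GCC builtins; a real port would normally
 * add tuned inline assembly, as the x86 headers in this patch do. */
#ifndef _RTE_SPINLOCK_ARCH_H_
#define _RTE_SPINLOCK_ARCH_H_

typedef struct {
	volatile int locked; /* 0 = unlocked, 1 = locked */
} rte_spinlock_t;

static inline void
rte_arch_spinlock_lock(rte_spinlock_t *sl)
{
	/* Spin until the test-and-set observes the lock free. */
	while (__sync_lock_test_and_set(&sl->locked, 1))
		while (sl->locked)
			; /* arch-specific pause/yield hint would go here */
}

static inline void
rte_arch_spinlock_unlock(rte_spinlock_t *sl)
{
	__sync_lock_release(&sl->locked);
}

static inline int
rte_arch_spinlock_trylock(rte_spinlock_t *sl)
{
	/* Returns 1 if the lock was taken, 0 if it was already held. */
	return (__sync_lock_test_and_set(&sl->locked, 1) == 0);
}

#endif /* _RTE_SPINLOCK_ARCH_H_ */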
lib/librte_eal/common/Makefile | 2 +-
.../common/include/i686/arch/rte_spinlock_arch.h | 128 ++++++++++++++++++++
lib/librte_eal/common/include/rte_spinlock.h | 55 +--------
.../common/include/x86_64/arch/rte_spinlock_arch.h | 128 ++++++++++++++++++++
4 files changed, 261 insertions(+), 52 deletions(-)
create mode 100644 lib/librte_eal/common/include/i686/arch/rte_spinlock_arch.h
create mode 100644 lib/librte_eal/common/include/x86_64/arch/rte_spinlock_arch.h
diff --git a/lib/librte_eal/common/Makefile b/lib/librte_eal/common/Makefile
index bb175ca..249ea2f 100644
--- a/lib/librte_eal/common/Makefile
+++ b/lib/librte_eal/common/Makefile
@@ -46,7 +46,7 @@ ifeq ($(CONFIG_RTE_INSECURE_FUNCTION_WARNING),y)
INC += rte_warnings.h
endif
-ARCH_INC := rte_atomic.h rte_atomic_arch.h rte_byteorder_arch.h rte_cycles_arch.h rte_prefetch_arch.h
+ARCH_INC := rte_atomic.h rte_atomic_arch.h rte_byteorder_arch.h rte_cycles_arch.h rte_prefetch_arch.h rte_spinlock_arch.h
SYMLINK-$(CONFIG_RTE_LIBRTE_EAL)-include := $(addprefix include/,$(INC))
SYMLINK-$(CONFIG_RTE_LIBRTE_EAL)-include/arch := \
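Note: the ARCH_INC list names the headers that the build symlinks from
the per-architecture include directory (i686 or x86_64 here) into the
generated include/arch/ path; that is how the new include in
rte_spinlock.h resolves to the right implementation at build time.
Consumers stay arch-neutral:

	/* The symlinked arch header is pulled in indirectly: */
	#include <rte_spinlock.h>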
diff --git a/lib/librte_eal/common/include/i686/arch/rte_spinlock_arch.h b/lib/librte_eal/common/include/i686/arch/rte_spinlock_arch.h
new file mode 100644
index 0000000..2b13dcd
--- /dev/null
+++ b/lib/librte_eal/common/include/i686/arch/rte_spinlock_arch.h
@@ -0,0 +1,128 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_SPINLOCK_ARCH_H_
+#define _RTE_SPINLOCK_ARCH_H_
+
+#include <rte_lcore.h>
+#ifdef RTE_FORCE_INTRINSICS
+#include <rte_common.h>
+#endif
+
+/**
+ * The rte_spinlock_t type.
+ */
+typedef struct {
+ volatile int locked; /**< lock status 0 = unlocked, 1 = locked */
+} rte_spinlock_t;
+
+/**
+ * Take the spinlock.
+ *
+ * @param sl
+ * A pointer to the spinlock.
+ */
+static inline void
+rte_arch_spinlock_lock(rte_spinlock_t *sl)
+{
+#ifndef RTE_FORCE_INTRINSICS
+ int lock_val = 1;
+ asm volatile (
+ "1:\n"
+ "xchg %[locked], %[lv]\n"
+ "test %[lv], %[lv]\n"
+ "jz 3f\n"
+ "2:\n"
+ "pause\n"
+ "cmpl $0, %[locked]\n"
+ "jnz 2b\n"
+ "jmp 1b\n"
+ "3:\n"
+ : [locked] "=m" (sl->locked), [lv] "=q" (lock_val)
+ : "[lv]" (lock_val)
+ : "memory");
+#else
+ while (__sync_lock_test_and_set(&sl->locked, 1))
+ while(sl->locked)
+ rte_pause();
+#endif
+}
+
+/**
+ * Release the spinlock.
+ *
+ * @param sl
+ * A pointer to the spinlock.
+ */
+static inline void
+rte_arch_spinlock_unlock (rte_spinlock_t *sl)
+{
+#ifndef RTE_FORCE_INTRINSICS
+ int unlock_val = 0;
+ asm volatile (
+ "xchg %[locked], %[ulv]\n"
+ : [locked] "=m" (sl->locked), [ulv] "=q" (unlock_val)
+ : "[ulv]" (unlock_val)
+ : "memory");
+#else
+ __sync_lock_release(&sl->locked);
+#endif
+}
+
+/**
+ * Try to take the lock.
+ *
+ * @param sl
+ * A pointer to the spinlock.
+ * @return
+ * 1 if the lock is successfully taken; 0 otherwise.
+ */
+static inline int
+rte_arch_spinlock_trylock (rte_spinlock_t *sl)
+{
+#ifndef RTE_FORCE_INTRINSICS
+ int lockval = 1;
+
+ asm volatile (
+ "xchg %[locked], %[lockval]"
+ : [locked] "=m" (sl->locked), [lockval] "=q" (lockval)
+ : "[lockval]" (lockval)
+ : "memory");
+
+ return (lockval == 0);
+#else
+ return (__sync_lock_test_and_set(&sl->locked,1) == 0);
+#endif
+}
+
+#endif /* _RTE_SPINLOCK_ARCH_H_ */
\ No newline at end of file
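The non-intrinsics lock path above is a classic test-and-set spinlock:
xchg atomically swaps 1 into sl->locked, and a zero old value means the
lock was free, so the code falls through to label 3; otherwise it spins
on plain reads (pause keeps the busy-wait friendly to a sibling
hyperthread) until the lock looks free, then retries the swap. For
readers who prefer C to inline assembly, a roughly equivalent sketch in
C11 atomics (illustration only, not part of this patch) would be:

#include <stdatomic.h>

/* Illustrative C11 equivalent of the xchg/pause loop above. The DPDK
 * code predates a C11 requirement and uses inline asm or __sync
 * builtins instead. */
static inline void
spinlock_lock_c11(atomic_int *locked)
{
	for (;;) {
		/* Atomic swap: an old value of 0 means we took a free lock. */
		if (atomic_exchange_explicit(locked, 1,
				memory_order_acquire) == 0)
			return;
		/* Spin on plain loads until the lock looks free. */
		while (atomic_load_explicit(locked, memory_order_relaxed))
			; /* an x86 "pause" hint would go here */
	}
}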
diff --git a/lib/librte_eal/common/include/rte_spinlock.h b/lib/librte_eal/common/include/rte_spinlock.h
index 661908d..1cab17f 100644
--- a/lib/librte_eal/common/include/rte_spinlock.h
+++ b/lib/librte_eal/common/include/rte_spinlock.h
@@ -55,13 +55,7 @@ extern "C" {
#ifdef RTE_FORCE_INTRINSICS
#include <rte_common.h>
#endif
-
-/**
- * The rte_spinlock_t type.
- */
-typedef struct {
- volatile int locked; /**< lock status 0 = unlocked, 1 = locked */
-} rte_spinlock_t;
+#include <arch/rte_spinlock_arch.h>
/**
* A static spinlock initializer.
@@ -89,27 +83,7 @@ rte_spinlock_init(rte_spinlock_t *sl)
static inline void
rte_spinlock_lock(rte_spinlock_t *sl)
{
-#ifndef RTE_FORCE_INTRINSICS
- int lock_val = 1;
- asm volatile (
- "1:\n"
- "xchg %[locked], %[lv]\n"
- "test %[lv], %[lv]\n"
- "jz 3f\n"
- "2:\n"
- "pause\n"
- "cmpl $0, %[locked]\n"
- "jnz 2b\n"
- "jmp 1b\n"
- "3:\n"
- : [locked] "=m" (sl->locked), [lv] "=q" (lock_val)
- : "[lv]" (lock_val)
- : "memory");
-#else
- while (__sync_lock_test_and_set(&sl->locked, 1))
- while(sl->locked)
- rte_pause();
-#endif
+ rte_arch_spinlock_lock(sl);
}
/**
@@ -121,16 +95,7 @@ rte_spinlock_lock(rte_spinlock_t *sl)
static inline void
rte_spinlock_unlock (rte_spinlock_t *sl)
{
-#ifndef RTE_FORCE_INTRINSICS
- int unlock_val = 0;
- asm volatile (
- "xchg %[locked], %[ulv]\n"
- : [locked] "=m" (sl->locked), [ulv] "=q" (unlock_val)
- : "[ulv]" (unlock_val)
- : "memory");
-#else
- __sync_lock_release(&sl->locked);
-#endif
+ rte_arch_spinlock_unlock(sl);
}
/**
@@ -144,19 +109,7 @@ rte_spinlock_unlock (rte_spinlock_t *sl)
static inline int
rte_spinlock_trylock (rte_spinlock_t *sl)
{
-#ifndef RTE_FORCE_INTRINSICS
- int lockval = 1;
-
- asm volatile (
- "xchg %[locked], %[lockval]"
- : [locked] "=m" (sl->locked), [lockval] "=q" (lockval)
- : "[lockval]" (lockval)
- : "memory");
-
- return (lockval == 0);
-#else
- return (__sync_lock_test_and_set(&sl->locked,1) == 0);
-#endif
+ return rte_arch_spinlock_trylock(sl);
}
/**
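With the wrappers above reduced to one-line calls into the arch layer,
existing callers are unaffected. For reference, typical use of the
public API looks like the following sketch (the lock and counter names
are hypothetical):

static rte_spinlock_t lock = RTE_SPINLOCK_INITIALIZER;
static int counter;

static void
update(void)
{
	rte_spinlock_lock(&lock);
	counter++;			/* critical section */
	rte_spinlock_unlock(&lock);
}

static int
try_update(void)
{
	/* rte_spinlock_trylock() returns 1 on success, 0 otherwise. */
	if (rte_spinlock_trylock(&lock) == 0)
		return 0;
	counter++;
	rte_spinlock_unlock(&lock);
	return 1;
}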
diff --git a/lib/librte_eal/common/include/x86_64/arch/rte_spinlock_arch.h b/lib/librte_eal/common/include/x86_64/arch/rte_spinlock_arch.h
new file mode 100644
index 0000000..2b13dcd
--- /dev/null
+++ b/lib/librte_eal/common/include/x86_64/arch/rte_spinlock_arch.h
@@ -0,0 +1,128 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_SPINLOCK_ARCH_H_
+#define _RTE_SPINLOCK_ARCH_H_
+
+#include <rte_lcore.h>
+#ifdef RTE_FORCE_INTRINSICS
+#include <rte_common.h>
+#endif
+
+/**
+ * The rte_spinlock_t type.
+ */
+typedef struct {
+ volatile int locked; /**< lock status 0 = unlocked, 1 = locked */
+} rte_spinlock_t;
+
+/**
+ * Take the spinlock.
+ *
+ * @param sl
+ * A pointer to the spinlock.
+ */
+static inline void
+rte_arch_spinlock_lock(rte_spinlock_t *sl)
+{
+#ifndef RTE_FORCE_INTRINSICS
+ int lock_val = 1;
+ asm volatile (
+ "1:\n"
+ "xchg %[locked], %[lv]\n"
+ "test %[lv], %[lv]\n"
+ "jz 3f\n"
+ "2:\n"
+ "pause\n"
+ "cmpl $0, %[locked]\n"
+ "jnz 2b\n"
+ "jmp 1b\n"
+ "3:\n"
+ : [locked] "=m" (sl->locked), [lv] "=q" (lock_val)
+ : "[lv]" (lock_val)
+ : "memory");
+#else
+ while (__sync_lock_test_and_set(&sl->locked, 1))
+ while(sl->locked)
+ rte_pause();
+#endif
+}
+
+/**
+ * Release the spinlock.
+ *
+ * @param sl
+ * A pointer to the spinlock.
+ */
+static inline void
+rte_arch_spinlock_unlock (rte_spinlock_t *sl)
+{
+#ifndef RTE_FORCE_INTRINSICS
+ int unlock_val = 0;
+ asm volatile (
+ "xchg %[locked], %[ulv]\n"
+ : [locked] "=m" (sl->locked), [ulv] "=q" (unlock_val)
+ : "[ulv]" (unlock_val)
+ : "memory");
+#else
+ __sync_lock_release(&sl->locked);
+#endif
+}
+
+/**
+ * Try to take the lock.
+ *
+ * @param sl
+ * A pointer to the spinlock.
+ * @return
+ * 1 if the lock is successfully taken; 0 otherwise.
+ */
+static inline int
+rte_arch_spinlock_trylock (rte_spinlock_t *sl)
+{
+#ifndef RTE_FORCE_INTRINSICS
+ int lockval = 1;
+
+ asm volatile (
+ "xchg %[locked], %[lockval]"
+ : [locked] "=m" (sl->locked), [lockval] "=q" (lockval)
+ : "[lockval]" (lockval)
+ : "memory");
+
+ return (lockval == 0);
+#else
+ return (__sync_lock_test_and_set(&sl->locked,1) == 0);
+#endif
+}
+
+#endif /* _RTE_SPINLOCK_ARCH_H_ */
\ No newline at end of file
--
1.7.1
Thread overview: 16+ messages
2014-09-26 9:33 [dpdk-dev] [PATCH 0/7] Patches to split architecture specific operations from DPDK Chao Zhu
2014-09-26 9:33 ` [dpdk-dev] [PATCH 1/7] Split atomic operations to architecture specific Chao Zhu
2014-09-29 11:05 ` Bruce Richardson
2014-09-29 15:24 ` Neil Horman
2014-09-30 2:18 ` Chao CH Zhu
2014-09-26 9:33 ` [dpdk-dev] [PATCH 2/7] Split byte order " Chao Zhu
2014-09-26 9:33 ` [dpdk-dev] [PATCH 3/7] Split CPU cycle operation " Chao Zhu
2014-09-26 9:33 ` [dpdk-dev] [PATCH 4/7] Split prefetch operations " Chao Zhu
2014-09-26 9:33 ` Chao Zhu [this message]
2014-09-26 9:33 ` [dpdk-dev] [PATCH 6/7] Split memcpy operation " Chao Zhu
2014-09-26 9:33 ` [dpdk-dev] [PATCH 7/7] Split CPU flags operations " Chao Zhu
2014-10-03 13:21 ` [dpdk-dev] [PATCH 0/7] Patches to split architecture specific operations from DPDK David Marchand
2014-10-03 13:29 ` Bruce Richardson
2014-10-13 2:36 ` Chao CH Zhu
2014-10-06 21:46 ` Cyril Chemparathy
2014-10-12 9:14 ` Chao CH Zhu