From: Cunming Liang <cunming.liang@intel.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [RFC PATCH 7/7] eal: macro for cpuset w/ or w/o CPU_ALLOC
Date: Thu, 11 Dec 2014 10:04:50 +0800
Message-ID: <1418263490-21088-8-git-send-email-cunming.liang@intel.com>
In-Reply-To: <1418263490-21088-1-git-send-email-cunming.liang@intel.com>
Signed-off-by: Cunming Liang <cunming.liang@intel.com>
---
lib/librte_eal/linuxapp/eal/eal_thread.c | 144 +++++++++++++++++--------------
1 file changed, 81 insertions(+), 63 deletions(-)
diff --git a/lib/librte_eal/linuxapp/eal/eal_thread.c b/lib/librte_eal/linuxapp/eal/eal_thread.c
index a584e3b..05cebe4 100644
--- a/lib/librte_eal/linuxapp/eal/eal_thread.c
+++ b/lib/librte_eal/linuxapp/eal/eal_thread.c
@@ -103,13 +103,6 @@ rte_eal_remote_launch(int (*f)(void *), void *arg, unsigned slave_id)
}
-/* set affinity for current thread */
-static int
-__eal_thread_set_affinity(pthread_t thread, unsigned lcore)
-{
-
- int s;
-
/*
* According to the section VERSIONS of the CPU_ALLOC man page:
*
@@ -124,38 +117,62 @@ __eal_thread_set_affinity(pthread_t thread, unsigned lcore)
* first appeared in glibc 2.7.
*/
#if defined(CPU_ALLOC)
+#define INIT_CPUSET(size, cpusetp) \
+ do { \
+ cpusetp = CPU_ALLOC(RTE_MAX_LCORE); \
+ if (cpusetp == NULL) \
+ rte_panic("CPU_ALLOC failed\n"); \
+ \
+ size = CPU_ALLOC_SIZE(RTE_MAX_LCORE); \
+ CPU_ZERO_S(size, cpusetp); \
+ } while(0)
+
+#define CLEAN_CPUSET(cpusetp) \
+ do { \
+ CPU_FREE(cpusetp); \
+ } while(0)
+
+#define SET_CPUSET(lcore, size, cpusetp) \
+ CPU_SET_S(lcore, size, cpusetp)
+
+#else /* CPU_ALLOC */
+
+#define INIT_CPUSET(size, cpusetp) \
+ do { \
+ cpu_set_t cpuset; \
+ cpusetp = &cpuset; \
+ size = sizeof(cpuset); \
+ CPU_ZERO(&cpuset); \
+ } while(0)
+
+#define CLEAN_CPUSET(cpusetp)
+
+#define SET_CPUSET(lcore, size, cpusetp) \
+ CPU_SET(lcore, cpusetp);
+
+#endif
+
+
+/* set affinity for current thread */
+static int
+__eal_thread_set_affinity(pthread_t thread, unsigned lcore)
+{
+ int s;
size_t size;
cpu_set_t *cpusetp;
- cpusetp = CPU_ALLOC(RTE_MAX_LCORE);
- if (cpusetp == NULL) {
- RTE_LOG(ERR, EAL, "CPU_ALLOC failed\n");
- return -1;
- }
-
- size = CPU_ALLOC_SIZE(RTE_MAX_LCORE);
- CPU_ZERO_S(size, cpusetp);
- CPU_SET_S(lcore, size, cpusetp);
+ INIT_CPUSET(size, cpusetp);
+ SET_CPUSET(lcore, size, cpusetp);
s = pthread_setaffinity_np(thread, size, cpusetp);
if (s != 0) {
RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
- CPU_FREE(cpusetp);
+ CLEAN_CPUSET(cpusetp);
return -1;
}
- CPU_FREE(cpusetp);
-#else /* CPU_ALLOC */
- cpu_set_t cpuset;
- CPU_ZERO( &cpuset );
- CPU_SET(lcore, &cpuset );
+ CLEAN_CPUSET(cpusetp);
- s = pthread_setaffinity_np(thread, sizeof( cpuset ), &cpuset);
- if (s != 0) {
- RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
- return -1;
- }
-#endif
return 0;
}
@@ -248,6 +265,9 @@ __put_linear_tid(uint64_t tid)
struct eal_thread_cb *pcb;
uint8_t shift;
+ if (tid >= RTE_MAX_THREAD)
+ return;
+
mz = rte_memzone_lookup(LINEAR_THREAD_ID_POOL);
if (!mz)
return;
@@ -352,55 +372,28 @@ rte_pthread_assign_cpuset(pthread_t thread, unsigned lcore[], unsigned num)
{
int s;
unsigned i;
-
-#if defined(CPU_ALLOC)
size_t size;
cpu_set_t *cpusetp;
- cpusetp = CPU_ALLOC(RTE_MAX_LCORE);
- if (cpusetp == NULL) {
- RTE_LOG(ERR, EAL, "CPU_ALLOC failed\n");
- return -1;
- }
-
- size = CPU_ALLOC_SIZE(RTE_MAX_LCORE);
- CPU_ZERO_S(size, cpusetp);
+ INIT_CPUSET(size, cpusetp);
for (i = 0; i < num; i++) {
if (!rte_lcore_is_enabled(lcore[i])) {
RTE_LOG(ERR, EAL, "lcore %u not enabled\n", lcore[i]);
- CPU_FREE(cpusetp);
+ CLEAN_CPUSET(cpusetp);
return -1;
}
- CPU_SET_S(lcore[i], size, cpusetp);
+ SET_CPUSET(lcore[i], size, cpusetp);
}
s = pthread_setaffinity_np(thread, size, cpusetp);
if (s != 0) {
RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
- CPU_FREE(cpusetp);
+ CLEAN_CPUSET(cpusetp);
return -1;
}
- CPU_FREE(cpusetp);
-#else /* CPU_ALLOC */
- cpu_set_t cpuset;
- CPU_ZERO(&cpuset);
-
- for (i = 0; i < num; i++) {
- if (!rte_lcore_is_enabled(lcore[i])) {
- RTE_LOG(ERR, EAL, "lcore %u not enabled\n", lcore[i]);
- return -1;
- }
- CPU_SET(lcore[i], &cpuset);
- }
-
- s = pthread_setaffinity_np(thread, sizeof(cpuset), &cpuset);
- if (s != 0) {
- RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
- return -1;
- }
-#endif
+ CLEAN_CPUSET(cpusetp);
return 0;
}
@@ -409,9 +402,20 @@ int
rte_pthread_prepare(void)
{
unsigned long ltid;
+ unsigned lcore;
+
	if (__get_linear_tid(&ltid) < 0)
return -1;
+
RTE_PER_LCORE(_thread_id) = ltid;
+
+ lcore = sched_getcpu();
+ if (!rte_lcore_is_enabled(lcore))
+ RTE_LOG(WARNING, EAL, "lcore %u is not enabled\n", lcore);
+ else
+ RTE_PER_LCORE(_lcore_id) = lcore;
+
+ return 0;
}
void
@@ -424,16 +428,30 @@ int
rte_pthread_create(pthread_t *tid, void *(*work)(void *), void *arg)
{
int ret;
+ pthread_attr_t attr;
+ size_t size;
+ cpu_set_t *cpusetp;
+ pthread_attr_t *pattr = NULL;
if (tid == NULL || work == NULL)
return -1;
- ret = pthread_create(tid, NULL, work, arg);
+ INIT_CPUSET(size, cpusetp);
+
+ SET_CPUSET(rte_lcore_id(), size, cpusetp);
+
+ pthread_attr_init(&attr);
+ if (!pthread_attr_setaffinity_np(&attr, size, cpusetp))
+ pattr = &attr;
+
+ CLEAN_CPUSET(cpusetp);
+
+ ret = pthread_create(tid, pattr, work, arg);
if (ret != 0)
return -1;
- if (__eal_thread_set_affinity(*tid, rte_lcore_id()) < 0)
- rte_panic("cannot set affinity\n");
+ pthread_attr_destroy(&attr);
return 0;
}
+
--
1.8.1.4
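
For reference, the call sequence that INIT_CPUSET / SET_CPUSET / CLEAN_CPUSET
wrap reduces to the following on a glibc that provides CPU_ALLOC (2.7 or
later). This is a minimal standalone sketch, not part of the patch:
MAX_LCORE and set_affinity() are illustrative stand-ins for RTE_MAX_LCORE
and __eal_thread_set_affinity().

/*
 * Minimal sketch of the cpuset handling the macros above encapsulate,
 * for the CPU_ALLOC (glibc >= 2.7) case only.
 * Build with: gcc -pthread sketch.c
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_LCORE 128   /* stand-in for RTE_MAX_LCORE */

static int
set_affinity(pthread_t thread, unsigned lcore)
{
        cpu_set_t *cpusetp;
        size_t size;
        int s;

        /* INIT_CPUSET: allocate and zero a set sized for MAX_LCORE CPUs */
        cpusetp = CPU_ALLOC(MAX_LCORE);
        if (cpusetp == NULL)
                return -1;
        size = CPU_ALLOC_SIZE(MAX_LCORE);
        CPU_ZERO_S(size, cpusetp);

        /* SET_CPUSET: mark the requested core in the set */
        CPU_SET_S(lcore, size, cpusetp);

        s = pthread_setaffinity_np(thread, size, cpusetp);

        /* CLEAN_CPUSET: release the dynamically sized set */
        CPU_FREE(cpusetp);

        return (s != 0) ? -1 : 0;
}

int
main(void)
{
        if (set_affinity(pthread_self(), 0) < 0) {
                fprintf(stderr, "cannot pin current thread to CPU 0\n");
                return EXIT_FAILURE;
        }
        printf("running on CPU %d\n", sched_getcpu());
        return EXIT_SUCCESS;
}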