From: Tyler Retzlaff <roretzla@linux.microsoft.com>
To: dev@dpdk.org
Cc: thomas@monjalon.net, david.marchand@redhat.com,
stephen@networkplumber.org,
Tyler Retzlaff <roretzla@linux.microsoft.com>,
stable@dpdk.org
Subject: [PATCH v5 2/2] eal: fix failure path race setting new thread affinity
Date: Wed, 15 Mar 2023 17:07:04 -0700 [thread overview]
Message-ID: <1678925224-2706-3-git-send-email-roretzla@linux.microsoft.com> (raw)
In-Reply-To: <1678925224-2706-1-git-send-email-roretzla@linux.microsoft.com>
In rte_thread_create setting affinity after pthread_create may fail.
Such a failure should result in the entire rte_thread_create failing
but doesn't.
Additionally if there is a failure to set affinity a race exists where
the creating thread will free ctx and depending on scheduling of the new
thread it may also free ctx (double free).
Resolve the above by setting the affinity from the newly created thread
using a condition variable to signal the completion of the thread
start wrapper having completed.
Since we are now waiting for the thread start wrapper to complete we can
allocate the thread start wrapper context on the stack. While here, clean
up the variable naming in the context to better highlight which fields of
the context require synchronization between the creating and created
threads.
Fixes: ce6e911d20f6 ("eal: add thread lifetime API")
Cc: stable@dpdk.org
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
lib/eal/unix/rte_thread.c | 70 +++++++++++++++++++++++++++++------------------
1 file changed, 43 insertions(+), 27 deletions(-)
diff --git a/lib/eal/unix/rte_thread.c b/lib/eal/unix/rte_thread.c
index 37ebfcf..5992b04 100644
--- a/lib/eal/unix/rte_thread.c
+++ b/lib/eal/unix/rte_thread.c
@@ -16,9 +16,14 @@ struct eal_tls_key {
pthread_key_t thread_index;
};
-struct thread_routine_ctx {
+struct thread_start_context {
rte_thread_func thread_func;
- void *routine_args;
+ void *thread_args;
+ const rte_thread_attr_t *thread_attr;
+ pthread_mutex_t wrapper_mutex;
+ pthread_cond_t wrapper_cond;
+ int wrapper_ret;
+ volatile int wrapper_done;
};
static int
@@ -81,13 +86,29 @@ struct thread_routine_ctx {
}
static void *
-thread_func_wrapper(void *arg)
+thread_start_wrapper(void *arg)
{
- struct thread_routine_ctx ctx = *(struct thread_routine_ctx *)arg;
+ struct thread_start_context *ctx = (struct thread_start_context *)arg;
+ rte_thread_func thread_func = ctx->thread_func;
+ void *thread_args = ctx->thread_args;
+ int ret = 0;
- free(arg);
+ if (ctx->thread_attr != NULL && CPU_COUNT(&ctx->thread_attr->cpuset) > 0) {
+ ret = rte_thread_set_affinity(&ctx->thread_attr->cpuset);
+ if (ret != 0)
+ RTE_LOG(DEBUG, EAL, "rte_thread_set_affinity failed\n");
+ }
- return (void *)(uintptr_t)ctx.thread_func(ctx.routine_args);
+ pthread_mutex_lock(&ctx->wrapper_mutex);
+ ctx->wrapper_ret = ret;
+ ctx->wrapper_done = 1;
+ pthread_cond_signal(&ctx->wrapper_cond);
+ pthread_mutex_unlock(&ctx->wrapper_mutex);
+
+ if (ret != 0)
+ return NULL;
+
+ return (void *)(uintptr_t)thread_func(thread_args);
}
int
@@ -98,20 +119,17 @@ struct thread_routine_ctx {
int ret = 0;
pthread_attr_t attr;
pthread_attr_t *attrp = NULL;
- struct thread_routine_ctx *ctx;
struct sched_param param = {
.sched_priority = 0,
};
int policy = SCHED_OTHER;
-
- ctx = calloc(1, sizeof(*ctx));
- if (ctx == NULL) {
- RTE_LOG(DEBUG, EAL, "Insufficient memory for thread context allocations\n");
- ret = ENOMEM;
- goto cleanup;
- }
- ctx->routine_args = args;
- ctx->thread_func = thread_func;
+ struct thread_start_context ctx = {
+ .thread_func = thread_func,
+ .thread_args = args,
+ .thread_attr = thread_attr,
+ .wrapper_mutex = PTHREAD_MUTEX_INITIALIZER,
+ .wrapper_cond = PTHREAD_COND_INITIALIZER,
+ };
if (thread_attr != NULL) {
ret = pthread_attr_init(&attr);
@@ -158,24 +176,22 @@ struct thread_routine_ctx {
}
ret = pthread_create((pthread_t *)&thread_id->opaque_id, attrp,
- thread_func_wrapper, ctx);
+ thread_start_wrapper, &ctx);
if (ret != 0) {
RTE_LOG(DEBUG, EAL, "pthread_create failed\n");
goto cleanup;
}
- if (thread_attr != NULL && CPU_COUNT(&thread_attr->cpuset) > 0) {
- ret = rte_thread_set_affinity_by_id(*thread_id,
- &thread_attr->cpuset);
- if (ret != 0) {
- RTE_LOG(DEBUG, EAL, "rte_thread_set_affinity_by_id failed\n");
- goto cleanup;
- }
- }
+ pthread_mutex_lock(&ctx.wrapper_mutex);
+ while (ctx.wrapper_done != 1)
+ pthread_cond_wait(&ctx.wrapper_cond, &ctx.wrapper_mutex);
+ ret = ctx.wrapper_ret;
+ pthread_mutex_unlock(&ctx.wrapper_mutex);
+
+ if (ret != 0)
+ pthread_join((pthread_t)thread_id->opaque_id, NULL);
- ctx = NULL;
cleanup:
- free(ctx);
if (attrp != NULL)
pthread_attr_destroy(&attr);
--
1.8.3.1
next prev parent reply other threads:[~2023-03-16 0:07 UTC|newest]
Thread overview: 34+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-03-02 18:44 [PATCH 1/2] eal: fix failure race and behavior of thread create Tyler Retzlaff
2023-03-02 18:44 ` [PATCH 2/2] eal/windows: fix create thread failure behavior Tyler Retzlaff
2023-03-07 14:33 ` [PATCH 1/2] eal: fix failure race and behavior of thread create David Marchand
2023-03-09 9:17 ` David Marchand
2023-03-09 9:58 ` Thomas Monjalon
2023-03-09 20:49 ` Tyler Retzlaff
2023-03-09 21:05 ` David Marchand
2023-03-13 23:31 ` [PATCH v2 0/2] fix race in rte_thread_create failure path Tyler Retzlaff
2023-03-13 23:31 ` [PATCH v2 1/2] eal: make cpusetp to rte thread set affinity const Tyler Retzlaff
2023-03-13 23:31 ` [PATCH v2 2/2] eal: fix failure path race setting new thread affinity Tyler Retzlaff
2023-03-14 11:47 ` [PATCH v2 0/2] fix race in rte_thread_create failure path David Marchand
2023-03-14 13:59 ` Tyler Retzlaff
2023-03-14 22:44 ` [PATCH v3 " Tyler Retzlaff
2023-03-14 22:44 ` [PATCH v3 1/2] eal: make cpusetp to rte thread set affinity const Tyler Retzlaff
2023-03-14 22:44 ` [PATCH v3 2/2] eal: fix failure path race setting new thread affinity Tyler Retzlaff
2023-03-14 22:50 ` [PATCH v4 0/2] fix race in rte_thread_create failure path Tyler Retzlaff
2023-03-14 22:50 ` [PATCH v4 1/2] eal: make cpusetp to rte thread set affinity const Tyler Retzlaff
2023-03-14 22:50 ` [PATCH v4 2/2] eal: fix failure path race setting new thread affinity Tyler Retzlaff
2023-03-15 1:20 ` Stephen Hemminger
2023-03-15 1:26 ` Tyler Retzlaff
2023-03-16 0:04 ` [PATCH v4 0/2] fix race in rte_thread_create failure path Tyler Retzlaff
2023-03-16 0:04 ` [PATCH v4 1/2] eal: make cpusetp to rte thread set affinity const Tyler Retzlaff
2023-03-16 0:04 ` [PATCH v4 2/2] eal: fix failure path race setting new thread affinity Tyler Retzlaff
2023-03-16 0:07 ` [PATCH v5 0/2] fix race in rte_thread_create failure path Tyler Retzlaff
2023-03-16 0:07 ` [PATCH v5 1/2] eal: make cpusetp to rte thread set affinity const Tyler Retzlaff
2023-03-16 0:07 ` Tyler Retzlaff [this message]
2023-03-17 10:45 ` [PATCH v5 2/2] eal: fix failure path race setting new thread affinity David Marchand
2023-03-17 14:49 ` Tyler Retzlaff
2023-03-17 18:51 ` David Marchand
2023-03-17 21:20 ` Tyler Retzlaff
2023-03-17 18:52 ` [PATCH v6] eal/unix: fix thread creation David Marchand
2023-03-17 21:24 ` Tyler Retzlaff
2023-03-18 18:26 ` David Marchand
2023-03-18 18:26 ` David Marchand
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1678925224-2706-3-git-send-email-roretzla@linux.microsoft.com \
--to=roretzla@linux.microsoft.com \
--cc=david.marchand@redhat.com \
--cc=dev@dpdk.org \
--cc=stable@dpdk.org \
--cc=stephen@networkplumber.org \
--cc=thomas@monjalon.net \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).