DPDK patches and discussions
 help / color / mirror / Atom feed
From: Tyler Retzlaff <roretzla@linux.microsoft.com>
To: dev@dpdk.org
Cc: thomas@monjalon.net, david.marchand@redhat.com,
	Tyler Retzlaff <roretzla@linux.microsoft.com>,
	stable@dpdk.org
Subject: [PATCH v4 2/2] eal: fix failure path race setting new thread affinity
Date: Tue, 14 Mar 2023 15:50:39 -0700	[thread overview]
Message-ID: <1678834239-11569-3-git-send-email-roretzla@linux.microsoft.com> (raw)
In-Reply-To: <1678834239-11569-1-git-send-email-roretzla@linux.microsoft.com>

In rte_thread_create, setting affinity after pthread_create may fail.
Such a failure should result in the entire rte_thread_create call
failing, but doesn't.

Additionally, if there is a failure to set affinity, a race exists where
the creating thread will free ctx and, depending on scheduling of the new
thread, it may also free ctx (double free).

Resolve both of the above issues by setting the affinity from the newly
created thread instead of after thread creation. To achieve this modify
the existing thread wrapper to allow the creating thread to wait on the
result of the set affinity operation.

Fixes: ce6e911d20f6 ("eal: add thread lifetime API")
Cc: stable@dpdk.org

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
 lib/eal/unix/rte_thread.c | 51 +++++++++++++++++++++++++++++++++++++----------
 1 file changed, 40 insertions(+), 11 deletions(-)

diff --git a/lib/eal/unix/rte_thread.c b/lib/eal/unix/rte_thread.c
index 37ebfcf..9fcf53b 100644
--- a/lib/eal/unix/rte_thread.c
+++ b/lib/eal/unix/rte_thread.c
@@ -16,8 +16,17 @@ struct eal_tls_key {
 	pthread_key_t thread_index;
 };
 
+enum __rte_thread_wrapper_status {
+	THREAD_WRAPPER_LAUNCHING, /* Yet to call thread_func */
+	THREAD_WRAPPER_RUNNING, /* Thread is running successfully */
+	THREAD_WRAPPER_ERROR /* Thread wrapper encountered an error */
+};
+
 struct thread_routine_ctx {
 	rte_thread_func thread_func;
+	const rte_thread_attr_t *thread_attr;
+	int thread_wrapper_ret;
+	enum __rte_thread_wrapper_status thread_wrapper_status;
 	void *routine_args;
 };
 
@@ -83,11 +92,22 @@ struct thread_routine_ctx {
 static void *
 thread_func_wrapper(void *arg)
 {
-	struct thread_routine_ctx ctx = *(struct thread_routine_ctx *)arg;
-
-	free(arg);
+	struct thread_routine_ctx *ctx = (struct thread_routine_ctx *)arg;
+	rte_thread_func thread_func = ctx->thread_func;
+	void *thread_args = ctx->routine_args;
+
+	if (ctx->thread_attr != NULL && CPU_COUNT(&ctx->thread_attr->cpuset) > 0) {
+		ctx->thread_wrapper_ret = rte_thread_set_affinity(&ctx->thread_attr->cpuset);
+		if (ctx->thread_wrapper_ret != 0) {
+			RTE_LOG(DEBUG, EAL, "rte_thread_set_affinity failed\n");
+			__atomic_store_n(&ctx->thread_wrapper_status,
+				THREAD_WRAPPER_ERROR, __ATOMIC_RELEASE);
+		}
+	}
+	__atomic_store_n(&ctx->thread_wrapper_status,
+		THREAD_WRAPPER_RUNNING, __ATOMIC_RELEASE);
 
-	return (void *)(uintptr_t)ctx.thread_func(ctx.routine_args);
+	return (void *)(uintptr_t)thread_func(thread_args);
 }
 
 int
@@ -98,6 +118,7 @@ struct thread_routine_ctx {
 	int ret = 0;
 	pthread_attr_t attr;
 	pthread_attr_t *attrp = NULL;
+	enum __rte_thread_wrapper_status thread_wrapper_status;
 	struct thread_routine_ctx *ctx;
 	struct sched_param param = {
 		.sched_priority = 0,
@@ -111,7 +132,10 @@ struct thread_routine_ctx {
 		goto cleanup;
 	}
 	ctx->routine_args = args;
+	ctx->thread_attr = thread_attr;
 	ctx->thread_func = thread_func;
+	ctx->thread_wrapper_ret = 0;
+	ctx->thread_wrapper_status = THREAD_WRAPPER_LAUNCHING;
 
 	if (thread_attr != NULL) {
 		ret = pthread_attr_init(&attr);
@@ -164,14 +188,19 @@ struct thread_routine_ctx {
 		goto cleanup;
 	}
 
-	if (thread_attr != NULL && CPU_COUNT(&thread_attr->cpuset) > 0) {
-		ret = rte_thread_set_affinity_by_id(*thread_id,
-			&thread_attr->cpuset);
-		if (ret != 0) {
-			RTE_LOG(DEBUG, EAL, "rte_thread_set_affinity_by_id failed\n");
-			goto cleanup;
-		}
+	/* Wait for the thread wrapper to initialize thread successfully */
+	while ((thread_wrapper_status =
+		__atomic_load_n(&ctx->thread_wrapper_status,
+		__ATOMIC_ACQUIRE)) == THREAD_WRAPPER_LAUNCHING)
+		sched_yield();
+
+	/* Check if the thread wrapper encountered an error */
+	if (thread_wrapper_status == THREAD_WRAPPER_ERROR) {
+		/* thread wrapper is exiting */
+		pthread_join((pthread_t)thread_id->opaque_id, NULL);
+		ret = ctx->thread_wrapper_ret;
 	}
+	free(ctx);
 
 	ctx = NULL;
 cleanup:
-- 
1.8.3.1


  parent reply	other threads:[~2023-03-14 22:50 UTC|newest]

Thread overview: 34+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-03-02 18:44 [PATCH 1/2] eal: fix failure race and behavior of thread create Tyler Retzlaff
2023-03-02 18:44 ` [PATCH 2/2] eal/windows: fix create thread failure behavior Tyler Retzlaff
2023-03-07 14:33 ` [PATCH 1/2] eal: fix failure race and behavior of thread create David Marchand
2023-03-09  9:17   ` David Marchand
2023-03-09  9:58     ` Thomas Monjalon
2023-03-09 20:49       ` Tyler Retzlaff
2023-03-09 21:05         ` David Marchand
2023-03-13 23:31 ` [PATCH v2 0/2] fix race in rte_thread_create failure path Tyler Retzlaff
2023-03-13 23:31   ` [PATCH v2 1/2] eal: make cpusetp to rte thread set affinity const Tyler Retzlaff
2023-03-13 23:31   ` [PATCH v2 2/2] eal: fix failure path race setting new thread affinity Tyler Retzlaff
2023-03-14 11:47   ` [PATCH v2 0/2] fix race in rte_thread_create failure path David Marchand
2023-03-14 13:59     ` Tyler Retzlaff
2023-03-14 22:44 ` [PATCH v3 " Tyler Retzlaff
2023-03-14 22:44   ` [PATCH v3 1/2] eal: make cpusetp to rte thread set affinity const Tyler Retzlaff
2023-03-14 22:44   ` [PATCH v3 2/2] eal: fix failure path race setting new thread affinity Tyler Retzlaff
2023-03-14 22:50 ` [PATCH v4 0/2] fix race in rte_thread_create failure path Tyler Retzlaff
2023-03-14 22:50   ` [PATCH v4 1/2] eal: make cpusetp to rte thread set affinity const Tyler Retzlaff
2023-03-14 22:50   ` Tyler Retzlaff [this message]
2023-03-15  1:20     ` [PATCH v4 2/2] eal: fix failure path race setting new thread affinity Stephen Hemminger
2023-03-15  1:26       ` Tyler Retzlaff
2023-03-16  0:04 ` [PATCH v4 0/2] fix race in rte_thread_create failure path Tyler Retzlaff
2023-03-16  0:04   ` [PATCH v4 1/2] eal: make cpusetp to rte thread set affinity const Tyler Retzlaff
2023-03-16  0:04   ` [PATCH v4 2/2] eal: fix failure path race setting new thread affinity Tyler Retzlaff
2023-03-16  0:07 ` [PATCH v5 0/2] fix race in rte_thread_create failure path Tyler Retzlaff
2023-03-16  0:07   ` [PATCH v5 1/2] eal: make cpusetp to rte thread set affinity const Tyler Retzlaff
2023-03-16  0:07   ` [PATCH v5 2/2] eal: fix failure path race setting new thread affinity Tyler Retzlaff
2023-03-17 10:45     ` David Marchand
2023-03-17 14:49       ` Tyler Retzlaff
2023-03-17 18:51         ` David Marchand
2023-03-17 21:20           ` Tyler Retzlaff
2023-03-17 18:52 ` [PATCH v6] eal/unix: fix thread creation David Marchand
2023-03-17 21:24   ` Tyler Retzlaff
2023-03-18 18:26     ` David Marchand
2023-03-18 18:26   ` David Marchand

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1678834239-11569-3-git-send-email-roretzla@linux.microsoft.com \
    --to=roretzla@linux.microsoft.com \
    --cc=david.marchand@redhat.com \
    --cc=dev@dpdk.org \
    --cc=stable@dpdk.org \
    --cc=thomas@monjalon.net \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).