From: Tyler Retzlaff <roretzla@linux.microsoft.com>
To: dev@dpdk.org
Cc: thomas@monjalon.net, dmitry.kozliuk@gmail.com, anatoly.burakov@intel.com,
	Tyler Retzlaff, Narcisa Vasile
Subject: [PATCH v4 2/6] eal: add thread lifetime management
Date: Mon, 27 Jun 2022 09:56:02 -0700
Message-Id: <1656348966-10194-3-git-send-email-roretzla@linux.microsoft.com>
In-Reply-To: <1656348966-10194-1-git-send-email-roretzla@linux.microsoft.com>
References: <1654783134-13303-1-git-send-email-roretzla@linux.microsoft.com>
	<1656348966-10194-1-git-send-email-roretzla@linux.microsoft.com>

The *rte_thread_create()* function can optionally receive an
rte_thread_attr_t object that will cause the thread to be created with
the affinity and priority described by the attributes object. If no
rte_thread_attr_t is passed (parameter is NULL), the default affinity
and priority are used.

On Windows, the function executed by a thread when the thread starts is
represented by a function pointer of type DWORD (*func) (void*).
On other platforms, the function pointer is a void* (*func) (void*).
Casting between these two function pointer types to unify the API
across all platforms may result in undefined behavior. To fix this
issue, a wrapper that matches the signature required by CreateThread()
has been added on Windows.

Signed-off-by: Narcisa Vasile
Signed-off-by: Tyler Retzlaff
---
 lib/eal/include/rte_thread.h    |  75 ++++++++++++++
 lib/eal/unix/rte_thread.c       | 135 +++++++++++++++++++++++++
 lib/eal/version.map             |   5 +
 lib/eal/windows/include/sched.h |   2 +-
 lib/eal/windows/rte_thread.c    | 213 ++++++++++++++++++++++++++++++++--------
 5 files changed, 389 insertions(+), 41 deletions(-)

diff --git a/lib/eal/include/rte_thread.h b/lib/eal/include/rte_thread.h
index 062308d..3c8ff50 100644
--- a/lib/eal/include/rte_thread.h
+++ b/lib/eal/include/rte_thread.h
@@ -31,6 +31,18 @@
 } rte_thread_t;
 
 /**
+ * Thread function
+ *
+ * Function pointer to thread start routine.
+ *
+ * @param arg
+ *   Argument passed to rte_thread_create().
+ * @return
+ *   Thread function exit value.
+ */
+typedef uint32_t (*rte_thread_func) (void *arg);
+
+/**
  * Thread priority values.
  */
 enum rte_thread_priority {
@@ -61,6 +73,69 @@ enum rte_thread_priority {
  * @warning
  * @b EXPERIMENTAL: this API may change without prior notice.
  *
+ * Create a new thread that will invoke the 'thread_func' routine.
+ *
+ * @param thread_id
+ *   A pointer that will store the id of the newly created thread.
+ *
+ * @param thread_attr
+ *   Attributes that are used at the creation of the new thread.
+ *
+ * @param thread_func
+ *   The routine that the new thread will invoke when starting execution.
+ *
+ * @param arg
+ *   Argument to be passed to the 'thread_func' routine.
+ *
+ * @return
+ *   On success, return 0.
+ *   On failure, return a positive errno-style error number.
+ */
+__rte_experimental
+int rte_thread_create(rte_thread_t *thread_id,
+		const rte_thread_attr_t *thread_attr,
+		rte_thread_func thread_func, void *arg);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Waits for the thread identified by 'thread_id' to terminate.
+ *
+ * @param thread_id
+ *   The identifier of the thread.
+ *
+ * @param value_ptr
+ *   Stores the exit status of the thread.
+ *
+ * @return
+ *   On success, return 0.
+ *   On failure, return a positive errno-style error number.
+ */
+__rte_experimental
+int rte_thread_join(rte_thread_t thread_id, uint32_t *value_ptr);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Indicate that the return value of the thread is not needed and
+ * all thread resources should be released when the thread terminates.
+ *
+ * @param thread_id
+ *   The id of the thread to be detached.
+ *
+ * @return
+ *   On success, return 0.
+ *   On failure, return a positive errno-style error number.
+ */
+__rte_experimental
+int rte_thread_detach(rte_thread_t thread_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
  * Get the id of the calling thread.
  *
  * @return
diff --git a/lib/eal/unix/rte_thread.c b/lib/eal/unix/rte_thread.c
index 9126595..d4c1a7f 100644
--- a/lib/eal/unix/rte_thread.c
+++ b/lib/eal/unix/rte_thread.c
@@ -16,6 +16,11 @@ struct eal_tls_key {
 	pthread_key_t thread_index;
 };
 
+struct thread_routine_ctx {
+	rte_thread_func thread_func;
+	void *routine_args;
+};
+
 static int
 thread_map_priority_to_os_value(enum rte_thread_priority eal_pri, int *os_pri,
 	int *pol)
@@ -75,6 +80,136 @@ struct eal_tls_key {
 	return 0;
 }
 
+static void *
+thread_func_wrapper(void *arg)
+{
+	struct thread_routine_ctx ctx = *(struct thread_routine_ctx *)arg;
+
+	free(arg);
+
+	return (void *)(uintptr_t)ctx.thread_func(ctx.routine_args);
+}
+
+int
+rte_thread_create(rte_thread_t *thread_id,
+		const rte_thread_attr_t *thread_attr,
+		rte_thread_func thread_func, void *args)
+{
+	int ret = 0;
+	pthread_attr_t attr;
+	pthread_attr_t *attrp = NULL;
+	struct thread_routine_ctx *ctx;
+	struct sched_param param = {
+		.sched_priority = 0,
+	};
+	int policy = SCHED_OTHER;
+
+	ctx = calloc(1, sizeof(*ctx));
+	if (ctx == NULL) {
+		RTE_LOG(DEBUG, EAL, "Insufficient memory for thread context allocations\n");
+		ret = ENOMEM;
+		goto cleanup;
+	}
+	ctx->routine_args = args;
+	ctx->thread_func = thread_func;
+
+	if (thread_attr != NULL) {
+		ret = pthread_attr_init(&attr);
+		if (ret != 0) {
+			RTE_LOG(DEBUG, EAL, "pthread_attr_init failed\n");
+			goto cleanup;
+		}
+
+		attrp = &attr;
+
+		/*
+		 * Set the inherit scheduler parameter to explicit,
+		 * otherwise the priority attribute is ignored.
+		 */
+		ret = pthread_attr_setinheritsched(attrp,
+				PTHREAD_EXPLICIT_SCHED);
+		if (ret != 0) {
+			RTE_LOG(DEBUG, EAL, "pthread_attr_setinheritsched failed\n");
+			goto cleanup;
+		}
+
+
+		if (thread_attr->priority ==
+				RTE_THREAD_PRIORITY_REALTIME_CRITICAL) {
+			ret = ENOTSUP;
+			goto cleanup;
+		}
+		ret = thread_map_priority_to_os_value(thread_attr->priority,
+				&param.sched_priority, &policy);
+		if (ret != 0)
+			goto cleanup;
+
+		ret = pthread_attr_setschedpolicy(attrp, policy);
+		if (ret != 0) {
+			RTE_LOG(DEBUG, EAL, "pthread_attr_setschedpolicy failed\n");
+			goto cleanup;
+		}
+
+		ret = pthread_attr_setschedparam(attrp, &param);
+		if (ret != 0) {
+			RTE_LOG(DEBUG, EAL, "pthread_attr_setschedparam failed\n");
+			goto cleanup;
+		}
+	}
+
+	ret = pthread_create((pthread_t *)&thread_id->opaque_id, attrp,
+			thread_func_wrapper, ctx);
+	if (ret != 0) {
+		RTE_LOG(DEBUG, EAL, "pthread_create failed\n");
+		goto cleanup;
+	}
+
+	if (thread_attr != NULL && CPU_COUNT(&thread_attr->cpuset) > 0) {
+		ret = rte_thread_set_affinity_by_id(*thread_id,
+				&thread_attr->cpuset);
+		if (ret != 0) {
+			RTE_LOG(DEBUG, EAL, "rte_thread_set_affinity_by_id failed\n");
+			goto cleanup;
+		}
+	}
+
+	ctx = NULL;
+cleanup:
+	free(ctx);
+	if (attrp != NULL)
+		pthread_attr_destroy(&attr);
+
+	return ret;
+}
+
+int
+rte_thread_join(rte_thread_t thread_id, uint32_t *value_ptr)
+{
+	int ret = 0;
+	void *res = (void *)(uintptr_t)0;
+	void **pres = NULL;
+
+	if (value_ptr != NULL)
+		pres = &res;
+
+	ret = pthread_join((pthread_t)thread_id.opaque_id, pres);
+	if (ret != 0) {
+		RTE_LOG(DEBUG, EAL, "pthread_join failed\n");
+		return ret;
+	}
+
+	if (value_ptr != NULL)
+		*value_ptr = (uint32_t)(uintptr_t)res;
+
+	return 0;
+}
+
+int
+rte_thread_detach(rte_thread_t thread_id)
+{
+	return pthread_detach((pthread_t)thread_id.opaque_id);
+}
+
 rte_thread_t
 rte_thread_self(void)
 {
diff --git a/lib/eal/version.map b/lib/eal/version.map
index 6d8f89a..b404343 100644
--- a/lib/eal/version.map
+++ b/lib/eal/version.map
@@ -428,6 +428,11 @@ EXPERIMENTAL {
 	rte_thread_self;
 	rte_thread_set_affinity_by_id;
 	rte_thread_set_priority;
+
+	# added in 22.11
+	rte_thread_create;
+	rte_thread_detach;
+	rte_thread_join;
 };
 
 INTERNAL {
diff --git a/lib/eal/windows/include/sched.h b/lib/eal/windows/include/sched.h
index bc31cc8..912fed1 100644
--- a/lib/eal/windows/include/sched.h
+++ b/lib/eal/windows/include/sched.h
@@ -44,7 +44,7 @@
 		(1LL << _WHICH_BIT(b))) != 0LL)
 
 static inline int
-count_cpu(rte_cpuset_t *s)
+count_cpu(const rte_cpuset_t *s)
 {
 	unsigned int _i;
 	int count = 0;
diff --git a/lib/eal/windows/rte_thread.c b/lib/eal/windows/rte_thread.c
index 0771525..ad71be4 100644
--- a/lib/eal/windows/rte_thread.c
+++ b/lib/eal/windows/rte_thread.c
@@ -13,6 +13,11 @@ struct eal_tls_key {
 	DWORD thread_index;
 };
 
+struct thread_routine_ctx {
+	rte_thread_func thread_func;
+	void *routine_args;
+};
+
 /* Translates the most common error codes related to threads */
 static int
 thread_translate_win32_error(DWORD error)
@@ -114,6 +119,174 @@ struct eal_tls_key {
 	return 0;
 }
 
+static int
+convert_cpuset_to_affinity(const rte_cpuset_t *cpuset,
+		PGROUP_AFFINITY affinity)
+{
+	int ret = 0;
+	PGROUP_AFFINITY cpu_affinity = NULL;
+	unsigned int cpu_idx;
+
+	memset(affinity, 0, sizeof(GROUP_AFFINITY));
+	affinity->Group = (USHORT)-1;
+
+	/* Check that all cpus of the set belong to the same processor group and
+	 * accumulate thread affinity to be applied.
+	 */
+	for (cpu_idx = 0; cpu_idx < CPU_SETSIZE; cpu_idx++) {
+		if (!CPU_ISSET(cpu_idx, cpuset))
+			continue;
+
+		cpu_affinity = eal_get_cpu_affinity(cpu_idx);
+
+		if (affinity->Group == (USHORT)-1) {
+			affinity->Group = cpu_affinity->Group;
+		} else if (affinity->Group != cpu_affinity->Group) {
+			RTE_LOG(DEBUG, EAL, "All processors must belong to the same processor group\n");
+			ret = ENOTSUP;
+			goto cleanup;
+		}
+
+		affinity->Mask |= cpu_affinity->Mask;
+	}
+
+	if (affinity->Mask == 0) {
+		ret = EINVAL;
+		goto cleanup;
+	}
+
+cleanup:
+	return ret;
+}
+
+static DWORD
+thread_func_wrapper(void *arg)
+{
+	struct thread_routine_ctx ctx = *(struct thread_routine_ctx *)arg;
+
+	free(arg);
+
+	return (DWORD)ctx.thread_func(ctx.routine_args);
+}
+
+int
+rte_thread_create(rte_thread_t *thread_id,
+		const rte_thread_attr_t *thread_attr,
+		rte_thread_func thread_func, void *args)
+{
+	int ret = 0;
+	DWORD tid;
+	HANDLE thread_handle = NULL;
+	GROUP_AFFINITY thread_affinity;
+	struct thread_routine_ctx *ctx;
+
+	ctx = calloc(1, sizeof(*ctx));
+	if (ctx == NULL) {
+		RTE_LOG(DEBUG, EAL, "Insufficient memory for thread context allocations\n");
+		ret = ENOMEM;
+		goto cleanup;
+	}
+	ctx->routine_args = args;
+	ctx->thread_func = thread_func;
+
+	thread_handle = CreateThread(NULL, 0, thread_func_wrapper, ctx,
+			CREATE_SUSPENDED, &tid);
+	if (thread_handle == NULL) {
+		ret = thread_log_last_error("CreateThread()");
+		goto cleanup;
+	}
+	thread_id->opaque_id = tid;
+
+	if (thread_attr != NULL) {
+		if (CPU_COUNT(&thread_attr->cpuset) > 0) {
+			ret = convert_cpuset_to_affinity(
+					&thread_attr->cpuset,
+					&thread_affinity
+					);
+			if (ret != 0) {
+				RTE_LOG(DEBUG, EAL, "Unable to convert cpuset to thread affinity\n");
+				goto cleanup;
+			}
+
+			if (!SetThreadGroupAffinity(thread_handle,
+					&thread_affinity, NULL)) {
+				ret = thread_log_last_error("SetThreadGroupAffinity()");
+				goto cleanup;
+			}
+		}
+		ret = rte_thread_set_priority(*thread_id,
+				thread_attr->priority);
+		if (ret != 0) {
+			RTE_LOG(DEBUG, EAL, "Unable to set thread priority\n");
+			goto cleanup;
+		}
+	}
+
+	if (ResumeThread(thread_handle) == (DWORD)-1) {
+		ret = thread_log_last_error("ResumeThread()");
+		goto cleanup;
+	}
+
+	ctx = NULL;
+cleanup:
+	free(ctx);
+	if (thread_handle != NULL) {
+		CloseHandle(thread_handle);
+		thread_handle = NULL;
+	}
+
+	return ret;
+}
+
+int
+rte_thread_join(rte_thread_t thread_id, uint32_t *value_ptr)
+{
+	HANDLE thread_handle;
+	DWORD result;
+	DWORD exit_code = 0;
+	BOOL err;
+	int ret = 0;
+
+	thread_handle = OpenThread(SYNCHRONIZE | THREAD_QUERY_INFORMATION,
+			FALSE, thread_id.opaque_id);
+	if (thread_handle == NULL) {
+		ret = thread_log_last_error("OpenThread()");
+		goto cleanup;
+	}
+
+	result = WaitForSingleObject(thread_handle, INFINITE);
+	if (result != WAIT_OBJECT_0) {
+		ret = thread_log_last_error("WaitForSingleObject()");
+		goto cleanup;
+	}
+
+	if (value_ptr != NULL) {
+		err = GetExitCodeThread(thread_handle, &exit_code);
+		if (err == 0) {
+			ret = thread_log_last_error("GetExitCodeThread()");
+			goto cleanup;
+		}
+		*value_ptr = exit_code;
+	}
+
+cleanup:
+	if (thread_handle != NULL) {
+		CloseHandle(thread_handle);
+		thread_handle = NULL;
+	}
+
+	return ret;
+}
+
+int
+rte_thread_detach(rte_thread_t thread_id)
+{
+	/* No resources that need to be released.
+	 */
+	RTE_SET_USED(thread_id);
+
+	return 0;
+}
+
 rte_thread_t
 rte_thread_self(void)
 {
@@ -278,46 +451,6 @@ struct eal_tls_key {
 	return output;
 }
 
-static int
-convert_cpuset_to_affinity(const rte_cpuset_t *cpuset,
-		PGROUP_AFFINITY affinity)
-{
-	int ret = 0;
-	PGROUP_AFFINITY cpu_affinity = NULL;
-	unsigned int cpu_idx;
-
-	memset(affinity, 0, sizeof(GROUP_AFFINITY));
-	affinity->Group = (USHORT)-1;
-
-	/* Check that all cpus of the set belong to the same processor group and
-	 * accumulate thread affinity to be applied.
-	 */
-	for (cpu_idx = 0; cpu_idx < CPU_SETSIZE; cpu_idx++) {
-		if (!CPU_ISSET(cpu_idx, cpuset))
-			continue;
-
-		cpu_affinity = eal_get_cpu_affinity(cpu_idx);
-
-		if (affinity->Group == (USHORT)-1) {
-			affinity->Group = cpu_affinity->Group;
-		} else if (affinity->Group != cpu_affinity->Group) {
-			RTE_LOG(DEBUG, EAL, "All processors must belong to the same processor group\n");
-			ret = ENOTSUP;
-			goto cleanup;
-		}
-
-		affinity->Mask |= cpu_affinity->Mask;
-	}
-
-	if (affinity->Mask == 0) {
-		ret = EINVAL;
-		goto cleanup;
-	}
-
-cleanup:
-	return ret;
-}
-
 int
 rte_thread_set_affinity_by_id(rte_thread_t thread_id,
 		const rte_cpuset_t *cpuset)
-- 
1.8.3.1
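
For illustration only, a minimal caller of the new API could look like the
sketch below. It passes a NULL rte_thread_attr_t so the default affinity and
priority are used, as described in the commit message. The 'worker' routine
and 'spawn_and_join' helper are hypothetical names invented for this example;
only rte_thread_create(), rte_thread_join() and rte_thread_detach() come from
the patch.

	#include <stdio.h>

	#include <rte_thread.h>

	/* Start routine matching rte_thread_func: takes void *, returns uint32_t. */
	static uint32_t
	worker(void *arg)
	{
		printf("worker started, arg=%p\n", arg);
		return 42;
	}

	static int
	spawn_and_join(void)
	{
		rte_thread_t tid;
		uint32_t exit_value = 0;
		int ret;

		/* NULL attributes: create with default affinity and priority. */
		ret = rte_thread_create(&tid, NULL, worker, NULL);
		if (ret != 0)
			return ret; /* positive errno-style error number */

		/* Wait for the thread and collect its exit value (42 here). */
		ret = rte_thread_join(tid, &exit_value);
		return ret;
	}

A thread whose return value is not needed can instead be handed to
rte_thread_detach(tid), so that its resources are released when it
terminates.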