From: Narcisa Ana Maria Vasile
To: dev@dpdk.org, thomas@monjalon.net, dmitry.kozliuk@gmail.com, khot@microsoft.com, navasile@microsoft.com, dmitrym@microsoft.com, roretzla@microsoft.com, ocardona@microsoft.com
Cc: bruce.richardson@intel.com, david.marchand@redhat.com, pallavi.kadam@intel.com
Date: Wed, 17 Mar 2021 18:00:38 -0700
Message-Id: <1616029240-26588-2-git-send-email-navasile@linux.microsoft.com>
In-Reply-To: <1616029240-26588-1-git-send-email-navasile@linux.microsoft.com>
References: <1616029240-26588-1-git-send-email-navasile@linux.microsoft.com>
Subject: [dpdk-dev] [PATCH 1/3] Add EAL threads API

From: Narcisa Vasile

EAL must hide the environment specifics from apps and libraries.
Add an EAL API for managing threads.
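A minimal usage sketch of the new interface declared in rte_thread.h below
(illustrative only; worker() and start_worker() are placeholder names and
error handling is kept to bare returns):

    #include <rte_thread.h>

    static void *worker(void *arg)
    {
        /* Application work goes here; the exit status can be observed
         * through rte_thread_join() if a non-NULL value pointer is passed.
         */
        (void)arg;
        return NULL;
    }

    int start_worker(void)
    {
        rte_thread_t tid;
        rte_thread_attr_t attr;
        int ret;

        ret = rte_thread_attr_init(&attr);
        if (ret != 0)
            return ret;

        /* Request the normal priority explicitly; realtime-critical is
         * the other level currently defined by the API.
         */
        ret = rte_thread_attr_set_priority(&attr, RTE_THREAD_PRIORITY_NORMAL);
        if (ret != 0)
            return ret;

        ret = rte_thread_create(&tid, &attr, worker, NULL);
        if (ret != 0)
            return ret;

        return rte_thread_join(tid, NULL);
    }
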
Signed-off-by: Narcisa Vasile Signed-off-by: Dmitry Malloy --- lib/librte_eal/common/eal_common_thread.c | 6 +- lib/librte_eal/common/rte_thread.c | 346 ++++++++++++ lib/librte_eal/include/rte_thread.h | 323 ++++++++++- lib/librte_eal/include/rte_thread_types.h | 20 + lib/librte_eal/windows/eal_lcore.c | 167 ++++-- lib/librte_eal/windows/eal_windows.h | 10 + .../include/rte_windows_thread_types.h | 19 + lib/librte_eal/windows/rte_thread.c | 507 +++++++++++++++++- 8 files changed, 1338 insertions(+), 60 deletions(-) create mode 100644 lib/librte_eal/common/rte_thread.c create mode 100644 lib/librte_eal/include/rte_thread_types.h create mode 100644 lib/librte_eal/windows/include/rte_windows_thread_types.h diff --git a/lib/librte_eal/common/eal_common_thread.c b/lib/librte_eal/common/eal_common_thread.c index 73a055902..5219e783e 100644 --- a/lib/librte_eal/common/eal_common_thread.c +++ b/lib/librte_eal/common/eal_common_thread.c @@ -84,7 +84,7 @@ thread_update_affinity(rte_cpuset_t *cpusetp) } int -rte_thread_set_affinity(rte_cpuset_t *cpusetp) +rte_thread_self_set_affinity(rte_cpuset_t *cpusetp) { if (pthread_setaffinity_np(pthread_self(), sizeof(rte_cpuset_t), cpusetp) != 0) { @@ -97,7 +97,7 @@ rte_thread_set_affinity(rte_cpuset_t *cpusetp) } void -rte_thread_get_affinity(rte_cpuset_t *cpusetp) +rte_thread_self_get_affinity(rte_cpuset_t *cpusetp) { assert(cpusetp); memmove(cpusetp, &RTE_PER_LCORE(_cpuset), @@ -140,7 +140,7 @@ eal_thread_dump_current_affinity(char *str, unsigned int size) { rte_cpuset_t cpuset; - rte_thread_get_affinity(&cpuset); + rte_thread_self_get_affinity(&cpuset); return eal_thread_dump_affinity(&cpuset, str, size); } diff --git a/lib/librte_eal/common/rte_thread.c b/lib/librte_eal/common/rte_thread.c new file mode 100644 index 000000000..275876534 --- /dev/null +++ b/lib/librte_eal/common/rte_thread.c @@ -0,0 +1,346 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2021 Mellanox Technologies, Ltd + * Copyright(c) 2021 Microsoft Corporation + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +struct eal_tls_key { + pthread_key_t thread_index; +}; + +rte_thread_t +rte_thread_self(void) +{ + return pthread_self(); +} + +int +rte_thread_equal(rte_thread_t t1, rte_thread_t t2) +{ + return pthread_equal(t1, t2); +} + +int +rte_thread_set_affinity(rte_thread_t thread_id, size_t cpuset_size, + const rte_cpuset_t *cpuset) +{ + return pthread_setaffinity_np(thread_id, cpuset_size, cpuset); +} + +int rte_thread_get_affinity(rte_thread_t threadid, size_t cpuset_size, + rte_cpuset_t *cpuset) +{ + return pthread_getaffinity_np(threadid, cpuset_size, cpuset); +} + +int +rte_thread_set_priority(rte_thread_t thread_id, + enum rte_thread_priority priority) +{ + int policy; + struct sched_param param = { + .sched_priority = 0, + }; + + + if (priority == RTE_THREAD_PRIORITY_REALTIME_CRITICAL) { + policy = SCHED_RR; + param.sched_priority = priority; + } else if (priority == RTE_THREAD_PRIORITY_NORMAL) { + policy = SCHED_OTHER; + param.sched_priority = priority; + } else { + RTE_LOG(DEBUG, EAL, "Invalid priority to set." 
+ "Defaulting to priority 'normal'.\n"); + policy = SCHED_OTHER; + } + + return pthread_setschedparam(thread_id, policy, ¶m); +} + +int +rte_thread_attr_init(rte_thread_attr_t *attr) +{ + int ret = 0; + pthread_attr_t pthread_attr; + + if (attr == NULL) { + RTE_LOG(DEBUG, EAL, "Invalid thread attributes parameter\n"); + return -EINVAL; + } + + ret = pthread_getattr_default_np(&pthread_attr); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "pthread_getattr_default_np failed\n"); + return ret; + } + + ret = pthread_attr_getaffinity_np(&pthread_attr, sizeof(attr->cpuset), &attr->cpuset); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "pthread_attr_getaffinity_np failed\n"); + return ret; + } + + attr->priority = RTE_THREAD_PRIORITY_NORMAL; + + return 0; +} + +int +rte_thread_attr_set_affinity(rte_thread_attr_t *thread_attr, rte_cpuset_t *cpuset) +{ + if (thread_attr == NULL || cpuset == NULL) { + RTE_LOG(DEBUG, EAL, "Invalid thread attributes parameter\n"); + return -EINVAL; + } + thread_attr->cpuset = *cpuset; + return 0; +} + +int +rte_thread_attr_get_affinity(rte_thread_attr_t *thread_attr, rte_cpuset_t *cpuset) +{ + if ((thread_attr == NULL) || (cpuset == NULL)) { + RTE_LOG(DEBUG, EAL, "Invalid thread attributes parameter\n"); + return -EINVAL; + } + + *cpuset = thread_attr->cpuset; + return 0; +} + +int +rte_thread_attr_set_priority(rte_thread_attr_t *thread_attr, enum rte_thread_priority priority) +{ + if (thread_attr == NULL) { + RTE_LOG(DEBUG, EAL, + "Unable to set priority attribute, invalid parameter\n"); + return -EINVAL; + } + + thread_attr->priority = priority; + return 0; +} + +int +rte_thread_create(rte_thread_t *thread_id, + const rte_thread_attr_t *thread_attr, void *(*thread_func) (void*), + void *args) +{ + int ret = 0; + pthread_attr_t attr; + pthread_attr_t *attrp = NULL; + struct sched_param param = { + .sched_priority = 0, + }; + int policy = SCHED_OTHER; + + if (thread_attr != NULL) { + ret = pthread_attr_init(&attr); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "pthread_attr_init failed\n"); + goto cleanup; + } + + attrp = &attr; + + /* Set the inherit scheduler parameter to explicit, + * otherwise the priority attribute is ignored. + */ + ret = pthread_attr_setinheritsched(attrp, PTHREAD_EXPLICIT_SCHED); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "pthread_attr_setinheritsched failed\n"); + goto cleanup; + } + + /* In case a realtime scheduling policy is requested, the sched_priority + * parameter is set to the value stored in thread_attr. Otherwise, for + * the default scheduling policy (SCHED_OTHER) sched_priority needs to + * be intialized to 0. 
*/ + if (thread_attr->priority == RTE_THREAD_PRIORITY_REALTIME_CRITICAL) { + policy = SCHED_RR; + param.sched_priority = thread_attr->priority; + } + + ret = pthread_attr_setschedpolicy(attrp, policy); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "pthread_attr_setschedpolicy failed\n"); + goto cleanup; + } + + ret = pthread_attr_setschedparam(attrp, ¶m); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "pthread_attr_setschedparam failed\n"); + goto cleanup; + } + + ret = pthread_attr_setaffinity_np(attrp, sizeof(thread_attr->cpuset), &thread_attr->cpuset); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "pthread_attr_setaffinity_np failed\n"); + goto cleanup; + } + } + + ret = pthread_create(thread_id, attrp, thread_func, args); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "pthread_create failed\n"); + goto cleanup; + } + +cleanup: + if (attrp != NULL) { + pthread_attr_destroy(&attr); + } + return ret; +} + +int +rte_thread_join(rte_thread_t thread_id, int *value_ptr) +{ + int ret = 0; + void *res = NULL; + void **pres = NULL; + + if (value_ptr != NULL) { + pres = &res; + } + + ret = pthread_join(thread_id, pres); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "pthread_join failed\n"); + return ret; + } + + if (pres != NULL) { + *value_ptr = *(int*)(*pres); + } + return 0; +} + +int +rte_thread_mutex_init(rte_thread_mutex_t *mutex) +{ + return pthread_mutex_init(mutex, NULL); +} + +int +rte_thread_mutex_lock(rte_thread_mutex_t *mutex) +{ + return pthread_mutex_lock(mutex); +} + +int +rte_thread_mutex_unlock(rte_thread_mutex_t *mutex) +{ + return pthread_mutex_unlock(mutex); +} + +int +rte_thread_mutex_destroy(rte_thread_mutex_t *mutex) +{ + return pthread_mutex_destroy(mutex); +} + +int +rte_thread_barrier_init(rte_thread_barrier_t *barrier, int count) +{ + return pthread_barrier_init(barrier, NULL, count); +} + +int rte_thread_barrier_wait(rte_thread_barrier_t *barrier) +{ + return pthread_barrier_wait(barrier); +} + +int rte_thread_barrier_destroy(rte_thread_barrier_t *barrier) +{ + return pthread_barrier_destroy(barrier); +} + +int rte_thread_cancel(rte_thread_t thread_id) +{ + /* + * TODO: Behavior is different between POSIX and Windows threads. + * POSIX threads wait for a cancellation point. + * Current Windows emulation kills thread at any point. 
+ */ + return pthread_cancel(thread_id); +} + +int +rte_thread_tls_key_create(rte_tls_key *key, void (*destructor)(void *)) +{ + int err; + + *key = malloc(sizeof(**key)); + if ((*key) == NULL) { + RTE_LOG(DEBUG, EAL, "Cannot allocate TLS key.\n"); + return -EINVAL; + } + err = pthread_key_create(&((*key)->thread_index), destructor); + if (err != 0) { + RTE_LOG(DEBUG, EAL, "pthread_key_create failed: %s\n", + strerror(err)); + free(*key); + return -ENOMEM; + } + return 0; +} + +int +rte_thread_tls_key_delete(rte_tls_key key) +{ + int err; + + if (key == NULL) { + RTE_LOG(DEBUG, EAL, "Invalid TLS key.\n"); + return -EINVAL; + } + err = pthread_key_delete(key->thread_index); + if (err != 0) { + RTE_LOG(DEBUG, EAL, "pthread_key_delete failed: %s\n", + strerror(err)); + free(key); + return -EINVAL; + } + free(key); + return 0; +} + +int +rte_thread_tls_value_set(rte_tls_key key, const void *value) +{ + int err; + + if (key == NULL) { + RTE_LOG(DEBUG, EAL, "Invalid TLS key.\n"); + return -EINVAL; + } + err = pthread_setspecific(key->thread_index, value); + if (err != 0) { + RTE_LOG(DEBUG, EAL, "pthread_setspecific failed: %s\n", + strerror(err)); + return -ENOMEM; + } + return 0; +} + +void * +rte_thread_tls_value_get(rte_tls_key key) +{ + if (key == NULL) { + RTE_LOG(DEBUG, EAL, "Invalid TLS key.\n"); + rte_errno = EINVAL; + return NULL; + } + return pthread_getspecific(key->thread_index); +} diff --git a/lib/librte_eal/include/rte_thread.h b/lib/librte_eal/include/rte_thread.h index e640ea185..66b112bc4 100644 --- a/lib/librte_eal/include/rte_thread.h +++ b/lib/librte_eal/include/rte_thread.h @@ -20,11 +20,330 @@ extern "C" { #endif +#include +#if defined(RTE_USE_WINDOWS_THREAD_TYPES) +#include +#else +#include +#endif + +enum rte_thread_priority +{ + RTE_THREAD_PRIORITY_NORMAL = EAL_THREAD_PRIORITY_NORMAL, + RTE_THREAD_PRIORITY_REALTIME_CRITICAL = EAL_THREAD_PRIORITY_REALTIME_CIRTICAL, + /* + * This enum can be extended to allow more priority levels. + */ +}; + +typedef struct +{ + enum rte_thread_priority priority; + rte_cpuset_t cpuset; +} rte_thread_attr_t; + /** * TLS key type, an opaque pointer. */ typedef struct eal_tls_key *rte_tls_key; +/** + * Get the id of the calling thread. + * + * @return + * Return the thread id of the calling thread. + */ +__rte_experimental +rte_thread_t rte_thread_self(void); + +/** + * Check if 2 thread ids are equal. + * + * @param t1 + * First thread id. + * + * @param t2 + * Second thread id. + * + * @return + * If the ids are equal, return nonzero. + * Otherwise, return 0. + */ +__rte_experimental +int rte_thread_equal(rte_thread_t t1, rte_thread_t t2); + +/** + * Set the affinity of thread 'thread_id' to the cpu set + * specified by 'cpuset'. + * + * @param thread_id + * Id of the thread for which to set the affinity. + * + * @param cpuset_size + * + * @param cpuset + * Pointer to CPU affinity to set. + * + * @return + * On success, return 0. + * On failure, return nonzero. + */ +__rte_experimental +int rte_thread_set_affinity(rte_thread_t thread_id, size_t cpuset_size, + const rte_cpuset_t *cpuset); + +/** + * Get the affinity of thread 'thread_id' and store it + * in 'cpuset'. + * + * @param thread_id + * Id of the thread for which to get the affinity. + * + * @param cpuset_size + * Size of the cpu set. + * + * @param cpuset + * Pointer for storing the affinity value. + * + * @return + * On success, return 0. + * On failure, return nonzero. 
+ */ +__rte_experimental +int rte_thread_get_affinity(rte_thread_t thread_id, size_t cpuset_size, + rte_cpuset_t *cpuset); + +/** + * Set the priority of a thread. + * + * @param thread_id + * Id of the thread for which to set priority. + * + * @param priority + * Priority value to be set. + * + * @return + * On success, return 0. + * On failure, return nonzero. + */ +__rte_experimental +int rte_thread_set_priority(rte_thread_t thread_id, enum rte_thread_priority priority); + +/** + * Initialize the attributes of a thread. + * These attributes can be passed to the rte_thread_create() function + * that will create a new thread and set its attributes according to attr; + * + * @param attr + * Thread attributes to initialize. + * + * @return + * On success, return 0. + * On failure, return nonzero. + */ +__rte_experimental +int rte_thread_attr_init(rte_thread_attr_t *attr); + +/** + * Set the CPU affinity value in the thread attributes pointed to + * by 'thread_attr'. + * + * @param thread_attr + * Points to the thread attributes in which affinity will be updated. + * + * @param cpuset + * Points to the value of the affinity to be set. + * + * @return + * On success, return 0. + * On failure, return nonzero. + */ +__rte_experimental +int rte_thread_attr_set_affinity(rte_thread_attr_t *thread_attr, rte_cpuset_t *cpuset); + +/** + * Get the value of CPU affinity that is set in the thread attributes pointed to + * by 'thread_attr'. + * + * @param thread_attr + * Points to the thread attributes from which affinity will be retrieved. + * + * @param cpuset + * Pointer to the memory that will store the affinity. + * + * @return + * On success, return 0. + * On failure, return nonzero. + */ +__rte_experimental +int rte_thread_attr_get_affinity(rte_thread_attr_t *thread_attr, rte_cpuset_t *cpuset); + +/** + * Set the thread priority value in the thread attributes pointed to + * by 'thread_attr'. + * + * @param thread_attr + * Points to the thread attributes in which priority will be updated. + * + * @param cpuset + * Points to the value of the priority to be set. + * + * @return + * On success, return 0. + * On failure, return nonzero. + */ +__rte_experimental +int rte_thread_attr_set_priority(rte_thread_attr_t *thread_attr, enum rte_thread_priority priority); + +/** + * Create a new thread that will invoke the 'thread_func' routine. + * + * @param thread_id + * A pointer that will store the id of the newly created thread. + * + * @param thread_attr + * Attributes that are used at the creation of the new thread. + * + * @param thread_func + * The routine that the new thread will invoke when starting execution. + * + * @param args + * Arguments to be passed to the 'thread_func' routine. + * + * @return + * On success, return 0. + * On failure, return an error number. + */ +__rte_experimental +int rte_thread_create(rte_thread_t *thread_id, + const rte_thread_attr_t *thread_attr, + void *(*thread_func) (void*), void *args); + +/** + * Waits for the thread identified by 'thread_id' to terminate + * + * @param thread_id + * The identifier of the thread. + * + * @param value_ptr + * Stores the exit status of the thread. + * + * @return + * On success, return 0. + * On failure, return an error number. + */ +__rte_experimental +int rte_thread_join(rte_thread_t thread_id, int *value_ptr); + +/** + * Initializes a mutex. + * + * @param mutex + * The mutex to be initialized. + * + * @param attr + * Attributes for initialization of the mutex. + * + * @return + * On success, return 0. 
+ * On failure, return an error number. + */ +__rte_experimental +int rte_thread_mutex_init(rte_thread_mutex_t *mutex); + +/** + * Locks a mutex. + * + * @param mutex + * The mutex to be locked. + * + * @return + * On success, return 0. + * On failure, return an error number. + */ +__rte_experimental +int rte_thread_mutex_lock(rte_thread_mutex_t *mutex); + +/** + * Unlocks a mutex. + * + * @param mutex + * The mutex to be unlocked. + * + * @return + * On success, return 0. + * On failure, return an error number. + */ +__rte_experimental +int rte_thread_mutex_unlock(rte_thread_mutex_t *mutex); + +/** + * Releases all resources associated with a mutex. + * + * @param mutex + * The mutex to be uninitialized. + * + * @return + * On success, return 0. + * On failure, return an error number. + */ +__rte_experimental +int rte_thread_mutex_destroy(rte_thread_mutex_t *mutex); + +/** + * Initializes a synchronization barrier. + * + * @param barrier + * A pointer that references the newly created 'barrier' object. + * + * @return + * On success, return 0. + * On failure, return an error number. + */ +__rte_experimental +int rte_thread_barrier_init(rte_thread_barrier_t *barrier, int count); + +/** + * Causes the calling thread to wait at the synchronization barrier 'barrier'. + * + * @param barrier + * The barrier used for synchronizing the threads. + * + * @return + * Return RTE_THREAD_BARRIER_SERIAL_THREAD for the thread synchronized at the barrier. + * Return 0 for all other threads. + * Return error number in case of error. + */ +__rte_experimental +int rte_thread_barrier_wait(rte_thread_barrier_t *barrier); + +/** + * Releases all resources used by a synchronization barrier + * and uninitializes it. + * + * @param barrier + * The barrier to be uninitialized. + * + * @return + * On success, return 0. + * On failure, return an error number. + */ +__rte_experimental +int rte_thread_barrier_destroy(rte_thread_barrier_t *barrier); + +/** + * Terminates a thread. + * + * @param thread_id + * The id of the thread to be terminated. + * + * @return + * On success, return 0. + * On failure, return nonzero. + */ +__rte_experimental +int rte_thread_cancel(rte_thread_t thread_id); + /** * Set core affinity of the current thread. * Support both EAL and non-EAL thread and update TLS. @@ -34,7 +353,7 @@ typedef struct eal_tls_key *rte_tls_key; * @return * On success, return 0; otherwise return -1; */ -int rte_thread_set_affinity(rte_cpuset_t *cpusetp); +int rte_thread_self_set_affinity(rte_cpuset_t *cpusetp); /** * Get core affinity of the current thread. @@ -44,7 +363,7 @@ int rte_thread_set_affinity(rte_cpuset_t *cpusetp); * It presumes input is not NULL, otherwise it causes panic. * */ -void rte_thread_get_affinity(rte_cpuset_t *cpusetp); +void rte_thread_self_get_affinity(rte_cpuset_t *cpusetp); /** * Create a TLS data key visible to all threads in the process. 
diff --git a/lib/librte_eal/include/rte_thread_types.h b/lib/librte_eal/include/rte_thread_types.h new file mode 100644 index 000000000..b055bbf67 --- /dev/null +++ b/lib/librte_eal/include/rte_thread_types.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2021 Microsoft Corporation + */ + +#ifndef _RTE_THREAD_TYPES_H_ +#define _RTE_THREAD_TYPES_H_ + +#include + +#define RTE_THREAD_BARRIER_SERIAL_THREAD PTHREAD_BARRIER_SERIAL_THREAD +#define RTE_THREAD_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER + +#define EAL_THREAD_PRIORITY_NORMAL 0 +#define EAL_THREAD_PRIORITY_REALTIME_CIRTICAL 99 + +typedef pthread_t rte_thread_t; +typedef pthread_mutex_t rte_thread_mutex_t; +typedef pthread_barrier_t rte_thread_barrier_t; + +#endif /* _RTE_THREAD_TYPES_H_ */ diff --git a/lib/librte_eal/windows/eal_lcore.c b/lib/librte_eal/windows/eal_lcore.c index a85149be9..3e63f9f70 100644 --- a/lib/librte_eal/windows/eal_lcore.c +++ b/lib/librte_eal/windows/eal_lcore.c @@ -2,7 +2,6 @@ * Copyright(c) 2019 Intel Corporation */ -#include #include #include @@ -28,10 +27,12 @@ struct socket_map { }; struct cpu_map { - unsigned int socket_count; unsigned int lcore_count; + unsigned int socket_count; + unsigned int cpu_count; struct lcore_map lcores[RTE_MAX_LCORE]; struct socket_map sockets[RTE_MAX_NUMA_NODES]; + GROUP_AFFINITY cpus[CPU_SETSIZE]; }; static struct cpu_map cpu_map = { 0 }; @@ -48,13 +49,110 @@ log_early(const char *format, ...) va_end(va); } +static int +eal_query_group_affinity(void) +{ + SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *infos = NULL; + DWORD infos_size = 0; + int ret = 0; + + if (!GetLogicalProcessorInformationEx(RelationGroup, NULL, &infos_size)) { + DWORD error = GetLastError(); + if (error != ERROR_INSUFFICIENT_BUFFER) { + log_early("Cannot get group information size, error %lu\n", + error); + rte_errno = EINVAL; + ret = -1; + goto cleanup; + } + } + + infos = malloc(infos_size); + if (infos == NULL) { + log_early("Cannot allocate memory for NUMA node information\n"); + rte_errno = ENOMEM; + ret = -1; + goto cleanup; + } + + if (!GetLogicalProcessorInformationEx(RelationGroup, infos, &infos_size)) { + log_early("Cannot get group information, error %lu\n", GetLastError()); + rte_errno = EINVAL; + ret = -1; + goto cleanup; + } + + cpu_map.cpu_count = 0; + USHORT group_count = infos->Group.ActiveGroupCount; + for (USHORT group_number = 0; group_number < group_count; group_number++) { + KAFFINITY affinity = infos->Group.GroupInfo[group_number].ActiveProcessorMask; + + for (unsigned int i = 0; i < EAL_PROCESSOR_GROUP_SIZE; i++) { + if ((affinity & ((KAFFINITY)1 << i)) == 0) + continue; + cpu_map.cpus[cpu_map.cpu_count].Group = group_number; + cpu_map.cpus[cpu_map.cpu_count].Mask = (KAFFINITY)1 << i; + cpu_map.cpu_count++; + } + } + +cleanup: + free(infos); + return ret; +} + +static bool +eal_check_for_duplicate_numa(const SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *info) +{ + const unsigned int node_id = info->NumaNode.NodeNumber; + const GROUP_AFFINITY *cores = &info->NumaNode.GroupMask; + struct lcore_map *lcore; + unsigned int socket_id; + + /* NUMA node may be reported multiple times if it includes + * cores from different processor groups, e. g. 80 cores + * of a physical processor comprise one NUMA node, but two + * processor groups, because group size is limited by 32/64. 
+ */ + for (socket_id = 0; socket_id < cpu_map.socket_count; socket_id++) { + if (cpu_map.sockets[socket_id].node_id == node_id) + break; + } + + if (socket_id == cpu_map.socket_count) { + if (socket_id == RTE_DIM(cpu_map.sockets)) { + return true; + } + + cpu_map.sockets[socket_id].node_id = node_id; + cpu_map.socket_count++; + } + + for (unsigned int i = 0; i < EAL_PROCESSOR_GROUP_SIZE; i++) { + if ((cores->Mask & ((KAFFINITY)1 << i)) == 0) + continue; + + if (cpu_map.lcore_count == RTE_DIM(cpu_map.lcores)) { + return true; + } + + lcore = &cpu_map.lcores[cpu_map.lcore_count]; + lcore->socket_id = socket_id; + lcore->core_id = cores->Group * EAL_PROCESSOR_GROUP_SIZE + i; + cpu_map.lcore_count++; + } + return false; +} + int eal_create_cpu_map(void) { SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *infos, *info; DWORD infos_size; bool full = false; + int ret = 0; + infos = NULL; infos_size = 0; if (!GetLogicalProcessorInformationEx( RelationNumaNode, NULL, &infos_size)) { @@ -79,57 +177,27 @@ eal_create_cpu_map(void) log_early("Cannot get NUMA node information, error %lu\n", GetLastError()); rte_errno = EINVAL; - return -1; + ret = -1; + goto exit; } info = infos; while ((uint8_t *)info - (uint8_t *)infos < infos_size) { - unsigned int node_id = info->NumaNode.NodeNumber; - GROUP_AFFINITY *cores = &info->NumaNode.GroupMask; - struct lcore_map *lcore; - unsigned int i, socket_id; - - /* NUMA node may be reported multiple times if it includes - * cores from different processor groups, e. g. 80 cores - * of a physical processor comprise one NUMA node, but two - * processor groups, because group size is limited by 32/64. - */ - for (socket_id = 0; socket_id < cpu_map.socket_count; - socket_id++) { - if (cpu_map.sockets[socket_id].node_id == node_id) - break; - } - - if (socket_id == cpu_map.socket_count) { - if (socket_id == RTE_DIM(cpu_map.sockets)) { - full = true; - goto exit; - } - - cpu_map.sockets[socket_id].node_id = node_id; - cpu_map.socket_count++; - } - - for (i = 0; i < EAL_PROCESSOR_GROUP_SIZE; i++) { - if ((cores->Mask & ((KAFFINITY)1 << i)) == 0) - continue; - - if (cpu_map.lcore_count == RTE_DIM(cpu_map.lcores)) { - full = true; - goto exit; - } - - lcore = &cpu_map.lcores[cpu_map.lcore_count]; - lcore->socket_id = socket_id; - lcore->core_id = - cores->Group * EAL_PROCESSOR_GROUP_SIZE + i; - cpu_map.lcore_count++; - } + if (eal_check_for_duplicate_numa(info)) + break; info = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *)( (uint8_t *)info + info->Size); } + if (eal_query_group_affinity()) { + /* + * No need to set rte_errno here. + * It is set by eal_query_group_affinity(). + */ + ret = -1; + goto exit; + } exit: if (full) { /* Not a fatal error, but important for troubleshooting. */ @@ -139,7 +207,7 @@ eal_create_cpu_map(void) free(infos); - return 0; + return ret; } int @@ -165,3 +233,12 @@ eal_socket_numa_node(unsigned int socket_id) { return cpu_map.sockets[socket_id].node_id; } + +PGROUP_AFFINITY +eal_get_cpu_affinity(size_t cpu_index) +{ + if (cpu_index < CPU_SETSIZE) + return &cpu_map.cpus[cpu_index]; + + return NULL; +} diff --git a/lib/librte_eal/windows/eal_windows.h b/lib/librte_eal/windows/eal_windows.h index 478accc1b..dc5dc8240 100644 --- a/lib/librte_eal/windows/eal_windows.h +++ b/lib/librte_eal/windows/eal_windows.h @@ -55,6 +55,16 @@ int eal_thread_create(pthread_t *thread); */ unsigned int eal_socket_numa_node(unsigned int socket_id); +/** + * Get pointer to the group affinity for the cpu. + * + * @param cpu_index + * Index of the cpu, as it comes from rte_cpuset_t. 
+ * @return + * Pointer to the group affinity for the cpu. + */ +PGROUP_AFFINITY eal_get_cpu_affinity(size_t cpu_index); + /** * Schedule code for execution in the interrupt thread. * diff --git a/lib/librte_eal/windows/include/rte_windows_thread_types.h b/lib/librte_eal/windows/include/rte_windows_thread_types.h new file mode 100644 index 000000000..b6209e6eb --- /dev/null +++ b/lib/librte_eal/windows/include/rte_windows_thread_types.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2021 Microsoft Corporation + */ + +#ifndef _RTE_THREAD_TYPES_H_ +#define _RTE_THREAD_TYPES_H_ + +#include + +#define RTE_THREAD_BARRIER_SERIAL_THREAD TRUE + +#define EAL_THREAD_PRIORITY_NORMAL THREAD_PRIORITY_NORMAL +#define EAL_THREAD_PRIORITY_REALTIME_CIRTICAL THREAD_PRIORITY_TIME_CRITICAL + +typedef DWORD rte_thread_t; +typedef CRITICAL_SECTION rte_thread_mutex_t; +typedef SYNCHRONIZATION_BARRIER rte_thread_barrier_t; + +#endif /* _RTE_THREAD_TYPES_H_ */ diff --git a/lib/librte_eal/windows/rte_thread.c b/lib/librte_eal/windows/rte_thread.c index 2e2ab2917..decb4b24d 100644 --- a/lib/librte_eal/windows/rte_thread.c +++ b/lib/librte_eal/windows/rte_thread.c @@ -1,16 +1,503 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright 2021 Mellanox Technologies, Ltd + * Copyright(c) 2021 Microsoft Corporation */ #include -#include #include -#include + +#include "eal_windows.h" struct eal_tls_key { DWORD thread_index; }; +/* Translates the most common error codes related to threads */ +static int rte_thread_translate_win32_error(DWORD error) +{ + switch (error) { + case ERROR_SUCCESS: + return 0; + + case ERROR_INVALID_PARAMETER: + return -EINVAL; + + case ERROR_INVALID_HANDLE: + return -EFAULT; + + case ERROR_NOT_ENOUGH_MEMORY: + /* FALLTHROUGH */ + case ERROR_NO_SYSTEM_RESOURCES: + return -ENOMEM; + + case ERROR_PRIVILEGE_NOT_HELD: + /* FALLTHROUGH */ + case ERROR_ACCESS_DENIED: + return -EACCES; + + case ERROR_ALREADY_EXISTS: + return -EEXIST; + + case ERROR_POSSIBLE_DEADLOCK: + return -EDEADLK; + + case ERROR_INVALID_FUNCTION: + /* FALLTHROUGH */ + case ERROR_CALL_NOT_IMPLEMENTED: + return -ENOSYS; + + default: + return -EINVAL; + } + + return -EINVAL; +} + +rte_thread_t +rte_thread_self(void) +{ + return GetCurrentThreadId(); +} + +int +rte_thread_equal(rte_thread_t t1, rte_thread_t t2) +{ + return t1 == t2 ? 1 : 0; +} + +static int +rte_convert_cpuset_to_affinity(const rte_cpuset_t *cpuset, + PGROUP_AFFINITY affinity) +{ + int ret = 0; + PGROUP_AFFINITY cpu_affinity = NULL; + + memset(affinity, 0, sizeof(GROUP_AFFINITY)); + affinity->Group = (USHORT)-1; + + /* Check that all cpus of the set belong to the same processor group and + * accumulate thread affinity to be applied. 
+ */ + for (unsigned int cpu_idx = 0; cpu_idx < CPU_SETSIZE; cpu_idx++) { + if (!CPU_ISSET(cpu_idx, cpuset)) + continue; + + cpu_affinity = eal_get_cpu_affinity(cpu_idx); + + if (affinity->Group == (USHORT)-1) { + affinity->Group = cpu_affinity->Group; + } else if (affinity->Group != cpu_affinity->Group) { + ret = -EINVAL; + goto cleanup; + } + + affinity->Mask |= cpu_affinity->Mask; + } + + if (affinity->Mask == 0) { + ret = -EINVAL; + goto cleanup; + } + +cleanup: + return ret; +} + +int rte_thread_set_affinity(rte_thread_t thread_id, + size_t cpuset_size, + const rte_cpuset_t *cpuset) +{ + int ret = 0; + GROUP_AFFINITY thread_affinity; + HANDLE thread_handle = NULL; + + if (cpuset == NULL || cpuset_size < sizeof(*cpuset)) { + ret = -EINVAL; + goto cleanup; + } + + ret = rte_convert_cpuset_to_affinity(cpuset, &thread_affinity); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "Unable to convert cpuset to thread affinity\n"); + goto cleanup; + } + + thread_handle = OpenThread(THREAD_ALL_ACCESS, FALSE, thread_id); + if (thread_handle == NULL) { + ret = rte_thread_translate_win32_error(GetLastError()); + RTE_LOG_WIN32_ERR("OpenThread()"); + goto cleanup; + } + + if (!SetThreadGroupAffinity(thread_handle, &thread_affinity, NULL)) { + ret = rte_thread_translate_win32_error(GetLastError()); + RTE_LOG_WIN32_ERR("SetThreadGroupAffinity()"); + goto cleanup; + } + +cleanup: + if (thread_handle != NULL) { + CloseHandle(thread_handle); + thread_handle = NULL; + } + + return ret; +} + +int +rte_thread_get_affinity(rte_thread_t thread_id, size_t cpuset_size, + rte_cpuset_t *cpuset) +{ + HANDLE thread_handle = NULL; + PGROUP_AFFINITY cpu_affinity; + GROUP_AFFINITY thread_affinity; + int ret = 0; + + if (cpuset == NULL || cpuset_size < sizeof(*cpuset)) { + ret = -EINVAL; + goto cleanup; + } + + thread_handle = OpenThread(THREAD_ALL_ACCESS, FALSE, thread_id); + if (thread_handle == NULL) { + ret = rte_thread_translate_win32_error(GetLastError()); + RTE_LOG_WIN32_ERR("OpenThread()"); + goto cleanup; + } + + /* obtain previous thread affinity */ + if (!GetThreadGroupAffinity(thread_handle, &thread_affinity)) { + ret = rte_thread_translate_win32_error(GetLastError()); + RTE_LOG_WIN32_ERR("GetThreadGroupAffinity()"); + goto cleanup; + } + + CPU_ZERO(cpuset); + + /* Convert affinity to DPDK cpu set */ + for (unsigned int cpu_idx = 0; cpu_idx < CPU_SETSIZE; cpu_idx++) { + + cpu_affinity = eal_get_cpu_affinity(cpu_idx); + + if ((cpu_affinity->Group == thread_affinity.Group) && + ((cpu_affinity->Mask & thread_affinity.Mask) != 0)) { + CPU_SET(cpu_idx, cpuset); + } + } + +cleanup: + if (thread_handle != NULL) { + CloseHandle(thread_handle); + thread_handle = NULL; + } + return ret; +} + +static HANDLE +get_process_handle_from_thread_handle(HANDLE thread_handle) +{ + DWORD process_id = 0; + + process_id = GetProcessIdOfThread(thread_handle); + if (process_id == 0) { + RTE_LOG_WIN32_ERR("GetProcessIdOfThread()"); + return NULL; + } + + return OpenProcess(PROCESS_SET_INFORMATION, FALSE, process_id); +} + +int +rte_thread_set_priority(rte_thread_t thread_id, enum rte_thread_priority priority) +{ + HANDLE thread_handle = NULL; + HANDLE process_handle = NULL; + DWORD priority_class = NORMAL_PRIORITY_CLASS; + int ret = 0; + + thread_handle = OpenThread(THREAD_SET_INFORMATION | + THREAD_QUERY_INFORMATION, FALSE, thread_id); + if (thread_handle == NULL) { + ret = rte_thread_translate_win32_error(GetLastError()); + RTE_LOG_WIN32_ERR("OpenThread()"); + goto cleanup; + } + + switch (priority) { + + case 
RTE_THREAD_PRIORITY_REALTIME_CRITICAL: + priority_class = REALTIME_PRIORITY_CLASS; + break; + + case RTE_THREAD_PRIORITY_NORMAL: + /* FALLTHROUGH */ + default: + priority_class = NORMAL_PRIORITY_CLASS; + priority = RTE_THREAD_PRIORITY_NORMAL; + break; + } + + process_handle = get_process_handle_from_thread_handle(thread_handle); + if (process_handle == NULL) { + ret = rte_thread_translate_win32_error(GetLastError()); + RTE_LOG_WIN32_ERR("get_process_handle_from_thread_handle()"); + goto cleanup; + } + + if (!SetPriorityClass(process_handle, priority_class)) { + ret = rte_thread_translate_win32_error(GetLastError()); + RTE_LOG_WIN32_ERR("SetPriorityClass()"); + goto cleanup; + } + + if (!SetThreadPriority(thread_handle, priority)) { + ret = rte_thread_translate_win32_error(GetLastError()); + RTE_LOG_WIN32_ERR("SetThreadPriority()"); + goto cleanup; + } + +cleanup: + if (thread_handle != NULL) { + CloseHandle(thread_handle); + thread_handle = NULL; + } + if (process_handle != NULL) { + CloseHandle(process_handle); + process_handle = NULL; + } + return ret; +} + +int +rte_thread_attr_init(rte_thread_attr_t *attr) +{ + if (attr == NULL) { + RTE_LOG(DEBUG, EAL, + "Unable to init thread attributes, invalid parameter\n"); + return -EINVAL; + } + + attr->priority = RTE_THREAD_PRIORITY_NORMAL; + CPU_ZERO(&attr->cpuset); + return 0; +} + +int +rte_thread_attr_set_affinity(rte_thread_attr_t *thread_attr, rte_cpuset_t *cpuset) +{ + if (thread_attr == NULL) { + RTE_LOG(DEBUG, EAL, + "Unable to set affinity attribute, invalid parameter\n"); + return -EINVAL; + } + + thread_attr->cpuset = *cpuset; + return 0; +} + +int +rte_thread_attr_get_affinity(rte_thread_attr_t *thread_attr, rte_cpuset_t *cpuset) +{ + if (thread_attr == NULL) { + RTE_LOG(DEBUG, EAL, + "Unable to set affinity attribute, invalid parameter\n"); + return -EINVAL; + } + + *cpuset = thread_attr->cpuset; + return 0; +} + +int +rte_thread_attr_set_priority(rte_thread_attr_t *thread_attr, enum rte_thread_priority priority) +{ + if (thread_attr == NULL) { + RTE_LOG(DEBUG, EAL, + "Unable to set priority attribute, invalid parameter\n"); + return -EINVAL; + } + + thread_attr->priority = priority; + return 0; +} + +int +rte_thread_create(rte_thread_t *thread_id, + const rte_thread_attr_t *thread_attr, void *(*thread_func) (void*), + void *args) +{ + int ret = 0; + HANDLE thread_handle = NULL; + GROUP_AFFINITY thread_affinity; + + thread_handle = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)thread_func, args, 0, thread_id); + if (thread_handle == NULL) { + ret = rte_thread_translate_win32_error(GetLastError()); + RTE_LOG_WIN32_ERR("CreateThread()"); + goto cleanup; + } + + if (thread_attr != NULL) { + if (CPU_COUNT(&thread_attr->cpuset) > 0) { + ret = rte_convert_cpuset_to_affinity(&thread_attr->cpuset, &thread_affinity); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "Unable to convert cpuset to thread affinity\n"); + goto cleanup; + } + + if (!SetThreadGroupAffinity(thread_handle, &thread_affinity, NULL)) { + ret = rte_thread_translate_win32_error(GetLastError()); + RTE_LOG_WIN32_ERR("SetThreadGroupAffinity()"); + goto cleanup; + } + } + ret = rte_thread_set_priority(*thread_id, thread_attr->priority); + if (ret != 0) { + RTE_LOG(DEBUG, EAL, "Unable to set thread priority\n"); + goto cleanup; + } + } + + return 0; + +cleanup: + if (thread_handle != NULL) { + CloseHandle(thread_handle); + thread_handle = NULL; + } + return ret; +} + +int +rte_thread_join(rte_thread_t thread_id, int *value_ptr) +{ + HANDLE thread_handle = NULL; + DWORD result = 0; + 
DWORD exit_code = 0; + BOOL err = 0; + int ret = 0; + + thread_handle = OpenThread(SYNCHRONIZE | THREAD_QUERY_INFORMATION, FALSE, thread_id); + if (thread_handle == NULL) { + ret = rte_thread_translate_win32_error(GetLastError()); + RTE_LOG_WIN32_ERR("OpenThread()"); + goto cleanup; + } + + result = WaitForSingleObject(thread_handle, INFINITE); + if (result != WAIT_OBJECT_0) { + ret = rte_thread_translate_win32_error(GetLastError()); + RTE_LOG_WIN32_ERR("WaitForSingleObject()"); + goto cleanup; + } + + if (value_ptr != NULL) { + err = GetExitCodeThread(thread_handle, &exit_code); + if (err == 0) { + ret = rte_thread_translate_win32_error(GetLastError()); + RTE_LOG_WIN32_ERR("GetExitCodeThread()"); + goto cleanup; + } + *value_ptr = exit_code; + } + +cleanup: + if (thread_handle != NULL) { + CloseHandle(thread_handle); + thread_handle = NULL; + } + + return ret; +} + +int +rte_thread_mutex_init(rte_thread_mutex_t *mutex) +{ + InitializeCriticalSection(mutex); + return 0; +} + +int +rte_thread_mutex_lock(rte_thread_mutex_t *mutex) +{ + EnterCriticalSection(mutex); + return 0; +} + +int +rte_thread_mutex_unlock(rte_thread_mutex_t *mutex) +{ + LeaveCriticalSection(mutex); + return 0; +} + +int +rte_thread_mutex_destroy(rte_thread_mutex_t *mutex) +{ + DeleteCriticalSection(mutex); + return 0; +} + +int +rte_thread_barrier_init(rte_thread_barrier_t *barrier, int count) +{ + int ret = 0; + + if (!InitializeSynchronizationBarrier(barrier, count, -1)) { + ret = rte_thread_translate_win32_error(GetLastError()); + RTE_LOG_WIN32_ERR("InitializeSynchronizationBarrier()"); + return ret; + } + return 0; +} + +int +rte_thread_barrier_wait(rte_thread_barrier_t *barrier) +{ + return EnterSynchronizationBarrier(barrier, + SYNCHRONIZATION_BARRIER_FLAGS_BLOCK_ONLY); +} + +int +rte_thread_barrier_destroy(rte_thread_barrier_t *barrier) +{ + DeleteSynchronizationBarrier(barrier); + return 0; +} + +int +rte_thread_cancel(rte_thread_t thread_id) +{ + int ret = 0; + HANDLE thread_handle = NULL; + + thread_handle = OpenThread(THREAD_TERMINATE, FALSE, thread_id); + if (thread_handle == NULL) { + ret = rte_thread_translate_win32_error(GetLastError()); + RTE_LOG_WIN32_ERR("OpenThread()"); + goto cleanup; + } + + /* + * TODO: Behavior is different between POSIX and Windows threads. + * POSIX threads wait for a cancellation point. + * Current Windows emulation kills thread at any point. 
+ */ + ret = TerminateThread(thread_handle, 0); + if (ret != 0) { + ret = rte_thread_translate_win32_error(GetLastError()); + RTE_LOG_WIN32_ERR("TerminateThread()"); + goto cleanup; + } + +cleanup: + if (thread_handle != NULL) { + CloseHandle(thread_handle); + thread_handle = NULL; + } + return ret; +} + int rte_thread_tls_key_create(rte_tls_key *key, __rte_unused void (*destructor)(void *)) @@ -18,13 +505,13 @@ rte_thread_tls_key_create(rte_tls_key *key, *key = malloc(sizeof(**key)); if ((*key) == NULL) { RTE_LOG(DEBUG, EAL, "Cannot allocate TLS key.\n"); - return -1; + return -ENOMEM; } (*key)->thread_index = TlsAlloc(); if ((*key)->thread_index == TLS_OUT_OF_INDEXES) { RTE_LOG_WIN32_ERR("TlsAlloc()"); free(*key); - return -1; + return rte_thread_translate_win32_error(GetLastError());; } return 0; } @@ -32,14 +519,14 @@ rte_thread_tls_key_create(rte_tls_key *key, int rte_thread_tls_key_delete(rte_tls_key key) { - if (!key) { + if (key == NULL) { RTE_LOG(DEBUG, EAL, "Invalid TLS key.\n"); - return -1; + return -EINVAL; } if (!TlsFree(key->thread_index)) { RTE_LOG_WIN32_ERR("TlsFree()"); free(key); - return -1; + return rte_thread_translate_win32_error(GetLastError()); } free(key); return 0; @@ -50,7 +537,7 @@ rte_thread_tls_value_set(rte_tls_key key, const void *value) { char *p; - if (!key) { + if (key == NULL) { RTE_LOG(DEBUG, EAL, "Invalid TLS key.\n"); return -1; } @@ -58,7 +545,7 @@ rte_thread_tls_value_set(rte_tls_key key, const void *value) p = (char *) (uintptr_t) value; if (!TlsSetValue(key->thread_index, p)) { RTE_LOG_WIN32_ERR("TlsSetValue()"); - return -1; + return rte_thread_translate_win32_error(GetLastError()); } return 0; } @@ -68,7 +555,7 @@ rte_thread_tls_value_get(rte_tls_key key) { void *output; - if (!key) { + if (key == NULL) { RTE_LOG(DEBUG, EAL, "Invalid TLS key.\n"); rte_errno = EINVAL; return NULL; -- 2.30.0.vfs.0.2
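
For reference, a minimal sketch of the TLS key interface touched above
(illustrative only; tls_demo() is a placeholder name and the key is created,
used and deleted in one place purely to keep the example short):

    #include <rte_thread.h>

    static int tls_demo(void)
    {
        rte_tls_key key;
        static int value = 42;

        /* Create a process-wide key with no destructor. */
        if (rte_thread_tls_key_create(&key, NULL) != 0)
            return -1;

        /* Bind a value to the key for the calling thread, then read it back. */
        if (rte_thread_tls_value_set(key, &value) != 0)
            return -1;
        if (*(int *)rte_thread_tls_value_get(key) != 42)
            return -1;

        return rte_thread_tls_key_delete(key);
    }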