From: Tal Shnaiderman <talshn@nvidia.com>
To: dev@dpdk.org
Cc: thomas@monjalon.net, matan@nvidia.com, rasland@nvidia.com, ophirmu@nvidia.com
Date: Sat, 26 Dec 2020 18:57:14 +0200
Message-Id: <20201226165714.4416-1-talshn@nvidia.com>
Subject: [dpdk-dev] [PATCH] mlx5: split multi-threaded flows per OS

The multi-threaded flows feature uses the pthread function
pthread_key_create, but on Windows the destructor argument of that
function is not implemented.

To resolve this, Windows implements its own destruction mechanism to
clean up the mlx5_flow_workspace object of each terminated thread.

Linux flow handling keeps the current behavior.

Signed-off-by: Tal Shnaiderman <talshn@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
Depends-on: patch-85737 ("eal: add generic thread-local-storage functions")
---
 drivers/net/mlx5/linux/meson.build      |   1 +
 drivers/net/mlx5/linux/mlx5_flow_os.c   |  39 +++++++
 drivers/net/mlx5/mlx5.c                 |   8 ++
 drivers/net/mlx5/mlx5_flow.c            |  29 +-----
 drivers/net/mlx5/mlx5_flow.h            |  10 ++
 drivers/net/mlx5/windows/mlx5_flow_os.c | 174 ++++++++++++++++++++++++++++++++
 6 files changed, 237 insertions(+), 24 deletions(-)
 create mode 100644 drivers/net/mlx5/linux/mlx5_flow_os.c

diff --git a/drivers/net/mlx5/linux/meson.build b/drivers/net/mlx5/linux/meson.build
index 6c4402169e..8412edce78 100644
--- a/drivers/net/mlx5/linux/meson.build
+++ b/drivers/net/mlx5/linux/meson.build
@@ -9,5 +9,6 @@ sources += files(
 	'mlx5_verbs.c',
 	'mlx5_mp_os.c',
 	'mlx5_vlan_os.c',
+	'mlx5_flow_os.c',
 )
 
diff --git a/drivers/net/mlx5/linux/mlx5_flow_os.c b/drivers/net/mlx5/linux/mlx5_flow_os.c
new file mode 100644
index 0000000000..9e98966c38
--- /dev/null
+++ b/drivers/net/mlx5/linux/mlx5_flow_os.c
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2020 Mellanox Technologies, Ltd
+ */
+
+#include "mlx5_flow_os.h"
+
+#include <rte_thread.h>
+
+/* Key of thread specific flow workspace data. */
+static rte_tls_key key_workspace;
+
+int
+mlx5_flow_os_init_workspace_once(void)
+{
+	if (rte_thread_tls_create_key(&key_workspace, flow_release_workspace)) {
+		DRV_LOG(ERR, "Can't create flow workspace data thread key.");
+		return ENOMEM;
+	}
+	return 0;
+}
+
+void *
+mlx5_flow_os_get_specific_workspace(void)
+{
+	return rte_thread_tls_get_value(key_workspace);
+}
+
+int
+mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data)
+{
+	return rte_thread_tls_set_value(key_workspace, data);
+}
+
+void
+mlx5_flow_os_release_workspace(void)
+{
+	rte_thread_tls_delete_key(key_workspace);
+}
+
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 2aa269b13e..0fdcd0fe8d 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1004,6 +1004,11 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
 		err = rte_errno;
 		goto error;
 	}
+	if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
+		err = mlx5_flow_os_init_workspace_once();
+		if (err)
+			goto error;
+	}
 	mlx5_flow_aging_init(sh);
 	mlx5_flow_counters_mng_init(sh);
 	mlx5_flow_ipool_create(sh, config);
@@ -1079,6 +1084,9 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 	mlx5_mr_release_cache(&sh->share_cache);
 	/* Remove context from the global device list. */
 	LIST_REMOVE(sh, next);
+	/* Release flow workspace objects on the last device. */
+	if (LIST_EMPTY(&mlx5_dev_ctx_list))
+		mlx5_flow_os_release_workspace();
 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
 	/*
 	 * Ensure there is no async event handler installed.
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index f110c6b714..a2a294eac2 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -696,11 +696,6 @@ static struct mlx5_flow_tunnel_info tunnels_info[] = {
 	},
 };
 
-/* Key of thread specific flow workspace data. */
-static pthread_key_t key_workspace;
-
-/* Thread specific flow workspace data once initialization data. */
-static pthread_once_t key_workspace_init;
 
 /**
@@ -5698,7 +5693,7 @@ mlx5_flow_start_default(struct rte_eth_dev *dev)
 /**
  * Release key of thread specific flow workspace data.
  */
-static void
+void
 flow_release_workspace(void *data)
 {
 	struct mlx5_flow_workspace *wks = data;
@@ -5712,16 +5707,6 @@ flow_release_workspace(void *data)
 	}
 }
 
-/**
- * Initialize key of thread specific flow workspace data.
- */
-static void
-flow_alloc_workspace(void)
-{
-	if (pthread_key_create(&key_workspace, flow_release_workspace))
-		DRV_LOG(ERR, "Can't create flow workspace data thread key.");
-}
-
 /**
  * Get thread specific current flow workspace.
  *
@@ -5732,7 +5717,7 @@ mlx5_flow_get_thread_workspace(void)
 {
 	struct mlx5_flow_workspace *data;
 
-	data = pthread_getspecific(key_workspace);
+	data = mlx5_flow_os_get_specific_workspace();
 	MLX5_ASSERT(data && data->inuse);
 	if (!data || !data->inuse)
 		DRV_LOG(ERR, "flow workspace not initialized.");
@@ -5780,11 +5765,7 @@ mlx5_flow_push_thread_workspace(void)
 	struct mlx5_flow_workspace *curr;
 	struct mlx5_flow_workspace *data;
 
-	if (pthread_once(&key_workspace_init, flow_alloc_workspace)) {
-		DRV_LOG(ERR, "Failed to init flow workspace data thread key.");
-		return NULL;
-	}
-	curr = pthread_getspecific(key_workspace);
+	curr = mlx5_flow_os_get_specific_workspace();
 	if (!curr) {
 		data = flow_alloc_thread_workspace();
 		if (!data)
@@ -5803,7 +5784,7 @@ mlx5_flow_push_thread_workspace(void)
 	data->inuse = 1;
 	data->flow_idx = 0;
 	/* Set as current workspace */
-	if (pthread_setspecific(key_workspace, data))
+	if (mlx5_flow_os_set_specific_workspace(data))
 		DRV_LOG(ERR, "Failed to set flow workspace to thread.");
 	return data;
 }
@@ -5829,7 +5810,7 @@ mlx5_flow_pop_thread_workspace(void)
 	data->inuse = 0;
 	if (!data->prev)
 		return;
-	if (pthread_setspecific(key_workspace, data->prev))
+	if (mlx5_flow_os_set_specific_workspace(data->prev))
 		DRV_LOG(ERR, "Failed to set flow workspace to thread.");
 }
 
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index ee85c9d8a5..47fb96be20 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1503,4 +1503,14 @@ void flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list,
 				  struct mlx5_cache_entry *entry);
 struct mlx5_aso_age_action *flow_aso_age_get_by_idx(struct rte_eth_dev *dev,
 						     uint32_t age_idx);
+void
+flow_release_workspace(void *data);
+int
+mlx5_flow_os_init_workspace_once(void);
+void *
+mlx5_flow_os_get_specific_workspace(void);
+int
+mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data);
+void
+mlx5_flow_os_release_workspace(void);
 #endif /* RTE_PMD_MLX5_FLOW_H_ */
diff --git a/drivers/net/mlx5/windows/mlx5_flow_os.c b/drivers/net/mlx5/windows/mlx5_flow_os.c
index acd7de61e0..b9e544e107 100644
--- a/drivers/net/mlx5/windows/mlx5_flow_os.c
+++ b/drivers/net/mlx5/windows/mlx5_flow_os.c
@@ -5,6 +5,8 @@
 #include "mlx5_flow_os.h"
 #include "mlx5_win_ext.h"
 
+#include <rte_thread.h>
+
 /**
  * Verify the @p attributes will be correctly understood by the NIC and store
  * them in the @p flow if everything is correct.
@@ -238,3 +240,175 @@ mlx5_flow_os_destroy_flow(void *drv_flow_ptr)
 {
 	return mlx5_glue->devx_fs_rule_del(drv_flow_ptr);
 }
+
+struct mlx5_workspace_thread {
+	HANDLE thread_handle;
+	struct mlx5_flow_workspace *mlx5_ws;
+	struct mlx5_workspace_thread *next;
+};
+
+/**
+ * Linked list of mlx5_workspace_thread entries, used to track the flow
+ * workspace of every thread (multi-thread support of mlx5_flow_workspace).
+ */
+static struct mlx5_workspace_thread *curr;
+static struct mlx5_workspace_thread *first;
+static rte_tls_key ws_tls_index;
+static pthread_mutex_t lock_thread_list;
+
+static bool
+mlx5_is_thread_alive(HANDLE thread_handle)
+{
+	DWORD result = WaitForSingleObject(thread_handle, 0);
+
+	if (result == WAIT_OBJECT_0)
+		return false;
+	return true;
+}
+
+static int
+mlx5_get_current_thread(HANDLE *p_handle)
+{
+	BOOL ret = DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
+		GetCurrentProcess(), p_handle, 0, 0, DUPLICATE_SAME_ACCESS);
+
+	if (!ret) {
+		RTE_LOG_WIN32_ERR("DuplicateHandle()");
+		return -1;
+	}
+	return 0;
+}
+
+static void
+mlx5_clear_thread_list(void)
+{
+	struct mlx5_workspace_thread *temp = first;
+	struct mlx5_workspace_thread *next, *prev = NULL;
+	HANDLE curr_thread;
+
+	if (!temp)
+		return;
+	if (mlx5_get_current_thread(&curr_thread)) {
+		DRV_LOG(ERR, "Failed to get current thread handle.");
+		return;
+	}
+	while (temp) {
+		next = temp->next;
+		if (temp->thread_handle != curr_thread &&
+		    !mlx5_is_thread_alive(temp->thread_handle)) {
+			if (temp == first) {
+				if (curr == temp)
+					curr = temp->next;
+				first = temp->next;
+			} else if (temp == curr) {
+				curr = prev;
+			}
+			flow_release_workspace(temp->mlx5_ws);
+			CloseHandle(temp->thread_handle);
+			free(temp);
+			if (prev)
+				prev->next = next;
+			temp = next;
+			continue;
+		}
+		prev = temp;
+		temp = temp->next;
+	}
+	CloseHandle(curr_thread);
+}
+
+/**
+ * Release workspaces before exit.
+ */
+void
+mlx5_flow_os_release_workspace(void)
+{
+	mlx5_clear_thread_list();
+	if (first) {
+		MLX5_ASSERT(!first->next);
+		flow_release_workspace(first->mlx5_ws);
+		free(first);
+	}
+	rte_thread_tls_delete_key(ws_tls_index);
+	pthread_mutex_destroy(&lock_thread_list);
+}
+
+static int
+mlx5_add_workspace_to_list(struct mlx5_flow_workspace *data)
+{
+	HANDLE curr_thread;
+	struct mlx5_workspace_thread *temp = calloc(1, sizeof(*temp));
+
+	if (!temp) {
+		DRV_LOG(ERR, "Failed to allocate thread workspace memory.");
+		return -1;
+	}
+	if (mlx5_get_current_thread(&curr_thread)) {
+		DRV_LOG(ERR, "Failed to get current thread handle.");
+		free(temp);
+		return -1;
+	}
+	temp->mlx5_ws = data;
+	temp->thread_handle = curr_thread;
+	pthread_mutex_lock(&lock_thread_list);
+	mlx5_clear_thread_list();
+	if (!first) {
+		first = curr = temp;
+	} else {
+		curr->next = temp;
+		curr = curr->next;
+	}
+	pthread_mutex_unlock(&lock_thread_list);
+	return 0;
+}
+
+int
+mlx5_flow_os_init_workspace_once(void)
+{
+	int err = rte_thread_tls_create_key(&ws_tls_index, NULL);
+
+	if (err) {
+		DRV_LOG(ERR, "Can't create flow workspace data thread key.");
+		return err;
+	}
+	pthread_mutex_init(&lock_thread_list, NULL);
+	return 0;
+}
+
+void *
+mlx5_flow_os_get_specific_workspace(void)
+{
+	return rte_thread_tls_get_value(ws_tls_index);
+}
+
+int
+mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data)
+{
+	int err = 0;
+
+	if (!rte_thread_tls_get_value(ws_tls_index)) {
+		if (rte_errno) {
+			DRV_LOG(ERR, "Failed checking specific workspace.");
+			return -1;
+		}
+		/*
+		 * set_specific_workspace when the current value is NULL
+		 * can happen only once per thread; mark this thread in
+		 * the linked list to be able to release resources later on.
+		 */
+		err = mlx5_add_workspace_to_list(data);
+		if (err) {
+			DRV_LOG(ERR, "Failed adding workspace to list.");
+			return -1;
+		}
+	}
+	if (rte_thread_tls_set_value(ws_tls_index, data)) {
+		DRV_LOG(ERR, "Failed setting specific workspace.");
+		err = -1;
+	}
+	return err;
+}
--
2.16.1.windows.4
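
For readers unfamiliar with the Windows side of this split: on Linux the per-thread
workspace is freed by the destructor registered with the TLS key, while on Windows,
where the generic TLS key carries no destructor, the driver keeps its own linked list
of duplicated thread handles and reaps the workspaces of threads that have already
exited, both when a new workspace is registered and on driver shutdown. The
standalone C sketch below illustrates only that reaping pattern; the names ws_entry,
release_ws and reap_dead_threads are invented for illustration and are not driver
symbols.

/*
 * Minimal sketch of the "reap dead threads" pattern used by the patch.
 * A list entry stores a duplicated thread HANDLE per workspace, and
 * WaitForSingleObject(handle, 0) reports whether that thread has already
 * terminated (WAIT_OBJECT_0) or is still running (WAIT_TIMEOUT).
 */
#include <windows.h>
#include <stdbool.h>
#include <stdlib.h>

struct ws_entry {
	HANDLE thread;              /* duplicated handle of the owning thread */
	void *workspace;            /* per-thread flow workspace */
	struct ws_entry *next;
};

static struct ws_entry *head;

static bool
thread_exited(HANDLE thread)
{
	/* A thread handle becomes signaled once the thread terminates,
	 * so a zero-timeout wait answers "has this thread exited?".
	 */
	return WaitForSingleObject(thread, 0) == WAIT_OBJECT_0;
}

static void
reap_dead_threads(void (*release_ws)(void *))
{
	struct ws_entry **link = &head;

	while (*link) {
		struct ws_entry *e = *link;

		if (thread_exited(e->thread)) {
			*link = e->next;          /* unlink the dead entry */
			release_ws(e->workspace); /* stand-in for flow_release_workspace() */
			CloseHandle(e->thread);   /* drop the duplicated handle */
			free(e);
		} else {
			link = &e->next;
		}
	}
}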