From: Suanming Mou <suanmingm@nvidia.com>
To: Matan Azrad, Shahaf Shuler, Viacheslav Ovsiienko
Cc: dev@dpdk.org, rasland@nvidia.com
Date: Wed, 28 Oct 2020 07:47:43 +0800
Message-Id: <1603842466-19879-33-git-send-email-suanmingm@nvidia.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1603842466-19879-1-git-send-email-suanmingm@nvidia.com>
References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com>
 <1603842466-19879-1-git-send-email-suanmingm@nvidia.com>
Subject: [dpdk-dev] [PATCH v4 32/34] net/mlx5: make tunnel hub list thread safe

This commit uses a spinlock to protect the tunnel hub list from
concurrent access by multiple threads. The tunnel is now unlinked from
the list under that lock, at the point where its reference is dropped,
instead of in the tunnel free routine, so that list lookup and removal
stay atomic.

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Matan Azrad
---
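Note for reviewers (illustration only, not part of the change): below
is a minimal standalone sketch of the pattern this patch applies, a
sys/queue.h LIST guarded by an rte_spinlock_t so that list lookup and
unlink happen atomically, with the reference-count release kept outside
the lock. The demo_* structures and helpers are invented for the
example; they are not mlx5 code.

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/queue.h>
    #include <rte_spinlock.h>

    /* Invented stand-in for struct mlx5_flow_tunnel. */
    struct demo_tunnel {
    	LIST_ENTRY(demo_tunnel) chain;
    	uint32_t id;
    	uint32_t refcnt;
    };

    /* Invented stand-in for struct mlx5_flow_tunnel_hub. */
    struct demo_hub {
    	LIST_HEAD(, demo_tunnel) tunnels;
    	rte_spinlock_t sl; /* Protects the tunnels list. */
    };

    /* Unlink the tunnel with the given id; NULL if absent. */
    static struct demo_tunnel *
    demo_hub_detach(struct demo_hub *hub, uint32_t id)
    {
    	struct demo_tunnel *tun;

    	rte_spinlock_lock(&hub->sl);
    	LIST_FOREACH(tun, &hub->tunnels, chain) {
    		if (tun->id == id) {
    			LIST_REMOVE(tun, chain);
    			break;
    		}
    	}
    	rte_spinlock_unlock(&hub->sl);
    	/* LIST_FOREACH leaves tun NULL when no entry matched. */
    	return tun;
    }

    /* Drop one reference; free the tunnel when the last one goes. */
    static void
    demo_tunnel_release(struct demo_hub *hub, uint32_t id)
    {
    	struct demo_tunnel *tun = demo_hub_detach(hub, id);

    	if (tun && !__atomic_sub_fetch(&tun->refcnt, 1,
    				       __ATOMIC_RELAXED))
    		free(tun); /* Stand-in for mlx5_flow_tunnel_free(). */
    }

A spinlock is a reasonable fit here because the critical section is a
short list walk with no blocking calls, so contending threads only
busy-wait briefly.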
 drivers/net/mlx5/mlx5_flow.c | 20 +++++++++++++++++---
 drivers/net/mlx5/mlx5_flow.h |  1 +
 2 files changed, 18 insertions(+), 3 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 1e82030..a6e60af 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -669,10 +669,14 @@ enum mlx5_expansion {
 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
 	struct mlx5_flow_tunnel *tun;
 
+	rte_spinlock_lock(&thub->sl);
 	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (&tun->item == pmd_items)
+		if (&tun->item == pmd_items) {
+			LIST_REMOVE(tun, chain);
 			break;
+		}
 	}
+	rte_spinlock_unlock(&thub->sl);
 	if (!tun || num_items != 1)
 		return rte_flow_error_set(err, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
@@ -690,10 +694,14 @@ enum mlx5_expansion {
 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
 	struct mlx5_flow_tunnel *tun;
 
+	rte_spinlock_lock(&thub->sl);
 	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (&tun->action == pmd_actions)
+		if (&tun->action == pmd_actions) {
+			LIST_REMOVE(tun, chain);
 			break;
+		}
 	}
+	rte_spinlock_unlock(&thub->sl);
 	if (!tun || num_actions != 1)
 		return rte_flow_error_set(err, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
@@ -5880,8 +5888,12 @@ struct rte_flow *
 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
 	if (flow->tunnel) {
 		struct mlx5_flow_tunnel *tunnel;
+
+		rte_spinlock_lock(&mlx5_tunnel_hub(dev)->sl);
 		tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
 		RTE_VERIFY(tunnel);
+		LIST_REMOVE(tunnel, chain);
+		rte_spinlock_unlock(&mlx5_tunnel_hub(dev)->sl);
 		if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
 			mlx5_flow_tunnel_free(dev, tunnel);
 	}
@@ -7940,7 +7952,6 @@ struct mlx5_meter_domains_infos *
 	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
 		dev->data->port_id, tunnel->tunnel_id);
 	RTE_VERIFY(!__atomic_load_n(&tunnel->refctn,
 		__ATOMIC_RELAXED));
-	LIST_REMOVE(tunnel, chain);
 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID], tunnel->tunnel_id);
 	mlx5_hlist_destroy(tunnel->groups);
@@ -8029,6 +8040,7 @@ struct mlx5_meter_domains_infos *
 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
 	struct mlx5_flow_tunnel *tun;
 
+	rte_spinlock_lock(&thub->sl);
 	LIST_FOREACH(tun, &thub->tunnels, chain) {
 		if (!memcmp(app_tunnel, &tun->app_tunnel,
 			    sizeof(*app_tunnel))) {
@@ -8046,6 +8058,7 @@ struct mlx5_meter_domains_infos *
 			ret = -ENOMEM;
 		}
 	}
+	rte_spinlock_unlock(&thub->sl);
 	if (tun)
 		__atomic_add_fetch(&tun->refctn, 1,
 				   __ATOMIC_RELAXED);
@@ -8074,6 +8087,7 @@ int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)
 	if (!thub)
 		return -ENOMEM;
 	LIST_INIT(&thub->tunnels);
+	rte_spinlock_init(&thub->sl);
 	thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES, 0,
 					 0, mlx5_flow_tunnel_grp2tbl_create_cb,
 					 NULL,
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 4a8c2bf..8ef2a85 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -945,6 +945,7 @@ struct mlx5_flow_tunnel {
 /** PMD tunnel related context */
 struct mlx5_flow_tunnel_hub {
 	LIST_HEAD(, mlx5_flow_tunnel) tunnels;
+	rte_spinlock_t sl; /* Tunnel list spinlock. */
 	struct mlx5_hlist *groups; /** non tunnel groups */
 };
 
-- 
1.8.3.1