From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 88271A04B5; Tue, 27 Oct 2020 13:39:38 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 2DF74BE5F; Tue, 27 Oct 2020 13:29:35 +0100 (CET) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id CE60E3253 for ; Tue, 27 Oct 2020 13:28:37 +0100 (CET) Received: from Internal Mail-Server by MTLPINE1 (envelope-from suanmingm@nvidia.com) with SMTP; 27 Oct 2020 14:28:35 +0200 Received: from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 09RCRZ7U024637; Tue, 27 Oct 2020 14:28:34 +0200 From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: dev@dpdk.org Date: Tue, 27 Oct 2020 20:27:27 +0800 Message-Id: <1603801650-442376-33-git-send-email-suanmingm@nvidia.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> References: <1601984948-313027-1-git-send-email-suanmingm@nvidia.com> <1603801650-442376-1-git-send-email-suanmingm@nvidia.com> Subject: [dpdk-dev] [PATCH v3 32/34] net/mlx5: make tunnel hub list thread safe X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This commit uses a spinlock to protect the tunnel hub list when it is accessed from multiple threads. 
Signed-off-by: Suanming Mou Acked-by: Matan Azrad --- drivers/net/mlx5/mlx5_flow.c | 20 +++++++++++++++++--- drivers/net/mlx5/mlx5_flow.h | 1 + 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 5483f75..87446f7 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -669,10 +669,14 @@ enum mlx5_expansion { struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); struct mlx5_flow_tunnel *tun; + rte_spinlock_lock(&thub->sl); LIST_FOREACH(tun, &thub->tunnels, chain) { - if (&tun->item == pmd_items) + if (&tun->item == pmd_items) { + LIST_REMOVE(tun, chain); break; + } } + rte_spinlock_unlock(&thub->sl); if (!tun || num_items != 1) return rte_flow_error_set(err, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, @@ -690,10 +694,14 @@ enum mlx5_expansion { struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); struct mlx5_flow_tunnel *tun; + rte_spinlock_lock(&thub->sl); LIST_FOREACH(tun, &thub->tunnels, chain) { - if (&tun->action == pmd_actions) + if (&tun->action == pmd_actions) { + LIST_REMOVE(tun, chain); break; + } } + rte_spinlock_unlock(&thub->sl); if (!tun || num_actions != 1) return rte_flow_error_set(err, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, @@ -5871,8 +5879,12 @@ struct rte_flow * mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx); if (flow->tunnel) { struct mlx5_flow_tunnel *tunnel; + + rte_spinlock_lock(&mlx5_tunnel_hub(dev)->sl); tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id); RTE_VERIFY(tunnel); + LIST_REMOVE(tunnel, chain); + rte_spinlock_unlock(&mlx5_tunnel_hub(dev)->sl); if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED)) mlx5_flow_tunnel_free(dev, tunnel); } @@ -7931,7 +7943,6 @@ struct mlx5_meter_domains_infos * DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x", dev->data->port_id, tunnel->tunnel_id); RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED)); - LIST_REMOVE(tunnel, chain); 
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID], tunnel->tunnel_id); mlx5_hlist_destroy(tunnel->groups); @@ -8020,6 +8031,7 @@ struct mlx5_meter_domains_infos * struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev); struct mlx5_flow_tunnel *tun; + rte_spinlock_lock(&thub->sl); LIST_FOREACH(tun, &thub->tunnels, chain) { if (!memcmp(app_tunnel, &tun->app_tunnel, sizeof(*app_tunnel))) { @@ -8037,6 +8049,7 @@ struct mlx5_meter_domains_infos * ret = -ENOMEM; } } + rte_spinlock_unlock(&thub->sl); if (tun) __atomic_add_fetch(&tun->refctn, 1, __ATOMIC_RELAXED); @@ -8065,6 +8078,7 @@ int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh) if (!thub) return -ENOMEM; LIST_INIT(&thub->tunnels); + rte_spinlock_init(&thub->sl); thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES, 0, 0, mlx5_flow_tunnel_grp2tbl_create_cb, NULL, diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index 2de8988..c15f5e7 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -946,6 +946,7 @@ struct mlx5_flow_tunnel { /** PMD tunnel related context */ struct mlx5_flow_tunnel_hub { LIST_HEAD(, mlx5_flow_tunnel) tunnels; + rte_spinlock_t sl; /* Tunnel list spinlock. */ struct mlx5_hlist *groups; /** non tunnel groups */ }; -- 1.8.3.1