From: Gregory Etelson
To: dev@dpdk.org
Cc: Shahaf Shuler, Viacheslav Ovsiienko, Suanming Mou
Date: Wed, 11 Nov 2020 09:14:15 +0200
Message-ID: <20201111071417.21177-3-getelson@nvidia.com>
In-Reply-To: <20201111071417.21177-1-getelson@nvidia.com>
References: <20201111071417.21177-1-getelson@nvidia.com>
Subject: [dpdk-dev] [PATCH 2/4] net/mlx5: fix tunnel offload hub multi-thread protection

The original patch removed active tunnel offload objects from the
tunnels db list, which led to a PMD crash.

The current patch isolates the tunnels db list behind a separate
accessor API that manages multi-thread (MT) protection of the tunnel
offload db.
Fixes: e4f5880 ("net/mlx5: make tunnel hub list thread safe")

Signed-off-by: Gregory Etelson
---
 drivers/net/mlx5/mlx5_flow.c | 256 +++++++++++++++++++++++++----------
 drivers/net/mlx5/mlx5_flow.h |   6 +-
 2 files changed, 192 insertions(+), 70 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 31c9d82b4a..2f01e34033 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -33,6 +33,14 @@
 #include "mlx5_common_os.h"
 #include "rte_pmd_mlx5.h"
 
+static bool
+mlx5_access_tunnel_offload_db
+	(struct rte_eth_dev *dev,
+	 bool (*match)(struct rte_eth_dev *,
+		       struct mlx5_flow_tunnel *, const void *),
+	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
+	 void (*miss)(struct rte_eth_dev *, void *),
+	 void *ctx, bool lock_op);
 static struct mlx5_flow_tunnel *
 mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id);
 static void
@@ -661,29 +669,68 @@ mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
 	return 0;
 }
 
+struct tunnel_db_element_release_ctx {
+	struct rte_flow_item *items;
+	struct rte_flow_action *actions;
+	uint32_t num_elements;
+	struct rte_flow_error *error;
+	int ret;
+};
+
+static bool
+tunnel_element_release_match(struct rte_eth_dev *dev,
+			     struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+	const struct tunnel_db_element_release_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	if (ctx->num_elements != 1)
+		return false;
+	else if (ctx->items)
+		return ctx->items == &tunnel->item;
+	else if (ctx->actions)
+		return ctx->actions == &tunnel->action;
+
+	return false;
+}
+
+static void
+tunnel_element_release_hit(struct rte_eth_dev *dev,
+			   struct mlx5_flow_tunnel *tunnel, void *x)
+{
+	struct tunnel_db_element_release_ctx *ctx = x;
+	ctx->ret = 0;
+	if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
+		mlx5_flow_tunnel_free(dev, tunnel);
+}
+
+static void
+tunnel_element_release_miss(struct rte_eth_dev *dev, void *x)
+{
+	struct tunnel_db_element_release_ctx *ctx = x;
+	RTE_SET_USED(dev);
+	ctx->ret = rte_flow_error_set(ctx->error, EINVAL,
+				      RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				      "invalid argument");
+}
+
 static int
 mlx5_flow_item_release(struct rte_eth_dev *dev,
 		       struct rte_flow_item *pmd_items,
 		       uint32_t num_items, struct rte_flow_error *err)
 {
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
+	struct tunnel_db_element_release_ctx ctx = {
+		.items = pmd_items,
+		.actions = NULL,
+		.num_elements = num_items,
+		.error = err,
+	};
 
-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (&tun->item == pmd_items) {
-			LIST_REMOVE(tun, chain);
-			break;
-		}
-	}
-	rte_spinlock_unlock(&thub->sl);
-	if (!tun || num_items != 1)
-		return rte_flow_error_set(err, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  "invalid argument");
-	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
-		mlx5_flow_tunnel_free(dev, tun);
-	return 0;
+	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
+				      tunnel_element_release_hit,
+				      tunnel_element_release_miss, &ctx, false);
+
+	return ctx.ret;
 }
 
 static int
@@ -691,25 +738,18 @@ mlx5_flow_action_release(struct rte_eth_dev *dev,
 			 struct rte_flow_action *pmd_actions,
 			 uint32_t num_actions, struct rte_flow_error *err)
 {
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
+	struct tunnel_db_element_release_ctx ctx = {
+		.items = NULL,
+		.actions = pmd_actions,
+		.num_elements = num_actions,
+		.error = err,
+	};
 
-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (&tun->action == pmd_actions) {
-			LIST_REMOVE(tun, chain);
-			break;
-		}
-	}
-	rte_spinlock_unlock(&thub->sl);
-	if (!tun || num_actions != 1)
-		return rte_flow_error_set(err, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  "invalid argument");
-	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
-		mlx5_flow_tunnel_free(dev, tun);
+	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
+				      tunnel_element_release_hit,
+				      tunnel_element_release_miss, &ctx, false);
 
-	return 0;
+	return ctx.ret;
 }
 
 static int
@@ -5889,11 +5929,8 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
 	if (flow->tunnel) {
 		struct mlx5_flow_tunnel *tunnel;
 
-		rte_spinlock_lock(&mlx5_tunnel_hub(dev)->sl);
 		tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
 		RTE_VERIFY(tunnel);
-		LIST_REMOVE(tunnel, chain);
-		rte_spinlock_unlock(&mlx5_tunnel_hub(dev)->sl);
 		if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
 			mlx5_flow_tunnel_free(dev, tunnel);
 	}
@@ -7464,28 +7501,87 @@ static void
 mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
 		      struct mlx5_flow_tunnel *tunnel)
 {
+	/* no tunnel hub spinlock protection */
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
 	struct mlx5_indexed_pool *ipool;
 
 	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
 		dev->data->port_id, tunnel->tunnel_id);
+	rte_spinlock_lock(&thub->sl);
+	LIST_REMOVE(tunnel, chain);
+	rte_spinlock_unlock(&thub->sl);
 	mlx5_hlist_destroy(tunnel->groups);
 	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_OFFLOAD];
 	mlx5_ipool_free(ipool, tunnel->tunnel_id);
 }
 
-static struct mlx5_flow_tunnel *
-mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+static bool
+mlx5_access_tunnel_offload_db
+	(struct rte_eth_dev *dev,
+	 bool (*match)(struct rte_eth_dev *,
+		       struct mlx5_flow_tunnel *, const void *),
+	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
+	 void (*miss)(struct rte_eth_dev *, void *),
+	 void *ctx, bool lock_op)
 {
+	bool verdict = false;
 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
+	struct mlx5_flow_tunnel *tunnel;
 
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (tun->tunnel_id == id)
+	rte_spinlock_lock(&thub->sl);
+	LIST_FOREACH(tunnel, &thub->tunnels, chain) {
+		verdict = match(dev, tunnel, (const void *)ctx);
+		if (verdict)
 			break;
 	}
+	if (!lock_op)
+		rte_spinlock_unlock(&thub->sl);
+	if (verdict && hit)
+		hit(dev, tunnel, ctx);
+	if (!verdict && miss)
+		miss(dev, ctx);
+	if (lock_op)
+		rte_spinlock_unlock(&thub->sl);
 
-	return tun;
+	return verdict;
+}
+
+struct tunnel_db_find_tunnel_id_ctx {
+	uint32_t tunnel_id;
+	struct mlx5_flow_tunnel *tunnel;
+};
+
+static bool
+find_tunnel_id_match(struct rte_eth_dev *dev,
+		     struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+	const struct tunnel_db_find_tunnel_id_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	return tunnel->tunnel_id == ctx->tunnel_id;
+}
+
+static void
+find_tunnel_id_hit(struct rte_eth_dev *dev,
+		   struct mlx5_flow_tunnel *tunnel, void *x)
+{
+	struct tunnel_db_find_tunnel_id_ctx *ctx = x;
+	RTE_SET_USED(dev);
+	ctx->tunnel = tunnel;
+}
+
+static struct mlx5_flow_tunnel *
+mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+{
+	struct tunnel_db_find_tunnel_id_ctx ctx = {
+		.tunnel_id = id,
+	};
+
+	mlx5_access_tunnel_offload_db(dev, find_tunnel_id_match,
+				      find_tunnel_id_hit, NULL, &ctx, true);
+
+	return ctx.tunnel;
 }
 
 static struct mlx5_flow_tunnel *
@@ -7533,38 +7629,60 @@ mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
 	return tunnel;
 }
 
+struct tunnel_db_get_tunnel_ctx {
+	const struct rte_flow_tunnel *app_tunnel;
+	struct mlx5_flow_tunnel *tunnel;
+};
+
+static bool get_tunnel_match(struct rte_eth_dev *dev,
+			     struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+	const struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	return !memcmp(ctx->app_tunnel, &tunnel->app_tunnel,
+		       sizeof(*ctx->app_tunnel));
+}
+
+static void get_tunnel_hit(struct rte_eth_dev *dev,
+			   struct mlx5_flow_tunnel *tunnel, void *x)
+{
+	/* called under tunnel spinlock protection */
+	struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	tunnel->refctn++;
+	ctx->tunnel = tunnel;
+}
+
+static void get_tunnel_miss(struct rte_eth_dev *dev, void *x)
+{
+	/* called under tunnel spinlock protection */
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+	struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+	rte_spinlock_unlock(&thub->sl);
+	ctx->tunnel = mlx5_flow_tunnel_allocate(dev, ctx->app_tunnel);
+	ctx->tunnel->refctn = 1;
+	rte_spinlock_lock(&thub->sl);
+	if (ctx->tunnel)
+		LIST_INSERT_HEAD(&thub->tunnels, ctx->tunnel, chain);
+}
+
+
 static int
 mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
 		     const struct rte_flow_tunnel *app_tunnel,
 		     struct mlx5_flow_tunnel **tunnel)
 {
-	int ret;
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
-
-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (!memcmp(app_tunnel, &tun->app_tunnel,
-			    sizeof(*app_tunnel))) {
-			*tunnel = tun;
-			ret = 0;
-			break;
-		}
-	}
-	if (!tun) {
-		tun = mlx5_flow_tunnel_allocate(dev, app_tunnel);
-		if (tun) {
-			LIST_INSERT_HEAD(&thub->tunnels, tun, chain);
-			*tunnel = tun;
-		} else {
-			ret = -ENOMEM;
-		}
-	}
-	rte_spinlock_unlock(&thub->sl);
-	if (tun)
-		__atomic_add_fetch(&tun->refctn, 1, __ATOMIC_RELAXED);
+	struct tunnel_db_get_tunnel_ctx ctx = {
+		.app_tunnel = app_tunnel,
+	};
 
-	return ret;
+	mlx5_access_tunnel_offload_db(dev, get_tunnel_match, get_tunnel_hit,
+				      get_tunnel_miss, &ctx, true);
+	*tunnel = ctx.tunnel;
+	return ctx.tunnel ? 0 : -ENOMEM;
 }
 
 void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id)
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index e3a5030785..bdf2c50090 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -950,8 +950,12 @@ struct mlx5_flow_tunnel {
 
 /** PMD tunnel related context */
 struct mlx5_flow_tunnel_hub {
+	/* Tunnels list
+	 * Access to the list MUST be MT protected
+	 */
 	LIST_HEAD(, mlx5_flow_tunnel) tunnels;
-	rte_spinlock_t sl;			/* Tunnel list spinlock. */
+	/* protect access to the tunnels list */
+	rte_spinlock_t sl;
 	struct mlx5_hlist *groups;		/** non tunnel groups */
 };
 
-- 
2.29.2
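
Illustrative sketch (not part of the patch): the standalone program below
shows the match/hit/miss accessor pattern that mlx5_access_tunnel_offload_db()
introduces. A single helper walks the tunnel db under the hub lock and
dispatches to caller-supplied callbacks, so call sites no longer touch the
list or the spinlock directly. The names (access_db, struct tunnel, find_*)
and the pthread mutex standing in for rte_spinlock_t are assumptions made for
the sketch; this is not mlx5 PMD code.

/*
 * Simplified stand-in for the tunnel offload db accessor: a plain singly
 * linked list replaces the LIST_* macros and a pthread mutex replaces the
 * hub spinlock.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct tunnel {
	uint32_t id;
	struct tunnel *next;
};

static struct tunnel *tunnels;		/* head of the tunnel db */
static pthread_mutex_t db_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Walk the db under the lock; call hit() on the first element accepted by
 * match(), or miss() if nothing matched. When lock_op is true the callbacks
 * run while the lock is still held, mirroring the driver helper.
 */
static bool
access_db(bool (*match)(struct tunnel *, const void *),
	  void (*hit)(struct tunnel *, void *),
	  void (*miss)(void *),
	  void *ctx, bool lock_op)
{
	struct tunnel *t;
	bool found = false;

	pthread_mutex_lock(&db_lock);
	for (t = tunnels; t != NULL; t = t->next) {
		found = match(t, ctx);
		if (found)
			break;
	}
	if (!lock_op)
		pthread_mutex_unlock(&db_lock);
	if (found && hit)
		hit(t, ctx);
	if (!found && miss)
		miss(ctx);
	if (lock_op)
		pthread_mutex_unlock(&db_lock);
	return found;
}

/* Example callback set: look up a tunnel by id. */
struct find_ctx {
	uint32_t id;
	struct tunnel *out;
};

static bool find_match(struct tunnel *t, const void *x)
{
	const struct find_ctx *ctx = x;
	return t->id == ctx->id;
}

static void find_hit(struct tunnel *t, void *x)
{
	struct find_ctx *ctx = x;
	ctx->out = t;
}

static void find_miss(void *x)
{
	struct find_ctx *ctx = x;
	ctx->out = NULL;
}

int main(void)
{
	struct tunnel t1 = { .id = 7, .next = NULL };
	struct find_ctx ctx = { .id = 7, .out = NULL };

	tunnels = &t1;				/* populate the db */
	access_db(find_match, find_hit, find_miss, &ctx, true);
	printf("tunnel %u %s\n", ctx.id, ctx.out ? "found" : "not found");
	return 0;
}

The lock_op flag mirrors the driver helper: when true, the hit/miss callbacks
run with the db lock still held (as mlx5_get_flow_tunnel() needs in order to
insert a newly allocated tunnel), and when false the lock is dropped before
the callbacks run (as the item/action release paths do).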