From: Gregory Etelson <getelson@nvidia.com>
To: dev@dpdk.org
CC: Viacheslav Ovsiienko, Shahaf Shuler, Suanming Mou
Date: Mon, 16 Nov 2020 11:49:04 +0200
Message-ID: <20201116094905.12873-6-getelson@nvidia.com>
X-Mailer: git-send-email 2.29.2
In-Reply-To: <20201116094905.12873-1-getelson@nvidia.com>
References: <20201111071417.21177-1-getelson@nvidia.com>
 <20201116094905.12873-1-getelson@nvidia.com>
Subject: [dpdk-dev] [PATCH v4 5/6] net/mlx5: fix tunnel offload hub multi-thread protection

The original patch removed active tunnel offload objects from the
tunnels db list without checking their reference counter value, which
could crash the PMD. The current patch isolates the tunnels db list
behind a dedicated API that manages MT (multi-thread) protection of
the tunnel offload db.
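For reviewers unfamiliar with the pattern: the new
mlx5_access_tunnel_offload_db() walks the tunnel list under the hub
spinlock and dispatches match/hit/miss callbacks, so callers never
touch the list or the lock directly. Below is a minimal,
self-contained sketch of that accessor pattern. It is an illustration
only, not mlx5 PMD code: every name in it (node, node_db,
node_db_access, find_ctx) is made up, and a pthread mutex stands in
for rte_spinlock_t so the sketch builds without DPDK.

/*
 * Sketch of a locked-list accessor with match/hit/miss callbacks.
 * Illustrative stand-ins only; not mlx5 PMD symbols.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/queue.h>

struct node {
	int id;
	int refcnt;
	LIST_ENTRY(node) chain;
};

static LIST_HEAD(, node) node_db = LIST_HEAD_INITIALIZER(node_db);
static pthread_mutex_t node_db_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Walk the db under the lock; run hit() on the first node match()
 * accepts, or miss() if none matched. With lock_op the callbacks run
 * while the lock is still held; without it the lock is dropped first,
 * so a hit() callback may call back into locking code safely.
 */
static bool
node_db_access(bool (*match)(struct node *, const void *),
	       void (*hit)(struct node *, void *),
	       void (*miss)(void *),
	       void *ctx, bool lock_op)
{
	struct node *n;
	bool found = false;

	pthread_mutex_lock(&node_db_lock);
	LIST_FOREACH(n, &node_db, chain) {
		found = match(n, ctx);
		if (found)
			break;
	}
	if (!lock_op)
		pthread_mutex_unlock(&node_db_lock);
	if (found && hit)
		hit(n, ctx);
	if (!found && miss)
		miss(ctx);
	if (lock_op)
		pthread_mutex_unlock(&node_db_lock);
	return found;
}

struct find_ctx {
	int id;			/* in: key to look up */
	struct node *node;	/* out: matched node, if any */
};

static bool match_id(struct node *n, const void *x)
{
	const struct find_ctx *c = x;

	return n->id == c->id;
}

static void take_ref(struct node *n, void *x)
{
	struct find_ctx *c = x;

	n->refcnt++;	/* safe: lock_op keeps node_db_lock held here */
	c->node = n;
}

int main(void)
{
	static struct node a = { .id = 7, .refcnt = 0 };
	struct find_ctx c = { .id = 7, .node = NULL };

	LIST_INSERT_HEAD(&node_db, &a, chain);
	if (node_db_access(match_id, take_ref, NULL, &c, true))
		printf("found id %d, refcnt now %d\n", c.node->id,
		       c.node->refcnt);
	return 0;
}

In the patch itself, the lookup and get paths pass lock_op == true so
hit()/miss() run with the lock held, while the release paths pass
lock_op == false, dropping the lock before hit() runs so the matched
tunnel can be freed without holding it.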
Fixes: e4f5880 ("net/mlx5: make tunnel hub list thread safe")
Signed-off-by: Gregory Etelson
Acked-by: Viacheslav Ovsiienko
---
 drivers/net/mlx5/mlx5_flow.c | 266 +++++++++++++++++++++++++----------
 drivers/net/mlx5/mlx5_flow.h |   6 +-
 2 files changed, 195 insertions(+), 77 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index cd94a73e53..eea185ba82 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -5639,11 +5639,8 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
 	if (flow->tunnel) {
 		struct mlx5_flow_tunnel *tunnel;
 
-		rte_spinlock_lock(&mlx5_tunnel_hub(dev)->sl);
 		tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
 		RTE_VERIFY(tunnel);
-		LIST_REMOVE(tunnel, chain);
-		rte_spinlock_unlock(&mlx5_tunnel_hub(dev)->sl);
 		if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
 			mlx5_flow_tunnel_free(dev, tunnel);
 	}
@@ -7220,6 +7217,15 @@ union tunnel_offload_mark {
 	};
 };
 
+static bool
+mlx5_access_tunnel_offload_db
+	(struct rte_eth_dev *dev,
+	 bool (*match)(struct rte_eth_dev *,
+		       struct mlx5_flow_tunnel *, const void *),
+	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
+	 void (*miss)(struct rte_eth_dev *, void *),
+	 void *ctx, bool lock_op);
+
 static int
 flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
 			     struct rte_flow *flow,
@@ -7441,18 +7447,72 @@ mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
 	mlx5_ipool_free(ipool, tunnel->tunnel_id);
 }
 
-static struct mlx5_flow_tunnel *
-mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+static bool
+mlx5_access_tunnel_offload_db
+	(struct rte_eth_dev *dev,
+	 bool (*match)(struct rte_eth_dev *,
+		       struct mlx5_flow_tunnel *, const void *),
+	 void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
+	 void (*miss)(struct rte_eth_dev *, void *),
+	 void *ctx, bool lock_op)
 {
+	bool verdict = false;
 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
+	struct mlx5_flow_tunnel *tunnel;
 
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (tun->tunnel_id == id)
+	rte_spinlock_lock(&thub->sl);
+	LIST_FOREACH(tunnel, &thub->tunnels, chain) {
+		verdict = match(dev, tunnel, (const void *)ctx);
+		if (verdict)
 			break;
 	}
+	if (!lock_op)
+		rte_spinlock_unlock(&thub->sl);
+	if (verdict && hit)
+		hit(dev, tunnel, ctx);
+	if (!verdict && miss)
+		miss(dev, ctx);
+	if (lock_op)
+		rte_spinlock_unlock(&thub->sl);
 
-	return tun;
+	return verdict;
+}
+
+struct tunnel_db_find_tunnel_id_ctx {
+	uint32_t tunnel_id;
+	struct mlx5_flow_tunnel *tunnel;
+};
+
+static bool
+find_tunnel_id_match(struct rte_eth_dev *dev,
+		     struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+	const struct tunnel_db_find_tunnel_id_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	return tunnel->tunnel_id == ctx->tunnel_id;
+}
+
+static void
+find_tunnel_id_hit(struct rte_eth_dev *dev,
+		   struct mlx5_flow_tunnel *tunnel, void *x)
+{
+	struct tunnel_db_find_tunnel_id_ctx *ctx = x;
+	RTE_SET_USED(dev);
+	ctx->tunnel = tunnel;
+}
+
+static struct mlx5_flow_tunnel *
+mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
+{
+	struct tunnel_db_find_tunnel_id_ctx ctx = {
+		.tunnel_id = id,
+	};
+
+	mlx5_access_tunnel_offload_db(dev, find_tunnel_id_match,
+				      find_tunnel_id_hit, NULL, &ctx, true);
+
+	return ctx.tunnel;
 }
 
 static struct mlx5_flow_tunnel *
@@ -7500,38 +7560,60 @@ mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
 	return tunnel;
 }
 
+struct tunnel_db_get_tunnel_ctx {
+	const struct rte_flow_tunnel *app_tunnel;
+	struct mlx5_flow_tunnel *tunnel;
+};
+
+static bool get_tunnel_match(struct rte_eth_dev *dev,
+			     struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+	const struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	return !memcmp(ctx->app_tunnel, &tunnel->app_tunnel,
+		       sizeof(*ctx->app_tunnel));
+}
+
+static void get_tunnel_hit(struct rte_eth_dev *dev,
+			   struct mlx5_flow_tunnel *tunnel, void *x)
+{
+	/* called under tunnel spinlock protection */
+	struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	tunnel->refctn++;
+	ctx->tunnel = tunnel;
+}
+
+static void get_tunnel_miss(struct rte_eth_dev *dev, void *x)
+{
+	/* called under tunnel spinlock protection */
+	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
+	struct tunnel_db_get_tunnel_ctx *ctx = x;
+
+	rte_spinlock_unlock(&thub->sl);
+	ctx->tunnel = mlx5_flow_tunnel_allocate(dev, ctx->app_tunnel);
+	ctx->tunnel->refctn = 1;
+	rte_spinlock_lock(&thub->sl);
+	if (ctx->tunnel)
+		LIST_INSERT_HEAD(&thub->tunnels, ctx->tunnel, chain);
+}
+
+
 static int
 mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
 		     const struct rte_flow_tunnel *app_tunnel,
 		     struct mlx5_flow_tunnel **tunnel)
 {
-	int ret;
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
-
-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (!memcmp(app_tunnel, &tun->app_tunnel,
-			    sizeof(*app_tunnel))) {
-			*tunnel = tun;
-			ret = 0;
-			break;
-		}
-	}
-	if (!tun) {
-		tun = mlx5_flow_tunnel_allocate(dev, app_tunnel);
-		if (tun) {
-			LIST_INSERT_HEAD(&thub->tunnels, tun, chain);
-			*tunnel = tun;
-		} else {
-			ret = -ENOMEM;
-		}
-	}
-	rte_spinlock_unlock(&thub->sl);
-	if (tun)
-		__atomic_add_fetch(&tun->refctn, 1, __ATOMIC_RELAXED);
+	struct tunnel_db_get_tunnel_ctx ctx = {
+		.app_tunnel = app_tunnel,
+	};
 
-	return ret;
+	mlx5_access_tunnel_offload_db(dev, get_tunnel_match, get_tunnel_hit,
+				      get_tunnel_miss, &ctx, true);
+	*tunnel = ctx.tunnel;
+	return ctx.tunnel ? 0 : -ENOMEM;
 }
 
 void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id)
@@ -7631,56 +7713,88 @@ mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
 	*num_of_items = 1;
 	return 0;
 }
+
+struct tunnel_db_element_release_ctx {
+	struct rte_flow_item *items;
+	struct rte_flow_action *actions;
+	uint32_t num_elements;
+	struct rte_flow_error *error;
+	int ret;
+};
+
+static bool
+tunnel_element_release_match(struct rte_eth_dev *dev,
+			     struct mlx5_flow_tunnel *tunnel, const void *x)
+{
+	const struct tunnel_db_element_release_ctx *ctx = x;
+
+	RTE_SET_USED(dev);
+	if (ctx->num_elements != 1)
+		return false;
+	else if (ctx->items)
+		return ctx->items == &tunnel->item;
+	else if (ctx->actions)
+		return ctx->actions == &tunnel->action;
+
+	return false;
+}
+
+static void
+tunnel_element_release_hit(struct rte_eth_dev *dev,
+			   struct mlx5_flow_tunnel *tunnel, void *x)
+{
+	struct tunnel_db_element_release_ctx *ctx = x;
+	ctx->ret = 0;
+	if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
+		mlx5_flow_tunnel_free(dev, tunnel);
+}
+
+static void
+tunnel_element_release_miss(struct rte_eth_dev *dev, void *x)
+{
+	struct tunnel_db_element_release_ctx *ctx = x;
+	RTE_SET_USED(dev);
+	ctx->ret = rte_flow_error_set(ctx->error, EINVAL,
+				      RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				      "invalid argument");
+}
+
 static int
 mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
-			      struct rte_flow_item *pmd_items,
-			      uint32_t num_items, struct rte_flow_error *err)
-{
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
+			      struct rte_flow_item *pmd_items,
+			      uint32_t num_items, struct rte_flow_error *err)
+{
+	struct tunnel_db_element_release_ctx ctx = {
+		.items = pmd_items,
+		.actions = NULL,
+		.num_elements = num_items,
+		.error = err,
+	};
 
-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (&tun->item == pmd_items) {
-			LIST_REMOVE(tun, chain);
-			break;
-		}
-	}
-	rte_spinlock_unlock(&thub->sl);
-	if (!tun || num_items != 1)
-		return rte_flow_error_set(err, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  "invalid argument");
-	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
-		mlx5_flow_tunnel_free(dev, tun);
-	return 0;
+	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
+				      tunnel_element_release_hit,
+				      tunnel_element_release_miss, &ctx, false);
+
+	return ctx.ret;
 }
 
 static int
 mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
-				struct rte_flow_action *pmd_actions,
-				uint32_t num_actions,
-				struct rte_flow_error *err)
-{
-	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
-	struct mlx5_flow_tunnel *tun;
+				struct rte_flow_action *pmd_actions,
+				uint32_t num_actions, struct rte_flow_error *err)
+{
+	struct tunnel_db_element_release_ctx ctx = {
+		.items = NULL,
+		.actions = pmd_actions,
+		.num_elements = num_actions,
+		.error = err,
+	};
 
-	rte_spinlock_lock(&thub->sl);
-	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (&tun->action == pmd_actions) {
-			LIST_REMOVE(tun, chain);
-			break;
-		}
-	}
-	rte_spinlock_unlock(&thub->sl);
-	if (!tun || num_actions != 1)
-		return rte_flow_error_set(err, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-					  "invalid argument");
-	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
-		mlx5_flow_tunnel_free(dev, tun);
+	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
+				      tunnel_element_release_hit,
+				      tunnel_element_release_miss, &ctx, false);
 
-	return 0;
+	return ctx.ret;
 }
 
 static int
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index c33c0fee7c..f64384217f 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -950,8 +950,12 @@ struct mlx5_flow_tunnel {
 
 /** PMD tunnel related context */
 struct mlx5_flow_tunnel_hub {
+	/* Tunnels list
+	 * Access to the list MUST be MT protected
+	 */
 	LIST_HEAD(, mlx5_flow_tunnel) tunnels;
-	rte_spinlock_t sl;		/* Tunnel list spinlock. */
+	/* protect access to the tunnels list */
+	rte_spinlock_t sl;
 	struct mlx5_hlist *groups;	/** non tunnel groups */
 };
 
-- 
2.29.2