From: Gregory Etelson
CC: Shahaf Shuler, Viacheslav Ovsiienko, Xueming Li
Subject: [dpdk-dev] [PATCH 1/4] net/mlx5: fix offloaded tunnel allocation
Date: Wed, 11 Nov 2020 09:14:14 +0200
Message-ID: <20201111071417.21177-2-getelson@nvidia.com>
X-Mailer: git-send-email 2.29.2
In-Reply-To: <20201111071417.21177-1-getelson@nvidia.com>
References: <20201111071417.21177-1-getelson@nvidia.com>

The original patch allocated tunnel offload objects with invalid
indexes. As a result, PMD tunnel object allocation failed.

With this patch, the indexed pool provides both the index and the
memory for a new tunnel offload object. The tunnel offload ipool is
also moved to DV-enabled code only.

Fixes: f2e8093 ("net/mlx5: use indexed pool as id generator")

Signed-off-by: Gregory Etelson
---
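Note for reviewers (illustrative, not part of the commit): the sketch
below shows the allocation pattern this patch switches to, assuming
the mlx5_ipool_zmalloc()/mlx5_ipool_free() prototypes from
drivers/net/mlx5/mlx5_utils.h; the helper name itself is hypothetical.
A single mlx5_ipool_zmalloc() call returns both the zeroed object and
its pool index, so the object and its ID can no longer diverge the way
the old separate mlx5_ipool_malloc() + mlx5_malloc() pair allowed:

    #include "mlx5.h"
    #include "mlx5_utils.h"

    /* Hypothetical helper, mirroring the flow this patch adds. */
    static struct mlx5_flow_tunnel *
    tunnel_zalloc_sketch(struct mlx5_dev_ctx_shared *sh)
    {
            struct mlx5_indexed_pool *ipool =
                    sh->ipool[MLX5_IPOOL_TUNNEL_OFFLOAD];
            struct mlx5_flow_tunnel *tunnel;
            uint32_t id;

            /* One call yields zeroed memory plus the index it lives at. */
            tunnel = mlx5_ipool_zmalloc(ipool, &id);
            if (!tunnel)
                    return NULL;
            if (id >= MLX5_MAX_TUNNELS) {
                    /* Out-of-range index: return the entry to the pool. */
                    mlx5_ipool_free(ipool, id);
                    return NULL;
            }
            tunnel->tunnel_id = id;
            return tunnel;
    }

The designated initializers in mlx5_ipool_cfg[] serve the same goal:
each pool configuration is pinned to its enum mlx5_ipool_index slot,
so entries inside the #ifdef HAVE_IBV_FLOW_DV_SUPPORT block can be
added or removed without silently shifting the array positions of the
entries that follow.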
 drivers/net/mlx5/mlx5.c      | 50 ++++++++++++++++++------------------
 drivers/net/mlx5/mlx5.h      |  4 +--
 drivers/net/mlx5/mlx5_flow.c | 41 ++++++++++-------------------
 3 files changed, 40 insertions(+), 55 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 43344391df..e1faa819a3 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -186,7 +186,7 @@ static pthread_mutex_t mlx5_dev_ctx_list_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-	{
+	[MLX5_IPOOL_DECAP_ENCAP] = {
 		.size = sizeof(struct mlx5_flow_dv_encap_decap_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -197,7 +197,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_encap_decap_ipool",
 	},
-	{
+	[MLX5_IPOOL_PUSH_VLAN] = {
 		.size = sizeof(struct mlx5_flow_dv_push_vlan_action_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -208,7 +208,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_push_vlan_ipool",
 	},
-	{
+	[MLX5_IPOOL_TAG] = {
 		.size = sizeof(struct mlx5_flow_dv_tag_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -219,7 +219,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_tag_ipool",
 	},
-	{
+	[MLX5_IPOOL_PORT_ID] = {
 		.size = sizeof(struct mlx5_flow_dv_port_id_action_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -230,7 +230,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_port_id_ipool",
 	},
-	{
+	[MLX5_IPOOL_JUMP] = {
 		.size = sizeof(struct mlx5_flow_tbl_data_entry),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -241,7 +241,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_jump_ipool",
 	},
-	{
+	[MLX5_IPOOL_SAMPLE] = {
 		.size = sizeof(struct mlx5_flow_dv_sample_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -252,7 +252,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_sample_ipool",
 	},
-	{
+	[MLX5_IPOOL_DEST_ARRAY] = {
 		.size = sizeof(struct mlx5_flow_dv_dest_array_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -263,8 +263,19 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_dest_array_ipool",
 	},
+	[MLX5_IPOOL_TUNNEL_OFFLOAD] = {
+		.size = sizeof(struct mlx5_flow_tunnel),
+		.need_lock = 1,
+		.release_mem_en = 1,
+		.type = "mlx5_tunnel_offload",
+	},
+	[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID] = {
+		.size = 0,
+		.need_lock = 1,
+		.type = "mlx5_flow_tnl_tbl_ipool",
+	},
 #endif
-	{
+	[MLX5_IPOOL_MTR] = {
 		.size = sizeof(struct mlx5_flow_meter),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -275,7 +286,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_meter_ipool",
 	},
-	{
+	[MLX5_IPOOL_MCP] = {
 		.size = sizeof(struct mlx5_flow_mreg_copy_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -286,7 +297,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_mcp_ipool",
 	},
-	{
+	[MLX5_IPOOL_HRXQ] = {
 		.size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -297,7 +308,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_hrxq_ipool",
 	},
-	{
+	[MLX5_IPOOL_MLX5_FLOW] = {
 		/*
 		 * MLX5_IPOOL_MLX5_FLOW size varies for DV and VERBS flows.
 		 * It set in run time according to PCI function configuration.
@@ -312,7 +323,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_flow_handle_ipool",
 	},
-	{
+	[MLX5_IPOOL_RTE_FLOW] = {
 		.size = sizeof(struct rte_flow),
 		.trunk_size = 4096,
 		.need_lock = 1,
@@ -321,22 +332,12 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "rte_flow_ipool",
 	},
-	{
+	[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID] = {
 		.size = 0,
 		.need_lock = 1,
 		.type = "mlx5_flow_rss_id_ipool",
 	},
-	{
-		.size = 0,
-		.need_lock = 1,
-		.type = "mlx5_flow_tnl_flow_ipool",
-	},
-	{
-		.size = 0,
-		.need_lock = 1,
-		.type = "mlx5_flow_tnl_tbl_ipool",
-	},
-	{
+	[MLX5_IPOOL_RSS_SHARED_ACTIONS] = {
 		.size = sizeof(struct mlx5_shared_action_rss),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -347,7 +348,6 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_shared_action_rss",
 	},
-
 };
 
 
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 7ee63a7a14..af097d6a7e 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -44,6 +44,8 @@ enum mlx5_ipool_index {
 	MLX5_IPOOL_JUMP, /* Pool for jump resource. */
 	MLX5_IPOOL_SAMPLE, /* Pool for sample resource. */
 	MLX5_IPOOL_DEST_ARRAY, /* Pool for destination array resource. */
+	MLX5_IPOOL_TUNNEL_OFFLOAD, /* Pool for tunnel offload context */
+	MLX5_IPOOL_TUNNEL_FLOW_TBL_ID, /* Pool for tunnel table ID. */
 #endif
 	MLX5_IPOOL_MTR, /* Pool for meter resource. */
 	MLX5_IPOOL_MCP, /* Pool for metadata resource. */
@@ -51,8 +53,6 @@ enum mlx5_ipool_index {
 	MLX5_IPOOL_MLX5_FLOW, /* Pool for mlx5 flow handle. */
 	MLX5_IPOOL_RTE_FLOW, /* Pool for rte_flow. */
 	MLX5_IPOOL_RSS_EXPANTION_FLOW_ID, /* Pool for Queue/RSS flow ID. */
-	MLX5_IPOOL_TUNNEL_ID, /* Pool for flow tunnel ID. */
-	MLX5_IPOOL_TNL_TBL_ID, /* Pool for tunnel table ID. */
 	MLX5_IPOOL_RSS_SHARED_ACTIONS, /* Pool for RSS shared actions. */
 	MLX5_IPOOL_MAX,
 };
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 92adfcacca..31c9d82b4a 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -6934,7 +6934,7 @@ mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,
 	struct mlx5_dev_ctx_shared *sh = list->ctx;
 	struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
 
-	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID],
 			tunnel_flow_tbl_to_id(tte->flow_table));
 	mlx5_free(tte);
 }
@@ -6952,12 +6952,12 @@ mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list,
 			  SOCKET_ID_ANY);
 	if (!tte)
 		goto err;
-	mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+	mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID],
 			  &tte->flow_table);
 	if (tte->flow_table >= MLX5_MAX_TABLES) {
 		DRV_LOG(ERR, "Tunnel TBL ID %d exceed max limit.",
 			tte->flow_table);
-		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID],
 				tte->flow_table);
 		goto err;
 	} else if (!tte->flow_table) {
@@ -7465,14 +7465,13 @@ mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
 		      struct mlx5_flow_tunnel *tunnel)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_indexed_pool *ipool;
 
 	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
 		dev->data->port_id, tunnel->tunnel_id);
-	RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));
-	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],
-			tunnel->tunnel_id);
 	mlx5_hlist_destroy(tunnel->groups);
-	mlx5_free(tunnel);
+	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_OFFLOAD];
+	mlx5_ipool_free(ipool, tunnel->tunnel_id);
 }
 
 static struct mlx5_flow_tunnel *
@@ -7494,39 +7493,25 @@ mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
 			  const struct rte_flow_tunnel *app_tunnel)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_indexed_pool *ipool;
 	struct mlx5_flow_tunnel *tunnel;
 	uint32_t id;
 
-	mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
-			  &id);
+	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_OFFLOAD];
+	tunnel = mlx5_ipool_zmalloc(ipool, &id);
+	if (!tunnel)
+		return NULL;
 	if (id >= MLX5_MAX_TUNNELS) {
-		mlx5_ipool_free(priv->sh->ipool
-				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
+		mlx5_ipool_free(ipool, id);
 		DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id);
 		return NULL;
-	} else if (!id) {
-		return NULL;
-	}
-	/**
-	 * mlx5 flow tunnel is an auxlilary data structure
-	 * It's not part of IO. No need to allocate it from
-	 * huge pages pools dedicated for IO
-	 */
-	tunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel),
-			     0, SOCKET_ID_ANY);
-	if (!tunnel) {
-		mlx5_ipool_free(priv->sh->ipool
-				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
-		return NULL;
 	}
 	tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
 					   mlx5_flow_tunnel_grp2tbl_create_cb,
 					   NULL,
 					   mlx5_flow_tunnel_grp2tbl_remove_cb);
 	if (!tunnel->groups) {
-		mlx5_ipool_free(priv->sh->ipool
-				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
-		mlx5_free(tunnel);
+		mlx5_ipool_free(ipool, id);
 		return NULL;
 	}
 	tunnel->groups->ctx = priv->sh;
-- 
2.29.2