From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 6D367A0577; Wed, 15 Apr 2020 08:40:54 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id E1B371D17A; Wed, 15 Apr 2020 08:40:16 +0200 (CEST) Received: from git-send-mailer.rdmz.labs.mlnx (unknown [37.142.13.130]) by dpdk.org (Postfix) with ESMTP id 46D171D17E for ; Wed, 15 Apr 2020 08:40:13 +0200 (CEST) From: Suanming Mou To: Matan Azrad , Shahaf Shuler , Viacheslav Ovsiienko Cc: wentaoc@mellanox.com, rasland@mellanox.com, dev@dpdk.org Date: Wed, 15 Apr 2020 14:39:52 +0800 Message-Id: <1586932797-99533-6-git-send-email-suanmingm@mellanox.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1586932797-99533-1-git-send-email-suanmingm@mellanox.com> References: <1586932797-99533-1-git-send-email-suanmingm@mellanox.com> Subject: [dpdk-dev] [PATCH 05/10] net/mlx5: allocate rte flow from indexed pool X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Currently, an rte flow with an RSS action may have a different queue number, so the indexed memory pool is not suitable for flows with RSS actions. For flows without an RSS action, the size is fixed. Allocating the non-RSS rte flow memory from the indexed memory pool helps save the MALLOC_ELEM_OVERHEAD, which is more than 64 bytes per rte flow. 
Signed-off-by: Suanming Mou --- drivers/net/mlx5/mlx5.c | 10 ++++++++++ drivers/net/mlx5/mlx5.h | 1 + drivers/net/mlx5/mlx5_flow.c | 24 ++++++++++++++++++------ drivers/net/mlx5/mlx5_flow.h | 1 + 4 files changed, 30 insertions(+), 6 deletions(-) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 3ca2ed0..1493d25 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -277,6 +277,16 @@ struct mlx5_dev_spawn_data { .free = rte_free, .type = "mlx5_flow_handle_ipool", }, + { + .size = (sizeof(struct rte_flow) + + RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *))), + .trunk_size = 4096, + .need_lock = 1, + .release_mem_en = 1, + .malloc = rte_malloc_socket, + .free = rte_free, + .type = "rte_flow_ipool", + }, }; diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 41c87ec..378a13f 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -61,6 +61,7 @@ enum mlx5_ipool_index { #endif MLX5_IPOOL_HRXQ, /* Pool for hrxq resource. */ MLX5_IPOOL_MLX5_FLOW, /* Pool for mlx5 flow handle. */ + MLX5_IPOOL_RTE_FLOW, /* Pool for rte_flow. 
*/ MLX5_IPOOL_MAX, }; diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 9f79031..cc2b207 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -4225,6 +4225,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, struct mlx5_flow_rss_meta *rss_meta = priv->rss_meta; const struct rte_flow_action *p_actions_rx = actions; uint32_t i; + uint32_t idx = 0; uint32_t flow_size; int hairpin_flow = 0; uint32_t hairpin_id = 0; @@ -4247,17 +4248,21 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, } flow_size = sizeof(struct rte_flow); rss = flow_get_rss_action(p_actions_rx); - if (rss) + if (rss) { flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t), sizeof(void *)); - else - flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *)); - flow = rte_calloc(__func__, 1, flow_size, 0); + flow = rte_calloc(__func__, 1, flow_size, 0); + } else { + flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], + &idx); + } if (!flow) { rte_errno = ENOMEM; goto error_before_flow; } flow->drv_type = flow_get_drv_type(dev, attr); + if (idx) + flow->idx = idx; if (hairpin_id != 0) flow->hairpin_flow_id = hairpin_id; MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN && @@ -4368,7 +4373,10 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, ret = rte_errno; /* Save rte_errno before cleanup. */ flow_mreg_del_copy_action(dev, flow); flow_drv_destroy(dev, flow); - rte_free(flow); + if (idx) + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx); + else + rte_free(flow); rte_errno = ret; /* Restore rte_errno. 
*/ error_before_flow: ret = rte_errno; @@ -4492,7 +4500,11 @@ struct rte_flow * TAILQ_REMOVE(list, flow, next); flow_mreg_del_copy_action(dev, flow); rte_free(flow->fdir); - rte_free(flow); + if (flow->idx) + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], + flow->idx); + else + rte_free(flow); } /** diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h index 4516b51..e220647 100644 --- a/drivers/net/mlx5/mlx5_flow.h +++ b/drivers/net/mlx5/mlx5_flow.h @@ -765,6 +765,7 @@ struct rte_flow { struct mlx5_fdir *fdir; /**< Pointer to associated FDIR if any. */ uint32_t hairpin_flow_id; /**< The flow id used for hairpin. */ uint32_t copy_applied:1; /**< The MARK copy Flow os applied. */ + uint32_t idx; /**< Index to the rte flow allocated from indexed pool. */ }; typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev, -- 1.8.3.1