From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path:
Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129])
 by dpdk.org (Postfix) with ESMTP id 72DAE4C9F
 for ; Thu, 4 Apr 2019 07:14:55 +0200 (CEST)
Received: from Internal Mail-Server by MTLPINE1 (envelope-from
 shahafs@mellanox.com) with ESMTPS (AES256-SHA encrypted);
 4 Apr 2019 08:14:50 +0300
Received: from unicorn01.mtl.labs.mlnx. (unicorn01.mtl.labs.mlnx [10.7.12.62])
 by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id x345Eogk010614;
 Thu, 4 Apr 2019 08:14:50 +0300
From: Shahaf Shuler
To: wenzhuo.lu@intel.com, jingjing.wu@intel.com, bernard.iremonger@intel.com
Cc: dev@dpdk.org, rasland@mellanox.com, thomas@monjalon.net,
 ferruh.yigit@intel.com, stable@dpdk.org
Date: Thu, 4 Apr 2019 08:14:41 +0300
Message-Id: <12ce667683e47d94bb0debea9af481b87a3fba6f.1554354506.git.shahafs@mellanox.com>
X-Mailer: git-send-email 2.12.0
In-Reply-To:
References:
Subject: [dpdk-dev] [PATCH v2 1/3] app/testpmd: fix mempool free on exit
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions
X-List-Received-Date: Thu, 04 Apr 2019 05:14:56 -0000

Allocated mempools were never freed. It is bad practice.

Fixes: af75078fece3 ("first public release")
Cc: stable@dpdk.org

Signed-off-by: Shahaf Shuler
---
 app/test-pmd/testpmd.c | 25 +++++++++++++++++++------
 app/test-pmd/testpmd.h |  2 ++
 2 files changed, 21 insertions(+), 6 deletions(-)

diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 40c873b972..5c68eb9ec6 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -188,6 +188,8 @@ struct fwd_engine * fwd_engines[] = {
 	NULL,
 };
 
+struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
+
 struct fwd_config cur_fwd_config;
 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
 uint32_t retry_enabled;
@@ -835,7 +837,7 @@ setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
 /*
  * Configuration initialisation done once at init time.
  */
-static void
+static struct rte_mempool *
 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
 		 unsigned int socket_id)
 {
@@ -904,6 +906,7 @@ mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
 			rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
 		}
 	}
+	return rte_mp;
 
 err:
 	if (rte_mp == NULL) {
@@ -913,6 +916,7 @@ mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
 	} else if (verbose_level > 0) {
 		rte_mempool_dump(stdout, rte_mp);
 	}
+	return NULL;
 }
 
 /*
@@ -1130,14 +1134,18 @@ init_config(void)
 		uint8_t i;
 
 		for (i = 0; i < num_sockets; i++)
-			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
-					 socket_ids[i]);
+			mempools[i] = mbuf_pool_create(mbuf_data_size,
+						       nb_mbuf_per_pool,
+						       socket_ids[i]);
 	} else {
 		if (socket_num == UMA_NO_CONFIG)
-			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
+			mempools[0] = mbuf_pool_create(mbuf_data_size,
+						       nb_mbuf_per_pool, 0);
 		else
-			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
-					 socket_num);
+			mempools[socket_num] = mbuf_pool_create
+						(mbuf_data_size,
+						 nb_mbuf_per_pool,
+						 socket_num);
 	}
 
 	init_port_config();
@@ -2396,6 +2404,7 @@ pmd_test_exit(void)
 	struct rte_device *device;
 	portid_t pt_id;
 	int ret;
+	int i;
 
 	if (test_done == 0)
 		stop_packet_forwarding();
@@ -2449,6 +2458,10 @@ pmd_test_exit(void)
 			return;
 		}
 	}
+	for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
+		if (mempools[i])
+			rte_mempool_free(mempools[i]);
+	}
 
 	printf("\nBye...\n");
 }
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index a45988ebc5..84ce8ffa2f 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -264,6 +264,8 @@ extern struct fwd_engine ieee1588_fwd_engine;
 
 extern struct fwd_engine * fwd_engines[]; /**< NULL terminated array. */
 
+extern struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
+
 /**
  * Forwarding Configuration
  *
-- 
2.12.0
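
For readers outside the thread, below is a minimal standalone sketch of the
pattern this patch applies: keep each created mempool in a per-socket array
so it can be released at shutdown instead of leaking. This is not testpmd
code; the pools[] array, the pool name, and the pool sizes are illustrative
assumptions, and only stock DPDK APIs are used.

/* Sketch: create per-socket mbuf pools, track them, free them on exit. */
#include <stdio.h>
#include <rte_eal.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

static struct rte_mempool *pools[RTE_MAX_NUMA_NODES];

int
main(int argc, char **argv)
{
	char name[RTE_MEMPOOL_NAMESIZE];
	unsigned int socket;

	if (rte_eal_init(argc, argv) < 0) {
		fprintf(stderr, "EAL init failed\n");
		return 1;
	}

	/*
	 * Create one pool per possible NUMA socket (sizes are arbitrary
	 * here). Creation may legitimately fail on a socket that has no
	 * memory, in which case the slot simply stays NULL.
	 */
	for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
		snprintf(name, sizeof(name), "mbuf_pool_%u", socket);
		pools[socket] = rte_pktmbuf_pool_create(name, 8192, 256, 0,
				RTE_MBUF_DEFAULT_BUF_SIZE, socket);
	}

	/* ... forwarding work would happen here ... */

	/*
	 * The point of the fix: free every pool on the way out.
	 * rte_mempool_free() accepts NULL, so no guard is strictly
	 * needed, though the patch checks the pointer explicitly.
	 */
	for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
		rte_mempool_free(pools[socket]);

	rte_eal_cleanup();
	return 0;
}

The patch gives testpmd the same shape: mbuf_pool_create() now returns the
pool it creates, init_config() records it in mempools[] indexed by socket,
and pmd_test_exit() walks the array and frees each entry before exiting.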