From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <aburakov@ecsmtp.ir.intel.com>
Received: from mga01.intel.com (mga01.intel.com [192.55.52.88])
 by dpdk.org (Postfix) with ESMTP id BF18C1B015;
 Wed, 17 Jan 2018 09:36:20 +0100 (CET)
X-Amp-Result: SKIPPED(no attachment in message)
X-Amp-File-Uploaded: False
Received: from orsmga008.jf.intel.com ([10.7.209.65])
 by fmsmga101.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;
 17 Jan 2018 00:36:19 -0800
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.46,372,1511856000"; d="scan'208";a="10762543"
Received: from irvmail001.ir.intel.com ([163.33.26.43])
 by orsmga008.jf.intel.com with ESMTP; 17 Jan 2018 00:36:18 -0800
Received: from sivswdev01.ir.intel.com (sivswdev01.ir.intel.com
 [10.237.217.45])
 by irvmail001.ir.intel.com (8.14.3/8.13.6/MailSET/Hub) with ESMTP id
 w0H8aHXp003141; Wed, 17 Jan 2018 08:36:17 GMT
Received: from sivswdev01.ir.intel.com (localhost [127.0.0.1])
 by sivswdev01.ir.intel.com with ESMTP id w0H8aHqp003662;
 Wed, 17 Jan 2018 08:36:17 GMT
Received: (from aburakov@localhost)
 by sivswdev01.ir.intel.com with LOCAL id w0H8aHSo003658;
 Wed, 17 Jan 2018 08:36:17 GMT
From: Anatoly Burakov <anatoly.burakov@intel.com>
To: dev@dpdk.org
Cc: Olivier Matz <olivier.matz@6wind.com>, stable@dpdk.org
Date: Wed, 17 Jan 2018 08:36:14 +0000
Message-Id: <1802387bed3434d84fda9accdf236dfde7be3d39.1515845958.git.anatoly.burakov@intel.com>
X-Mailer: git-send-email 1.7.0.7
In-Reply-To: <c46dff058bdbc6fe7c66c551575c410347cd589e.1515845958.git.anatoly.burakov@intel.com>
References: <c46dff058bdbc6fe7c66c551575c410347cd589e.1515845958.git.anatoly.burakov@intel.com>
In-Reply-To: <9d5a3bc7a779b2dddab4256aa3e0631737861f98.1513867589.git.anatoly.burakov@intel.com>
References: <9d5a3bc7a779b2dddab4256aa3e0631737861f98.1513867589.git.anatoly.burakov@intel.com>
Subject: [dpdk-dev] [PATCH v2 4/6] test: fix memory leak in ring perf
	autotest
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://dpdk.org/ml/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://dpdk.org/ml/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://dpdk.org/ml/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
X-List-Received-Date: Wed, 17 Jan 2018 08:36:22 -0000

The ring perf autotest keeps its test ring in a global variable, never
frees it, and silently falls back to reusing a leftover ring (via
rte_ring_lookup()) if creation fails. Create the ring locally in
test_ring_perf(), pass it to the individual test functions explicitly
(and through struct thread_params for the per-lcore workers), and free
it once the test completes.

Fixes: ac3fb3019c52 ("app: rework ring tests")
Cc: stable@dpdk.org

Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
---
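Note for reviewers (not part of the commit message): a minimal sketch of the
resulting flow in test_ring_perf(), using only identifiers already present in
this patch, to show where the ring is now created, handed around and freed:

	struct rte_ring *r = rte_ring_create(RING_NAME, RING_SIZE,
			rte_socket_id(), 0);
	if (r == NULL)
		return -1;
	test_single_enqueue_dequeue(r);	/* helpers now take the ring explicitly */
	/* ... remaining tests, run_on_core_pair(&cores, r, ...) ... */
	rte_ring_free(r);	/* ring released on exit, fixing the leak */
	return 0;
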
 test/test/test_ring_perf.c | 36 ++++++++++++++++++++----------------
 1 file changed, 20 insertions(+), 16 deletions(-)

diff --git a/test/test/test_ring_perf.c b/test/test/test_ring_perf.c
index 4363e4d..ebb3939 100644
--- a/test/test/test_ring_perf.c
+++ b/test/test/test_ring_perf.c
@@ -32,9 +32,6 @@
  */
 static const volatile unsigned bulk_sizes[] = { 8, 32 };
 
-/* The ring structure used for tests */
-static struct rte_ring *r;
-
 struct lcore_pair {
 	unsigned c1, c2;
 };
@@ -115,7 +112,7 @@ get_two_sockets(struct lcore_pair *lcp)
 
 /* Get cycle counts for dequeuing from an empty ring. Should be 2 or 3 cycles */
 static void
-test_empty_dequeue(void)
+test_empty_dequeue(struct rte_ring *r)
 {
 	const unsigned iter_shift = 26;
 	const unsigned iterations = 1<<iter_shift;
@@ -143,6 +140,7 @@ test_empty_dequeue(void)
  * and return two. Input = burst size, output = cycle average for sp/sc & mp/mc
  */
 struct thread_params {
+	struct rte_ring *r;
 	unsigned size;        /* input value, the burst size */
 	double spsc, mpmc;    /* output value, the single or multi timings */
 };
@@ -157,6 +155,7 @@ enqueue_bulk(void *p)
 	const unsigned iter_shift = 23;
 	const unsigned iterations = 1<<iter_shift;
 	struct thread_params *params = p;
+	struct rte_ring *r = params->r;
 	const unsigned size = params->size;
 	unsigned i;
 	void *burst[MAX_BURST] = {0};
@@ -192,6 +191,7 @@ dequeue_bulk(void *p)
 	const unsigned iter_shift = 23;
 	const unsigned iterations = 1<<iter_shift;
 	struct thread_params *params = p;
+	struct rte_ring *r = params->r;
 	const unsigned size = params->size;
 	unsigned i;
 	void *burst[MAX_BURST] = {0};
@@ -222,7 +222,7 @@ dequeue_bulk(void *p)
  * used to measure ring perf between hyperthreads, cores and sockets.
  */
 static void
-run_on_core_pair(struct lcore_pair *cores,
+run_on_core_pair(struct lcore_pair *cores, struct rte_ring *r,
 		lcore_function_t f1, lcore_function_t f2)
 {
 	struct thread_params param1 = {0}, param2 = {0};
@@ -230,6 +230,7 @@ run_on_core_pair(struct lcore_pair *cores,
 	for (i = 0; i < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); i++) {
 		lcore_count = 0;
 		param1.size = param2.size = bulk_sizes[i];
+		param1.r = param2.r = r;
 		if (cores->c1 == rte_get_master_lcore()) {
 			rte_eal_remote_launch(f2, &param2, cores->c2);
 			f1(&param1);
@@ -252,7 +253,7 @@ run_on_core_pair(struct lcore_pair *cores,
  * takes on a single lcore. Result is for comparison with the bulk enq+deq.
  */
 static void
-test_single_enqueue_dequeue(void)
+test_single_enqueue_dequeue(struct rte_ring *r)
 {
 	const unsigned iter_shift = 24;
 	const unsigned iterations = 1<<iter_shift;
@@ -285,7 +286,7 @@ test_single_enqueue_dequeue(void)
  * as for the bulk function called on a single lcore.
  */
 static void
-test_burst_enqueue_dequeue(void)
+test_burst_enqueue_dequeue(struct rte_ring *r)
 {
 	const unsigned iter_shift = 23;
 	const unsigned iterations = 1<<iter_shift;
@@ -323,7 +324,7 @@ test_burst_enqueue_dequeue(void)
 
 /* Times enqueue and dequeue on a single lcore */
 static void
-test_bulk_enqueue_dequeue(void)
+test_bulk_enqueue_dequeue(struct rte_ring *r)
 {
 	const unsigned iter_shift = 23;
 	const unsigned iterations = 1<<iter_shift;
@@ -365,32 +366,35 @@ static int
 test_ring_perf(void)
 {
 	struct lcore_pair cores;
+	struct rte_ring *r = NULL;
+
 	r = rte_ring_create(RING_NAME, RING_SIZE, rte_socket_id(), 0);
-	if (r == NULL && (r = rte_ring_lookup(RING_NAME)) == NULL)
+	if (r == NULL)
 		return -1;
 
 	printf("### Testing single element and burst enq/deq ###\n");
-	test_single_enqueue_dequeue();
-	test_burst_enqueue_dequeue();
+	test_single_enqueue_dequeue(r);
+	test_burst_enqueue_dequeue(r);
 
 	printf("\n### Testing empty dequeue ###\n");
-	test_empty_dequeue();
+	test_empty_dequeue(r);
 
 	printf("\n### Testing using a single lcore ###\n");
-	test_bulk_enqueue_dequeue();
+	test_bulk_enqueue_dequeue(r);
 
 	if (get_two_hyperthreads(&cores) == 0) {
 		printf("\n### Testing using two hyperthreads ###\n");
-		run_on_core_pair(&cores, enqueue_bulk, dequeue_bulk);
+		run_on_core_pair(&cores, r, enqueue_bulk, dequeue_bulk);
 	}
 	if (get_two_cores(&cores) == 0) {
 		printf("\n### Testing using two physical cores ###\n");
-		run_on_core_pair(&cores, enqueue_bulk, dequeue_bulk);
+		run_on_core_pair(&cores, r, enqueue_bulk, dequeue_bulk);
 	}
 	if (get_two_sockets(&cores) == 0) {
 		printf("\n### Testing using two NUMA nodes ###\n");
-		run_on_core_pair(&cores, enqueue_bulk, dequeue_bulk);
+		run_on_core_pair(&cores, r, enqueue_bulk, dequeue_bulk);
 	}
+	rte_ring_free(r);
 	return 0;
 }
 
-- 
2.7.4