patches for DPDK stable branches
* [dpdk-stable] [PATCH 18.11 1/2] test/rwlock: benchmark on all available cores
@ 2019-09-10  3:36 Joyce Kong
  2019-09-10  3:36 ` [dpdk-stable] [PATCH 18.11 2/2] test/rwlock: amortize the cost of getting time Joyce Kong
  2019-09-10 10:18 ` [dpdk-stable] [PATCH 18.11 1/2] test/rwlock: benchmark on all available cores Kevin Traynor
  0 siblings, 2 replies; 3+ messages in thread
From: Joyce Kong @ 2019-09-10  3:36 UTC (permalink / raw)
  To: stable, ktraynor; +Cc: joyce

[ upstream commit fe252fb695efa9deb95f2e6b7baf6f805996a5b0 ]

Add a performance test on all available cores to benchmark
the scale-up performance of rwlock.

Fixes: af75078fece3 ("first public release")

Suggested-by: Gavin Hu <gavin.hu@arm.com>
Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
 test/test/test_rwlock.c | 76 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 76 insertions(+)

diff --git a/test/test/test_rwlock.c b/test/test/test_rwlock.c
index 29171c4..d654d48 100644
--- a/test/test/test_rwlock.c
+++ b/test/test/test_rwlock.c
@@ -4,6 +4,7 @@
 
 #include <stdio.h>
 #include <stdint.h>
+#include <inttypes.h>
 #include <unistd.h>
 #include <sys/queue.h>
 
@@ -44,6 +45,7 @@
 
 static rte_rwlock_t sl;
 static rte_rwlock_t sl_tab[RTE_MAX_LCORE];
+static rte_atomic32_t synchro;
 
 static int
 test_rwlock_per_core(__attribute__((unused)) void *arg)
@@ -65,6 +67,77 @@ test_rwlock_per_core(__attribute__((unused)) void *arg)
 	return 0;
 }
 
+static rte_rwlock_t lk = RTE_RWLOCK_INITIALIZER;
+static volatile uint64_t rwlock_data;
+static uint64_t lock_count[RTE_MAX_LCORE] = {0};
+
+#define TIME_MS 100
+#define TEST_RWLOCK_DEBUG 0
+
+static int
+load_loop_fn(__attribute__((unused)) void *arg)
+{
+	uint64_t time_diff = 0, begin;
+	uint64_t hz = rte_get_timer_hz();
+	uint64_t lcount = 0;
+	const unsigned int lcore = rte_lcore_id();
+
+	/* wait synchro for slaves */
+	if (lcore != rte_get_master_lcore())
+		while (rte_atomic32_read(&synchro) == 0)
+			;
+
+	begin = rte_rdtsc_precise();
+	while (time_diff < hz * TIME_MS / 1000) {
+		rte_rwlock_write_lock(&lk);
+		++rwlock_data;
+		rte_rwlock_write_unlock(&lk);
+
+		rte_rwlock_read_lock(&lk);
+		if (TEST_RWLOCK_DEBUG && !(lcount % 100))
+			printf("Core [%u] rwlock_data = %"PRIu64"\n",
+				 lcore, rwlock_data);
+		rte_rwlock_read_unlock(&lk);
+
+		lcount++;
+		/* delay to make lock duty cycle slightly realistic */
+		rte_pause();
+		time_diff = rte_rdtsc_precise() - begin;
+	}
+
+	lock_count[lcore] = lcount;
+	return 0;
+}
+
+static int
+test_rwlock_perf(void)
+{
+	unsigned int i;
+	uint64_t total = 0;
+
+	printf("\nRwlock Perf Test on %u cores...\n", rte_lcore_count());
+
+	/* clear synchro and start slaves */
+	rte_atomic32_set(&synchro, 0);
+	if (rte_eal_mp_remote_launch(load_loop_fn, NULL, SKIP_MASTER) < 0)
+		return -1;
+
+	/* start synchro and launch test on master */
+	rte_atomic32_set(&synchro, 1);
+	load_loop_fn(NULL);
+
+	rte_eal_mp_wait_lcore();
+
+	RTE_LCORE_FOREACH(i) {
+		printf("Core [%u] count = %"PRIu64"\n", i, lock_count[i]);
+		total += lock_count[i];
+	}
+
+	printf("Total count = %"PRIu64"\n", total);
+
+	return 0;
+}
+
 static int
 test_rwlock(void)
 {
@@ -95,6 +168,9 @@ test_rwlock(void)
 
 	rte_eal_mp_wait_lcore();
 
+	if (test_rwlock_perf() < 0)
+		return -1;
+
 	return 0;
 }
 
-- 
2.7.4


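The interesting part of the new test is the start barrier: the slave
lcores are launched first and park on the synchro flag, then the master
flips the flag and runs the same loop itself, so every core contends
for the rwlock over the same window, and rte_eal_mp_wait_lcore() joins
them before the counts are read. A stripped-down sketch of that
skeleton (illustration only; names are shortened and the timed
lock/unlock body is elided):

#include <rte_atomic.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_pause.h>

static rte_atomic32_t gate;	/* the patch calls this "synchro" */

static int
worker(__attribute__((unused)) void *arg)
{
	/* slaves spin here until the master opens the gate */
	if (rte_lcore_id() != rte_get_master_lcore())
		while (rte_atomic32_read(&gate) == 0)
			rte_pause();

	/* ... timed write-lock/read-lock loop runs here on every core ... */
	return 0;
}

static int
perf_test(void)
{
	/* hold the slaves at the gate while they are being launched */
	rte_atomic32_set(&gate, 0);
	if (rte_eal_mp_remote_launch(worker, NULL, SKIP_MASTER) < 0)
		return -1;

	/* release the slaves and run the same loop on the master */
	rte_atomic32_set(&gate, 1);
	worker(NULL);

	/* wait for every slave before reading the per-core results */
	rte_eal_mp_wait_lcore();
	return 0;
}

With TIME_MS at 100, each per-core count printed at the end corresponds
to roughly count * 10 loop iterations per second, where one iteration
takes the write lock once and the read lock once. The perf run is
reached through the existing test_rwlock() entry point (last hunk), so
no extra wiring is needed.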

* [dpdk-stable] [PATCH 18.11 2/2] test/rwlock: amortize the cost of getting time
  2019-09-10  3:36 [dpdk-stable] [PATCH 18.11 1/2] test/rwlock: benchmark on all available cores Joyce Kong
@ 2019-09-10  3:36 ` Joyce Kong
  2019-09-10 10:18 ` [dpdk-stable] [PATCH 18.11 1/2] test/rwlock: benchmark on all available cores Kevin Traynor
  1 sibling, 0 replies; 3+ messages in thread
From: Joyce Kong @ 2019-09-10  3:36 UTC (permalink / raw)
  To: stable, ktraynor; +Cc: joyce

[ upstream commit 6fef1ae4fc109807d13de4235281960b3b1dfd51 ]

Instead of reading the timestamp on every iteration, amortizing
its overhead over the whole loop gives more precise benchmark results.

Fixes: af75078fece3 ("first public release")

Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
 test/test/test_rwlock.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/test/test/test_rwlock.c b/test/test/test_rwlock.c
index d654d48..7c9b919 100644
--- a/test/test/test_rwlock.c
+++ b/test/test/test_rwlock.c
@@ -7,6 +7,7 @@
 #include <inttypes.h>
 #include <unistd.h>
 #include <sys/queue.h>
+#include <string.h>
 
 #include <rte_common.h>
 #include <rte_memory.h>
@@ -69,9 +70,9 @@ test_rwlock_per_core(__attribute__((unused)) void *arg)
 
 static rte_rwlock_t lk = RTE_RWLOCK_INITIALIZER;
 static volatile uint64_t rwlock_data;
-static uint64_t lock_count[RTE_MAX_LCORE] = {0};
+static uint64_t time_count[RTE_MAX_LCORE] = {0};
 
-#define TIME_MS 100
+#define MAX_LOOP 10000
 #define TEST_RWLOCK_DEBUG 0
 
 static int
@@ -88,7 +89,7 @@ load_loop_fn(__attribute__((unused)) void *arg)
 			;
 
 	begin = rte_rdtsc_precise();
-	while (time_diff < hz * TIME_MS / 1000) {
+	while (lcount < MAX_LOOP) {
 		rte_rwlock_write_lock(&lk);
 		++rwlock_data;
 		rte_rwlock_write_unlock(&lk);
@@ -102,10 +103,10 @@ load_loop_fn(__attribute__((unused)) void *arg)
 		lcount++;
 		/* delay to make lock duty cycle slightly realistic */
 		rte_pause();
-		time_diff = rte_rdtsc_precise() - begin;
 	}
 
-	lock_count[lcore] = lcount;
+	time_diff = rte_rdtsc_precise() - begin;
+	time_count[lcore] = time_diff * 1000000 / hz;
 	return 0;
 }
 
@@ -129,11 +130,13 @@ test_rwlock_perf(void)
 	rte_eal_mp_wait_lcore();
 
 	RTE_LCORE_FOREACH(i) {
-		printf("Core [%u] count = %"PRIu64"\n", i, lock_count[i]);
-		total += lock_count[i];
+		printf("Core [%u] cost time = %"PRIu64" us\n",
+		       i, time_count[i]);
+		total += time_count[i];
 	}
 
-	printf("Total count = %"PRIu64"\n", total);
+	printf("Total cost time = %"PRIu64" us\n", total);
+	memset(time_count, 0, sizeof(time_count));
 
 	return 0;
 }
-- 
2.7.4


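The rationale is easier to see outside the diff: rte_rdtsc_precise()
issues a full memory barrier before reading the TSC, so calling it on
every pass adds a fixed cost that is large compared to a single
lock/unlock and skews what is being measured. Reading it once around a
fixed iteration count pays that cost only twice. A minimal sketch of
the two patterns (illustrative helpers, not part of the patch):

#include <stdint.h>
#include <rte_cycles.h>

/* old pattern: the TSC is read on every pass of the timed loop */
static uint64_t
loop_per_iteration(uint64_t hz, uint64_t duration_ms)
{
	uint64_t begin = rte_rdtsc_precise();
	uint64_t diff = 0, iterations = 0;

	while (diff < hz * duration_ms / 1000) {
		/* critical section under test goes here */
		iterations++;
		diff = rte_rdtsc_precise() - begin;	/* overhead every pass */
	}
	return iterations;
}

/* new pattern: the TSC is read once around a fixed iteration count */
static uint64_t
loop_amortized(uint64_t iterations)
{
	uint64_t begin = rte_rdtsc_precise();
	uint64_t i;

	for (i = 0; i < iterations; i++) {
		/* critical section under test goes here */
	}
	/* cycles per iteration (iterations > 0); timer cost paid only twice */
	return (rte_rdtsc_precise() - begin) / iterations;
}

That is what the hunks above do: MAX_LOOP fixes the iteration count,
the single time_diff measurement is converted to microseconds per core,
and time_count[] replaces the old per-core iteration counters.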

* Re: [dpdk-stable] [PATCH 18.11 1/2] test/rwlock: benchmark on all available cores
  2019-09-10  3:36 [dpdk-stable] [PATCH 18.11 1/2] test/rwlock: benchmark on all available cores Joyce Kong
  2019-09-10  3:36 ` [dpdk-stable] [PATCH 18.11 2/2] test/rwlock: amortize the cost of getting time Joyce Kong
@ 2019-09-10 10:18 ` Kevin Traynor
  1 sibling, 0 replies; 3+ messages in thread
From: Kevin Traynor @ 2019-09-10 10:18 UTC (permalink / raw)
  To: Joyce Kong, stable; +Cc: joyce

On 10/09/2019 04:36, Joyce Kong wrote:
> [ upstream commit fe252fb695efa9deb95f2e6b7baf6f805996a5b0 ]
> 
> Add a performance test on all available cores to benchmark
> the scale-up performance of rwlock.
> 
> Fixes: af75078fece3 ("first public release")
> 
> Suggested-by: Gavin Hu <gavin.hu@arm.com>
> Signed-off-by: Joyce Kong <joyce.kong@arm.com>
> Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>

Thanks, added these patches to the queued commits.

