patches for DPDK stable branches
 help / color / mirror / Atom feed
* [dpdk-stable] [PATCH v1 1/2] test/rwlock: add perf test case
       [not found] <1544672265-219262-1-git-send-email-joyce.kong@arm.com>
@ 2018-12-13  3:37 ` Joyce Kong
  2018-12-19 23:34   ` [dpdk-stable] [dpdk-dev] " Ananyev, Konstantin
                     ` (7 more replies)
  2018-12-13  3:37 ` [dpdk-stable] [PATCH v1 2/2] rwlock: reimplement with __atomic builtins Joyce Kong
  1 sibling, 8 replies; 16+ messages in thread
From: Joyce Kong @ 2018-12-13  3:37 UTC (permalink / raw)
  To: dev
  Cc: nd, thomas, jerin.jacob, hemant.agrawal, honnappa.nagarahalli,
	gavin.hu, joyce.kong, stable

Add performance test on all available cores to benchmark
the scaling up performance and fairness of rw_lock.

Fixes: af75078faf ("first public release")
Cc: stable@dpdk.org

Suggested-by: Gavin Hu <gavin.hu@arm.com>
Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
Reviewed-by: Gavin Hu <gavin.hu@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
---
 test/test/test_rwlock.c | 71 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 71 insertions(+)

diff --git a/test/test/test_rwlock.c b/test/test/test_rwlock.c
index 29171c4..4766c09 100644
--- a/test/test/test_rwlock.c
+++ b/test/test/test_rwlock.c
@@ -4,6 +4,7 @@
 
 #include <stdio.h>
 #include <stdint.h>
+#include <inttypes.h>
 #include <unistd.h>
 #include <sys/queue.h>
 
@@ -44,6 +45,7 @@
 
 static rte_rwlock_t sl;
 static rte_rwlock_t sl_tab[RTE_MAX_LCORE];
+static rte_atomic32_t synchro;
 
 static int
 test_rwlock_per_core(__attribute__((unused)) void *arg)
@@ -65,6 +67,72 @@ test_rwlock_per_core(__attribute__((unused)) void *arg)
 	return 0;
 }
 
+static rte_rwlock_t lk = RTE_RWLOCK_INITIALIZER;
+static uint64_t lock_count[RTE_MAX_LCORE] = {0};
+
+#define TIME_MS 100
+
+static int
+load_loop_fn(__attribute__((unused)) void *arg)
+{
+	uint64_t time_diff = 0, begin;
+	uint64_t hz = rte_get_timer_hz();
+	uint64_t lcount = 0;
+	const unsigned int lcore = rte_lcore_id();
+
+	/* wait synchro for slaves */
+	if (lcore != rte_get_master_lcore())
+		while (rte_atomic32_read(&synchro) == 0)
+			;
+
+	begin = rte_rdtsc_precise();
+	while (time_diff < hz * TIME_MS / 1000) {
+		rte_rwlock_write_lock(&lk);
+		rte_pause();
+		rte_rwlock_write_unlock(&lk);
+		rte_rwlock_read_lock(&lk);
+		rte_rwlock_read_lock(&lk);
+		rte_pause();
+		rte_rwlock_read_unlock(&lk);
+		rte_rwlock_read_unlock(&lk);
+		lcount++;
+		/* delay to make lock duty cycle slightly realistic */
+		rte_pause();
+		time_diff = rte_rdtsc_precise() - begin;
+	}
+	lock_count[lcore] = lcount;
+	return 0;
+}
+
+static int
+test_rwlock_perf(void)
+{
+	unsigned int i;
+	uint64_t total = 0;
+
+	printf("\nRwlock Perf Test on %u cores...\n", rte_lcore_count());
+
+	/* clear synchro and start slaves */
+	rte_atomic32_set(&synchro, 0);
+	if (rte_eal_mp_remote_launch(load_loop_fn, NULL, SKIP_MASTER) < 0)
+		return -1;
+
+	/* start synchro and launch test on master */
+	rte_atomic32_set(&synchro, 1);
+	load_loop_fn(NULL);
+
+	rte_eal_mp_wait_lcore();
+
+	RTE_LCORE_FOREACH(i) {
+		printf("Core [%u] count = %"PRIu64"\n", i, lock_count[i]);
+		total += lock_count[i];
+	}
+
+	printf("Total count = %"PRIu64"\n", total);
+
+	return 0;
+}
+
 static int
 test_rwlock(void)
 {
@@ -95,6 +163,9 @@ test_rwlock(void)
 
 	rte_eal_mp_wait_lcore();
 
+	if (test_rwlock_perf() < 0)
+		return -1;
+
 	return 0;
 }
 
-- 
2.7.4

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [dpdk-stable] [PATCH v1 2/2] rwlock: reimplement with __atomic builtins
       [not found] <1544672265-219262-1-git-send-email-joyce.kong@arm.com>
  2018-12-13  3:37 ` [dpdk-stable] [PATCH v1 1/2] test/rwlock: add perf test case Joyce Kong
@ 2018-12-13  3:37 ` Joyce Kong
  2018-12-19 23:50   ` [dpdk-stable] [dpdk-dev] " Ananyev, Konstantin
  1 sibling, 1 reply; 16+ messages in thread
From: Joyce Kong @ 2018-12-13  3:37 UTC (permalink / raw)
  To: dev
  Cc: nd, Gavin Hu, thomas, jerin.jacob, hemant.agrawal,
	honnappa.nagarahalli, joyce.kong, stable

From: Gavin Hu <gavin.hu@arm.com>

The __sync builtin based implementation generates full memory barriers
('dmb ish') on Arm platforms. Use C11 atomic builtins instead to generate
one-way barriers.

Here is the assembly code of __sync_compare_and_swap builtin.
__sync_bool_compare_and_swap(dst, exp, src);
   0x000000000090f1b0 <+16>:    e0 07 40 f9 ldr x0, [sp, #8]
   0x000000000090f1b4 <+20>:    e1 0f 40 79 ldrh    w1, [sp, #6]
   0x000000000090f1b8 <+24>:    e2 0b 40 79 ldrh    w2, [sp, #4]
   0x000000000090f1bc <+28>:    21 3c 00 12 and w1, w1, #0xffff
   0x000000000090f1c0 <+32>:    03 7c 5f 48 ldxrh   w3, [x0]
   0x000000000090f1c4 <+36>:    7f 00 01 6b cmp w3, w1
   0x000000000090f1c8 <+40>:    61 00 00 54 b.ne    0x90f1d4
<rte_atomic16_cmpset+52>  // b.any
   0x000000000090f1cc <+44>:    02 fc 04 48 stlxrh  w4, w2, [x0]
   0x000000000090f1d0 <+48>:    84 ff ff 35 cbnz    w4, 0x90f1c0
<rte_atomic16_cmpset+32>
   0x000000000090f1d4 <+52>:    bf 3b 03 d5 dmb ish
   0x000000000090f1d8 <+56>:    e0 17 9f 1a cset    w0, eq  // eq = none

Fixes: af75078faf ("first public release")
Cc: stable@dpdk.org

Signed-off-by: Gavin Hu <gavin.hu@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
Reviewed-by: Joyce Kong <joyce.kong@arm.com>
Tested-by: Joyce Kong <joyce.kong@arm.com>
---
 lib/librte_eal/common/include/generic/rte_rwlock.h | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/lib/librte_eal/common/include/generic/rte_rwlock.h b/lib/librte_eal/common/include/generic/rte_rwlock.h
index 5751a0e..51d3aac 100644
--- a/lib/librte_eal/common/include/generic/rte_rwlock.h
+++ b/lib/librte_eal/common/include/generic/rte_rwlock.h
@@ -64,14 +64,14 @@ rte_rwlock_read_lock(rte_rwlock_t *rwl)
 	int success = 0;
 
 	while (success == 0) {
-		x = rwl->cnt;
+		x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
 		/* write lock is held */
 		if (x < 0) {
 			rte_pause();
 			continue;
 		}
-		success = rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt,
-					      (uint32_t)x, (uint32_t)(x + 1));
+		success = __atomic_compare_exchange_n(&rwl->cnt, &x, x+1, 1,
+					__ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
 	}
 }
 
@@ -84,7 +84,7 @@ rte_rwlock_read_lock(rte_rwlock_t *rwl)
 static inline void
 rte_rwlock_read_unlock(rte_rwlock_t *rwl)
 {
-	rte_atomic32_dec((rte_atomic32_t *)(intptr_t)&rwl->cnt);
+	__atomic_fetch_sub(&rwl->cnt, 1, __ATOMIC_RELEASE);
 }
 
 /**
@@ -100,14 +100,14 @@ rte_rwlock_write_lock(rte_rwlock_t *rwl)
 	int success = 0;
 
 	while (success == 0) {
-		x = rwl->cnt;
+		x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
 		/* a lock is held */
 		if (x != 0) {
 			rte_pause();
 			continue;
 		}
-		success = rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt,
-					      0, (uint32_t)-1);
+		success = __atomic_compare_exchange_n(&rwl->cnt, &x, -1, 1,
+					__ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
 	}
 }
 
@@ -120,7 +120,7 @@ rte_rwlock_write_lock(rte_rwlock_t *rwl)
 static inline void
 rte_rwlock_write_unlock(rte_rwlock_t *rwl)
 {
-	rte_atomic32_inc((rte_atomic32_t *)(intptr_t)&rwl->cnt);
+	__atomic_store_n(&rwl->cnt, 0, __ATOMIC_RELEASE);
 }
 
 /**
-- 
2.7.4

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [dpdk-stable] [dpdk-dev] [PATCH v1 1/2] test/rwlock: add perf test case
  2018-12-13  3:37 ` [dpdk-stable] [PATCH v1 1/2] test/rwlock: add perf test case Joyce Kong
@ 2018-12-19 23:34   ` Ananyev, Konstantin
  2018-12-20  1:01     ` Gavin Hu (Arm Technology China)
  2019-03-14 13:15   ` [dpdk-stable] [PATCH v3 2/3] test/rwlock: add perf test case on all available cores Joyce Kong
                     ` (6 subsequent siblings)
  7 siblings, 1 reply; 16+ messages in thread
From: Ananyev, Konstantin @ 2018-12-19 23:34 UTC (permalink / raw)
  To: Joyce Kong, dev
  Cc: nd, thomas, jerin.jacob, hemant.agrawal, honnappa.nagarahalli,
	gavin.hu, stable


Hi,

> 
> Add performance test on all available cores to benchmark
> the scaling up performance and fairness of rw_lock.
> 
> Fixes: af75078faf ("first public release")
> Cc: stable@dpdk.org
> 
> Suggested-by: Gavin Hu <gavin.hu@arm.com>
> Signed-off-by: Joyce Kong <joyce.kong@arm.com>
> Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
> Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
> Reviewed-by: Gavin Hu <gavin.hu@arm.com>
> Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
> ---
>  test/test/test_rwlock.c | 71 +++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 71 insertions(+)
> 
> diff --git a/test/test/test_rwlock.c b/test/test/test_rwlock.c
> index 29171c4..4766c09 100644
> --- a/test/test/test_rwlock.c
> +++ b/test/test/test_rwlock.c
> @@ -4,6 +4,7 @@
> 
>  #include <stdio.h>
>  #include <stdint.h>
> +#include <inttypes.h>
>  #include <unistd.h>
>  #include <sys/queue.h>
> 
> @@ -44,6 +45,7 @@
> 
>  static rte_rwlock_t sl;
>  static rte_rwlock_t sl_tab[RTE_MAX_LCORE];
> +static rte_atomic32_t synchro;
> 
>  static int
>  test_rwlock_per_core(__attribute__((unused)) void *arg)
> @@ -65,6 +67,72 @@ test_rwlock_per_core(__attribute__((unused)) void *arg)
>  	return 0;
>  }
> 
> +static rte_rwlock_t lk = RTE_RWLOCK_INITIALIZER;
> +static uint64_t lock_count[RTE_MAX_LCORE] = {0};
> +
> +#define TIME_MS 100
> +
> +static int
> +load_loop_fn(__attribute__((unused)) void *arg)
> +{
> +	uint64_t time_diff = 0, begin;
> +	uint64_t hz = rte_get_timer_hz();
> +	uint64_t lcount = 0;
> +	const unsigned int lcore = rte_lcore_id();
> +
> +	/* wait synchro for slaves */
> +	if (lcore != rte_get_master_lcore())
> +		while (rte_atomic32_read(&synchro) == 0)
> +			;
> +
> +	begin = rte_rdtsc_precise();
> +	while (time_diff < hz * TIME_MS / 1000) {
> +		rte_rwlock_write_lock(&lk);
> +		rte_pause();

Wouldn't it be more realistic to write/read some shared data here?
Again extra checking could be done in that case that lock behaves as expected.

> +		rte_rwlock_write_unlock(&lk);
> +		rte_rwlock_read_lock(&lk);
> +		rte_rwlock_read_lock(&lk);

Wonder what is the point of double rdlock here?
Konstantin

> +		rte_pause();
> +		rte_rwlock_read_unlock(&lk);
> +		rte_rwlock_read_unlock(&lk);
> +		lcount++;
> +		/* delay to make lock duty cycle slightly realistic */
> +		rte_pause();
> +		time_diff = rte_rdtsc_precise() - begin;
> +	}
> +	lock_count[lcore] = lcount;
> +	return 0;
> +}
> +

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [dpdk-stable] [dpdk-dev] [PATCH v1 2/2] rwlock: reimplement with __atomic builtins
  2018-12-13  3:37 ` [dpdk-stable] [PATCH v1 2/2] rwlock: reimplement with __atomic builtins Joyce Kong
@ 2018-12-19 23:50   ` Ananyev, Konstantin
  0 siblings, 0 replies; 16+ messages in thread
From: Ananyev, Konstantin @ 2018-12-19 23:50 UTC (permalink / raw)
  To: Joyce Kong, dev
  Cc: nd, Gavin Hu, thomas, jerin.jacob, hemant.agrawal,
	honnappa.nagarahalli, stable



> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Joyce Kong
> Sent: Thursday, December 13, 2018 3:38 AM
> To: dev@dpdk.org
> Cc: nd@arm.com; Gavin Hu <gavin.hu@arm.com>; thomas@monjalon.net; jerin.jacob@caviumnetworks.com; hemant.agrawal@nxp.com;
> honnappa.nagarahalli@arm.com; joyce.kong@arm.com; stable@dpdk.org
> Subject: [dpdk-dev] [PATCH v1 2/2] rwlock: reimplement with __atomic builtins
> 
> From: Gavin Hu <gavin.hu@arm.com>
> 
> The __sync builtin based implementation generates full memory barriers
> ('dmb ish') on Arm platforms. Use C11 atomic builtins instead to generate
> one-way barriers.
> 
> Here is the assembly code of __sync_compare_and_swap builtin.
> __sync_bool_compare_and_swap(dst, exp, src);
>    0x000000000090f1b0 <+16>:    e0 07 40 f9 ldr x0, [sp, #8]
>    0x000000000090f1b4 <+20>:    e1 0f 40 79 ldrh    w1, [sp, #6]
>    0x000000000090f1b8 <+24>:    e2 0b 40 79 ldrh    w2, [sp, #4]
>    0x000000000090f1bc <+28>:    21 3c 00 12 and w1, w1, #0xffff
>    0x000000000090f1c0 <+32>:    03 7c 5f 48 ldxrh   w3, [x0]
>    0x000000000090f1c4 <+36>:    7f 00 01 6b cmp w3, w1
>    0x000000000090f1c8 <+40>:    61 00 00 54 b.ne    0x90f1d4
> <rte_atomic16_cmpset+52>  // b.any
>    0x000000000090f1cc <+44>:    02 fc 04 48 stlxrh  w4, w2, [x0]
>    0x000000000090f1d0 <+48>:    84 ff ff 35 cbnz    w4, 0x90f1c0
> <rte_atomic16_cmpset+32>
>    0x000000000090f1d4 <+52>:    bf 3b 03 d5 dmb ish
>    0x000000000090f1d8 <+56>:    e0 17 9f 1a cset    w0, eq  // eq = none
> 
> Fixes: af75078faf ("first public release")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Gavin Hu <gavin.hu@arm.com>
> Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
> Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
> Reviewed-by: Joyce Kong <joyce.kong@arm.com>
> Tested-by: Joyce Kong <joyce.kong@arm.com>
> ---
>  lib/librte_eal/common/include/generic/rte_rwlock.h | 16 ++++++++--------
>  1 file changed, 8 insertions(+), 8 deletions(-)

Looks ok to me in general, but I think needs to run extra perf testing
to check for regression/correctness on other platforms (obviously IA is my main concern).
Another thing - my personal preference would be to have it rebased with '_trylock'
functions also updated - for consistency, plus extra functional tests will be available for new method. 

> 
> diff --git a/lib/librte_eal/common/include/generic/rte_rwlock.h b/lib/librte_eal/common/include/generic/rte_rwlock.h
> index 5751a0e..51d3aac 100644
> --- a/lib/librte_eal/common/include/generic/rte_rwlock.h
> +++ b/lib/librte_eal/common/include/generic/rte_rwlock.h
> @@ -64,14 +64,14 @@ rte_rwlock_read_lock(rte_rwlock_t *rwl)
>  	int success = 0;
> 
>  	while (success == 0) {
> -		x = rwl->cnt;
> +		x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
>  		/* write lock is held */
>  		if (x < 0) {
>  			rte_pause();
>  			continue;
>  		}
> -		success = rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt,
> -					      (uint32_t)x, (uint32_t)(x + 1));
> +		success = __atomic_compare_exchange_n(&rwl->cnt, &x, x+1, 1,

As a nit: 'x + 1'


> +					__ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
>  	}
>  }
> 
> @@ -84,7 +84,7 @@ rte_rwlock_read_lock(rte_rwlock_t *rwl)
>  static inline void
>  rte_rwlock_read_unlock(rte_rwlock_t *rwl)
>  {
> -	rte_atomic32_dec((rte_atomic32_t *)(intptr_t)&rwl->cnt);
> +	__atomic_fetch_sub(&rwl->cnt, 1, __ATOMIC_RELEASE);
>  }
> 
>  /**
> @@ -100,14 +100,14 @@ rte_rwlock_write_lock(rte_rwlock_t *rwl)
>  	int success = 0;
> 
>  	while (success == 0) {
> -		x = rwl->cnt;
> +		x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
>  		/* a lock is held */
>  		if (x != 0) {
>  			rte_pause();
>  			continue;
>  		}
> -		success = rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt,
> -					      0, (uint32_t)-1);
> +		success = __atomic_compare_exchange_n(&rwl->cnt, &x, -1, 1,
> +					__ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
>  	}
>  }
> 
> @@ -120,7 +120,7 @@ rte_rwlock_write_lock(rte_rwlock_t *rwl)
>  static inline void
>  rte_rwlock_write_unlock(rte_rwlock_t *rwl)
>  {
> -	rte_atomic32_inc((rte_atomic32_t *)(intptr_t)&rwl->cnt);
> +	__atomic_store_n(&rwl->cnt, 0, __ATOMIC_RELEASE);
>  }
> 
>  /**
> --
> 2.7.4

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [dpdk-stable] [dpdk-dev] [PATCH v1 1/2] test/rwlock: add perf test case
  2018-12-19 23:34   ` [dpdk-stable] [dpdk-dev] " Ananyev, Konstantin
@ 2018-12-20  1:01     ` Gavin Hu (Arm Technology China)
  2018-12-20  1:45       ` Honnappa Nagarahalli
  0 siblings, 1 reply; 16+ messages in thread
From: Gavin Hu (Arm Technology China) @ 2018-12-20  1:01 UTC (permalink / raw)
  To: Ananyev, Konstantin, Joyce Kong (Arm Technology China), dev
  Cc: nd, thomas, hemant.agrawal, Honnappa Nagarahalli, stable,
	chaozhu, jerinj, nd



> -----Original Message-----
> From: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Sent: Thursday, December 20, 2018 7:35 AM
> To: Joyce Kong (Arm Technology China) <Joyce.Kong@arm.com>;
> dev@dpdk.org
> Cc: nd <nd@arm.com>; thomas@monjalon.net;
> jerin.jacob@caviumnetworks.com; hemant.agrawal@nxp.com; Honnappa
> Nagarahalli <Honnappa.Nagarahalli@arm.com>; Gavin Hu (Arm Technology
> China) <Gavin.Hu@arm.com>; stable@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH v1 1/2] test/rwlock: add perf test case
> 
> 
> Hi,
> 
> >
> > Add performance test on all available cores to benchmark the scaling
> > up performance and fairness of rw_lock.
> >
> > Fixes: af75078faf ("first public release")
> > Cc: stable@dpdk.org
> >
> > Suggested-by: Gavin Hu <gavin.hu@arm.com>
> > Signed-off-by: Joyce Kong <joyce.kong@arm.com>
> > Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
> > Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
> > Reviewed-by: Gavin Hu <gavin.hu@arm.com>
> > Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
> > ---
> >  test/test/test_rwlock.c | 71
> > +++++++++++++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 71 insertions(+)
> >
> > diff --git a/test/test/test_rwlock.c b/test/test/test_rwlock.c index
> > 29171c4..4766c09 100644
> > --- a/test/test/test_rwlock.c
> > +++ b/test/test/test_rwlock.c
> > @@ -4,6 +4,7 @@
> >
> >  #include <stdio.h>
> >  #include <stdint.h>
> > +#include <inttypes.h>
> >  #include <unistd.h>
> >  #include <sys/queue.h>
> >
> > @@ -44,6 +45,7 @@
> >
> >  static rte_rwlock_t sl;
> >  static rte_rwlock_t sl_tab[RTE_MAX_LCORE];
> > +static rte_atomic32_t synchro;
> >
> >  static int
> >  test_rwlock_per_core(__attribute__((unused)) void *arg) @@ -65,6
> > +67,72 @@ test_rwlock_per_core(__attribute__((unused)) void *arg)
> >  	return 0;
> >  }
> >
> > +static rte_rwlock_t lk = RTE_RWLOCK_INITIALIZER; static uint64_t
> > +lock_count[RTE_MAX_LCORE] = {0};
> > +
> > +#define TIME_MS 100
> > +
> > +static int
> > +load_loop_fn(__attribute__((unused)) void *arg) {
> > +	uint64_t time_diff = 0, begin;
> > +	uint64_t hz = rte_get_timer_hz();
> > +	uint64_t lcount = 0;
> > +	const unsigned int lcore = rte_lcore_id();
> > +
> > +	/* wait synchro for slaves */
> > +	if (lcore != rte_get_master_lcore())
> > +		while (rte_atomic32_read(&synchro) == 0)
> > +			;
> > +
> > +	begin = rte_rdtsc_precise();
> > +	while (time_diff < hz * TIME_MS / 1000) {
> > +		rte_rwlock_write_lock(&lk);
> > +		rte_pause();
> 
> Wouldn't it be more realistic to write/read some shared data here?
> Again extra checking could be done in that case that lock behaves as
> expected.
Will do it in v2, thanks!
> 
> > +		rte_rwlock_write_unlock(&lk);
> > +		rte_rwlock_read_lock(&lk);
> > +		rte_rwlock_read_lock(&lk);
> 
> Wonder what is the point of double rdlock here?
> Konstantin
Double rd lock is to check rd locks will not block each other. 
Anyway I will remove it in v2 if no concerns here.
> 
> > +		rte_pause();
> > +		rte_rwlock_read_unlock(&lk);
> > +		rte_rwlock_read_unlock(&lk);
> > +		lcount++;
> > +		/* delay to make lock duty cycle slightly realistic */
> > +		rte_pause();
> > +		time_diff = rte_rdtsc_precise() - begin;
> > +	}
> > +	lock_count[lcore] = lcount;
> > +	return 0;
> > +}
> > +

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [dpdk-stable] [dpdk-dev] [PATCH v1 1/2] test/rwlock: add perf test case
  2018-12-20  1:01     ` Gavin Hu (Arm Technology China)
@ 2018-12-20  1:45       ` Honnappa Nagarahalli
  0 siblings, 0 replies; 16+ messages in thread
From: Honnappa Nagarahalli @ 2018-12-20  1:45 UTC (permalink / raw)
  To: Gavin Hu (Arm Technology China),
	Ananyev, Konstantin, Joyce Kong (Arm Technology China),
	dev
  Cc: nd, thomas, hemant.agrawal, stable, chaozhu, jerinj, nd

> >
> > Hi,
> >
> > >
> > > Add performance test on all available cores to benchmark the scaling
> > > up performance and fairness of rw_lock.
> > >
> > > Fixes: af75078faf ("first public release")
> > > Cc: stable@dpdk.org
> > >
> > > Suggested-by: Gavin Hu <gavin.hu@arm.com>
> > > Signed-off-by: Joyce Kong <joyce.kong@arm.com>
> > > Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
> > > Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
> > > Reviewed-by: Gavin Hu <gavin.hu@arm.com>
> > > Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
> > > ---
> > >  test/test/test_rwlock.c | 71
> > > +++++++++++++++++++++++++++++++++++++++++++++++++
> > >  1 file changed, 71 insertions(+)
> > >
> > > diff --git a/test/test/test_rwlock.c b/test/test/test_rwlock.c index
> > > 29171c4..4766c09 100644
> > > --- a/test/test/test_rwlock.c
> > > +++ b/test/test/test_rwlock.c
> > > @@ -4,6 +4,7 @@
> > >
> > >  #include <stdio.h>
> > >  #include <stdint.h>
> > > +#include <inttypes.h>
> > >  #include <unistd.h>
> > >  #include <sys/queue.h>
> > >
> > > @@ -44,6 +45,7 @@
> > >
> > >  static rte_rwlock_t sl;
> > >  static rte_rwlock_t sl_tab[RTE_MAX_LCORE];
> > > +static rte_atomic32_t synchro;
> > >
> > >  static int
> > >  test_rwlock_per_core(__attribute__((unused)) void *arg) @@ -65,6
> > > +67,72 @@ test_rwlock_per_core(__attribute__((unused)) void *arg)
> > >  	return 0;
> > >  }
> > >
> > > +static rte_rwlock_t lk = RTE_RWLOCK_INITIALIZER; static uint64_t
> > > +lock_count[RTE_MAX_LCORE] = {0};
> > > +
> > > +#define TIME_MS 100
> > > +
> > > +static int
> > > +load_loop_fn(__attribute__((unused)) void *arg) {
> > > +	uint64_t time_diff = 0, begin;
> > > +	uint64_t hz = rte_get_timer_hz();
> > > +	uint64_t lcount = 0;
> > > +	const unsigned int lcore = rte_lcore_id();
> > > +
> > > +	/* wait synchro for slaves */
> > > +	if (lcore != rte_get_master_lcore())
> > > +		while (rte_atomic32_read(&synchro) == 0)
> > > +			;
> > > +
> > > +	begin = rte_rdtsc_precise();
> > > +	while (time_diff < hz * TIME_MS / 1000) {
> > > +		rte_rwlock_write_lock(&lk);
> > > +		rte_pause();
> >
> > Wouldn't it be more realistic to write/read some shared data here?
> > Again extra checking could be done in that case that lock behaves as
> > expected.
> Will do it in v2, thanks!
> >
> > > +		rte_rwlock_write_unlock(&lk);
> > > +		rte_rwlock_read_lock(&lk);
> > > +		rte_rwlock_read_lock(&lk);
> >
> > Wonder what is the point of double rdlock here?
> > Konstantin
> Double rd lock is to check rd locks will not block each other.
> Anyway I will remove it in v2 if no concerns here.
> >
> > > +		rte_pause();
> > > +		rte_rwlock_read_unlock(&lk);
> > > +		rte_rwlock_read_unlock(&lk);
> > > +		lcount++;
> > > +		/* delay to make lock duty cycle slightly realistic */
> > > +		rte_pause();
> > > +		time_diff = rte_rdtsc_precise() - begin;
> > > +	}
Should we change the way the measurement is done? We are measuring 'how many locks/unlocks per <certain time>'. This introduces more overhead due to the rte_rdtsc_precise call for every iteration. If we do, 'how many cycles it takes to do <certain number of locks/unlocks>', the overhead of rte_rdtsc_precise can be amortized and will be very little.

> > > +	lock_count[lcore] = lcount;
> > > +	return 0;
> > > +}
> > > +

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [dpdk-stable] [PATCH v3 2/3] test/rwlock: add perf test case on all available cores
  2018-12-13  3:37 ` [dpdk-stable] [PATCH v1 1/2] test/rwlock: add perf test case Joyce Kong
  2018-12-19 23:34   ` [dpdk-stable] [dpdk-dev] " Ananyev, Konstantin
@ 2019-03-14 13:15   ` Joyce Kong
  2019-03-14 13:15   ` [dpdk-stable] [PATCH v3 3/3] test/rwlock: amortize the cost of getting time Joyce Kong
                     ` (5 subsequent siblings)
  7 siblings, 0 replies; 16+ messages in thread
From: Joyce Kong @ 2019-03-14 13:15 UTC (permalink / raw)
  To: dev
  Cc: nd, jerinj, konstantin.ananyev, chaozhu, bruce.richardson,
	thomas, hemant.agrawal, honnappa.nagarahalli, gavin.hu, stable

Add performance test on all available cores to benchmark
the scaling up performance of rw_lock.

Fixes: af75078fece3 ("first public release")
Cc: stable@dpdk.org

Suggested-by: Gavin Hu <gavin.hu@arm.com>
Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
Reviewed-by: Gavin Hu <gavin.hu@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
---
 app/test/test_rwlock.c | 75 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 75 insertions(+)

diff --git a/app/test/test_rwlock.c b/app/test/test_rwlock.c
index 224f0de..f1c5f40 100644
--- a/app/test/test_rwlock.c
+++ b/app/test/test_rwlock.c
@@ -36,6 +36,7 @@
 
 static rte_rwlock_t sl;
 static rte_rwlock_t sl_tab[RTE_MAX_LCORE];
+static rte_atomic32_t synchro;
 
 enum {
 	LC_TYPE_RDLOCK,
@@ -83,6 +84,77 @@ test_rwlock_per_core(__attribute__((unused)) void *arg)
 	return 0;
 }
 
+static rte_rwlock_t lk = RTE_RWLOCK_INITIALIZER;
+static volatile uint64_t rwlock_data;
+static uint64_t lock_count[RTE_MAX_LCORE] = {0};
+
+#define TIME_MS 100
+#define TEST_RWLOCK_DEBUG 0
+
+static int
+load_loop_fn(__attribute__((unused)) void *arg)
+{
+	uint64_t time_diff = 0, begin;
+	uint64_t hz = rte_get_timer_hz();
+	uint64_t lcount = 0;
+	const unsigned int lcore = rte_lcore_id();
+
+	/* wait synchro for slaves */
+	if (lcore != rte_get_master_lcore())
+		while (rte_atomic32_read(&synchro) == 0)
+			;
+
+	begin = rte_rdtsc_precise();
+	while (time_diff < hz * TIME_MS / 1000) {
+		rte_rwlock_write_lock(&lk);
+		++rwlock_data;
+		rte_rwlock_write_unlock(&lk);
+
+		rte_rwlock_read_lock(&lk);
+		if (TEST_RWLOCK_DEBUG & !(lcount % 100))
+			printf("Core [%u] rwlock_data = %"PRIu64"\n",
+					lcore, rwlock_data);
+		rte_rwlock_read_unlock(&lk);
+
+		lcount++;
+		/* delay to make lock duty cycle slightly realistic */
+		rte_pause();
+		time_diff = rte_rdtsc_precise() - begin;
+	}
+
+	lock_count[lcore] = lcount;
+	return 0;
+}
+
+static int
+test_rwlock_perf(void)
+{
+	unsigned int i;
+	uint64_t total = 0;
+
+	printf("\nRwlock Perf Test on %u cores...\n", rte_lcore_count());
+
+	/* clear synchro and start slaves */
+	rte_atomic32_set(&synchro, 0);
+	if (rte_eal_mp_remote_launch(load_loop_fn, NULL, SKIP_MASTER) < 0)
+		return -1;
+
+	/* start synchro and launch test on master */
+	rte_atomic32_set(&synchro, 1);
+	load_loop_fn(NULL);
+
+	rte_eal_mp_wait_lcore();
+
+	RTE_LCORE_FOREACH(i) {
+		printf("Core [%u] count = %"PRIu64"\n", i, lock_count[i]);
+		total += lock_count[i];
+	}
+
+	printf("Total count = %"PRIu64"\n", total);
+
+	return 0;
+}
+
 /*
  * - There is a global rwlock and a table of rwlocks (one per lcore).
  *
@@ -132,6 +204,9 @@ rwlock_test1(void)
 
 	rte_eal_mp_wait_lcore();
 
+	if (test_rwlock_perf() < 0)
+		return -1;
+
 	return 0;
 }
 
-- 
2.7.4


^ permalink raw reply	[flat|nested] 16+ messages in thread

* [dpdk-stable] [PATCH v3 3/3] test/rwlock: amortize the cost of getting time
  2018-12-13  3:37 ` [dpdk-stable] [PATCH v1 1/2] test/rwlock: add perf test case Joyce Kong
  2018-12-19 23:34   ` [dpdk-stable] [dpdk-dev] " Ananyev, Konstantin
  2019-03-14 13:15   ` [dpdk-stable] [PATCH v3 2/3] test/rwlock: add perf test case on all available cores Joyce Kong
@ 2019-03-14 13:15   ` Joyce Kong
  2019-03-14 15:02     ` Honnappa Nagarahalli
  2019-03-20  6:25   ` [dpdk-stable] [PATCH v4 2/3] test/rwlock: add perf test case on all available cores Joyce Kong
                     ` (4 subsequent siblings)
  7 siblings, 1 reply; 16+ messages in thread
From: Joyce Kong @ 2019-03-14 13:15 UTC (permalink / raw)
  To: dev
  Cc: nd, jerinj, konstantin.ananyev, chaozhu, bruce.richardson,
	thomas, hemant.agrawal, honnappa.nagarahalli, gavin.hu, stable

Getting the timestamp once after the loop, instead of once per iteration,
amortizes its overhead and helps to get more precise benchmarking results.

Fixes: af75078fece3 ("first public release")
Cc: stable@dpdk.org

Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Reviewed-by: Gavin Hu <gavin.hu@arm.com>
Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
Signed-off-by: Joyce Kong <joyce.kong@arm.com>
---
 app/test/test_rwlock.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/app/test/test_rwlock.c b/app/test/test_rwlock.c
index f1c5f40..48291c2 100644
--- a/app/test/test_rwlock.c
+++ b/app/test/test_rwlock.c
@@ -86,9 +86,9 @@ test_rwlock_per_core(__attribute__((unused)) void *arg)
 
 static rte_rwlock_t lk = RTE_RWLOCK_INITIALIZER;
 static volatile uint64_t rwlock_data;
-static uint64_t lock_count[RTE_MAX_LCORE] = {0};
+static uint64_t time_count[RTE_MAX_LCORE] = {0};
 
-#define TIME_MS 100
+#define MAX_LOOP 10000
 #define TEST_RWLOCK_DEBUG 0
 
 static int
@@ -105,7 +105,7 @@ load_loop_fn(__attribute__((unused)) void *arg)
 			;
 
 	begin = rte_rdtsc_precise();
-	while (time_diff < hz * TIME_MS / 1000) {
+	while (lcount < MAX_LOOP) {
 		rte_rwlock_write_lock(&lk);
 		++rwlock_data;
 		rte_rwlock_write_unlock(&lk);
@@ -119,10 +119,10 @@ load_loop_fn(__attribute__((unused)) void *arg)
 		lcount++;
 		/* delay to make lock duty cycle slightly realistic */
 		rte_pause();
-		time_diff = rte_rdtsc_precise() - begin;
 	}
 
-	lock_count[lcore] = lcount;
+	time_diff = rte_rdtsc_precise() - begin;
+	time_count[lcore] = time_diff * 1000000 / hz;
 	return 0;
 }
 
@@ -146,11 +146,13 @@ test_rwlock_perf(void)
 	rte_eal_mp_wait_lcore();
 
 	RTE_LCORE_FOREACH(i) {
-		printf("Core [%u] count = %"PRIu64"\n", i, lock_count[i]);
-		total += lock_count[i];
+		printf("Core [%u] cost time = %"PRIu64" us\n",
+				i, time_count[i]);
+		total += time_count[i];
 	}
 
-	printf("Total count = %"PRIu64"\n", total);
+	printf("Total cost time = %"PRIu64" us\n", total);
+	memset(time_count, 0, sizeof(time_count));
 
 	return 0;
 }
-- 
2.7.4


^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [dpdk-stable] [PATCH v3 3/3] test/rwlock: amortize the cost of getting time
  2019-03-14 13:15   ` [dpdk-stable] [PATCH v3 3/3] test/rwlock: amortize the cost of getting time Joyce Kong
@ 2019-03-14 15:02     ` Honnappa Nagarahalli
  0 siblings, 0 replies; 16+ messages in thread
From: Honnappa Nagarahalli @ 2019-03-14 15:02 UTC (permalink / raw)
  To: Joyce Kong (Arm Technology China), dev
  Cc: nd, jerinj, konstantin.ananyev, chaozhu, bruce.richardson,
	thomas, hemant.agrawal, Gavin Hu (Arm Technology China),
	stable, nd

> Subject: [PATCH v3 3/3] test/rwlock: amortize the cost of getting time
> 
> Getting the timestamp once after the loop, instead of once per iteration,
> amortizes its overhead and helps to get more precise benchmarking results.
> 
> Fixes: af75078fece3 ("first public release")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Joyce Kong <joyce.kong@arm.com>
> Reviewed-by: Gavin Hu <gavin.hu@arm.com>
> Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
> Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
> Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Signed-off-by tag is repeated.
I think you need to add Jerin's acked-by tag to all the patches in this series.

<snip>

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [dpdk-stable] [PATCH v4 2/3] test/rwlock: add perf test case on all available cores
  2018-12-13  3:37 ` [dpdk-stable] [PATCH v1 1/2] test/rwlock: add perf test case Joyce Kong
                     ` (2 preceding siblings ...)
  2019-03-14 13:15   ` [dpdk-stable] [PATCH v3 3/3] test/rwlock: amortize the cost of getting time Joyce Kong
@ 2019-03-20  6:25   ` Joyce Kong
  2019-03-21 18:44     ` Ananyev, Konstantin
  2019-03-20  6:25   ` [dpdk-stable] [PATCH v4 3/3] test/rwlock: amortize the cost of getting time Joyce Kong
                     ` (3 subsequent siblings)
  7 siblings, 1 reply; 16+ messages in thread
From: Joyce Kong @ 2019-03-20  6:25 UTC (permalink / raw)
  To: dev
  Cc: nd, jerinj, konstantin.ananyev, chaozhu, bruce.richardson,
	thomas, hemant.agrawal, honnappa.nagarahalli, gavin.hu, stable

Add performance test on all available cores to benchmark
the scaling up performance of rwlock.

Fixes: af75078fece3 ("first public release")
Cc: stable@dpdk.org

Suggested-by: Gavin Hu <gavin.hu@arm.com>
Signed-off-by: Joyce Kong <joyce.kong@arm.com>
---
 app/test/test_rwlock.c | 75 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 75 insertions(+)

diff --git a/app/test/test_rwlock.c b/app/test/test_rwlock.c
index 224f0de..f1c5f40 100644
--- a/app/test/test_rwlock.c
+++ b/app/test/test_rwlock.c
@@ -36,6 +36,7 @@
 
 static rte_rwlock_t sl;
 static rte_rwlock_t sl_tab[RTE_MAX_LCORE];
+static rte_atomic32_t synchro;
 
 enum {
 	LC_TYPE_RDLOCK,
@@ -83,6 +84,77 @@ test_rwlock_per_core(__attribute__((unused)) void *arg)
 	return 0;
 }
 
+static rte_rwlock_t lk = RTE_RWLOCK_INITIALIZER;
+static volatile uint64_t rwlock_data;
+static uint64_t lock_count[RTE_MAX_LCORE] = {0};
+
+#define TIME_MS 100
+#define TEST_RWLOCK_DEBUG 0
+
+static int
+load_loop_fn(__attribute__((unused)) void *arg)
+{
+	uint64_t time_diff = 0, begin;
+	uint64_t hz = rte_get_timer_hz();
+	uint64_t lcount = 0;
+	const unsigned int lcore = rte_lcore_id();
+
+	/* wait synchro for slaves */
+	if (lcore != rte_get_master_lcore())
+		while (rte_atomic32_read(&synchro) == 0)
+			;
+
+	begin = rte_rdtsc_precise();
+	while (time_diff < hz * TIME_MS / 1000) {
+		rte_rwlock_write_lock(&lk);
+		++rwlock_data;
+		rte_rwlock_write_unlock(&lk);
+
+		rte_rwlock_read_lock(&lk);
+		if (TEST_RWLOCK_DEBUG & !(lcount % 100))
+			printf("Core [%u] rwlock_data = %"PRIu64"\n",
+					lcore, rwlock_data);
+		rte_rwlock_read_unlock(&lk);
+
+		lcount++;
+		/* delay to make lock duty cycle slightly realistic */
+		rte_pause();
+		time_diff = rte_rdtsc_precise() - begin;
+	}
+
+	lock_count[lcore] = lcount;
+	return 0;
+}
+
+static int
+test_rwlock_perf(void)
+{
+	unsigned int i;
+	uint64_t total = 0;
+
+	printf("\nRwlock Perf Test on %u cores...\n", rte_lcore_count());
+
+	/* clear synchro and start slaves */
+	rte_atomic32_set(&synchro, 0);
+	if (rte_eal_mp_remote_launch(load_loop_fn, NULL, SKIP_MASTER) < 0)
+		return -1;
+
+	/* start synchro and launch test on master */
+	rte_atomic32_set(&synchro, 1);
+	load_loop_fn(NULL);
+
+	rte_eal_mp_wait_lcore();
+
+	RTE_LCORE_FOREACH(i) {
+		printf("Core [%u] count = %"PRIu64"\n", i, lock_count[i]);
+		total += lock_count[i];
+	}
+
+	printf("Total count = %"PRIu64"\n", total);
+
+	return 0;
+}
+
 /*
  * - There is a global rwlock and a table of rwlocks (one per lcore).
  *
@@ -132,6 +204,9 @@ rwlock_test1(void)
 
 	rte_eal_mp_wait_lcore();
 
+	if (test_rwlock_perf() < 0)
+		return -1;
+
 	return 0;
 }
 
-- 
2.7.4


^ permalink raw reply	[flat|nested] 16+ messages in thread

* [dpdk-stable] [PATCH v4 3/3] test/rwlock: amortize the cost of getting time
  2018-12-13  3:37 ` [dpdk-stable] [PATCH v1 1/2] test/rwlock: add perf test case Joyce Kong
                     ` (3 preceding siblings ...)
  2019-03-20  6:25   ` [dpdk-stable] [PATCH v4 2/3] test/rwlock: add perf test case on all available cores Joyce Kong
@ 2019-03-20  6:25   ` Joyce Kong
  2019-03-21 18:44     ` Ananyev, Konstantin
  2019-03-25  9:14   ` [dpdk-stable] [PATCH v5 1/3] rwlock: reimplement with atomic builtins Joyce Kong
                     ` (2 subsequent siblings)
  7 siblings, 1 reply; 16+ messages in thread
From: Joyce Kong @ 2019-03-20  6:25 UTC (permalink / raw)
  To: dev
  Cc: nd, jerinj, konstantin.ananyev, chaozhu, bruce.richardson,
	thomas, hemant.agrawal, honnappa.nagarahalli, gavin.hu, stable

Instead of getting a timestamp per iteration, amortizing its
overhead can help to get more precise benchmarking results.

Fixes: af75078fece3 ("first public release")
Cc: stable@dpdk.org

Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Reviewed-by: Gavin Hu <gavin.hu@arm.com>
Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
---
 app/test/test_rwlock.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/app/test/test_rwlock.c b/app/test/test_rwlock.c
index f1c5f40..48291c2 100644
--- a/app/test/test_rwlock.c
+++ b/app/test/test_rwlock.c
@@ -86,9 +86,9 @@ test_rwlock_per_core(__attribute__((unused)) void *arg)
 
 static rte_rwlock_t lk = RTE_RWLOCK_INITIALIZER;
 static volatile uint64_t rwlock_data;
-static uint64_t lock_count[RTE_MAX_LCORE] = {0};
+static uint64_t time_count[RTE_MAX_LCORE] = {0};
 
-#define TIME_MS 100
+#define MAX_LOOP 10000
 #define TEST_RWLOCK_DEBUG 0
 
 static int
@@ -105,7 +105,7 @@ load_loop_fn(__attribute__((unused)) void *arg)
 			;
 
 	begin = rte_rdtsc_precise();
-	while (time_diff < hz * TIME_MS / 1000) {
+	while (lcount < MAX_LOOP) {
 		rte_rwlock_write_lock(&lk);
 		++rwlock_data;
 		rte_rwlock_write_unlock(&lk);
@@ -119,10 +119,10 @@ load_loop_fn(__attribute__((unused)) void *arg)
 		lcount++;
 		/* delay to make lock duty cycle slightly realistic */
 		rte_pause();
-		time_diff = rte_rdtsc_precise() - begin;
 	}
 
-	lock_count[lcore] = lcount;
+	time_diff = rte_rdtsc_precise() - begin;
+	time_count[lcore] = time_diff * 1000000 / hz;
 	return 0;
 }
 
@@ -146,11 +146,13 @@ test_rwlock_perf(void)
 	rte_eal_mp_wait_lcore();
 
 	RTE_LCORE_FOREACH(i) {
-		printf("Core [%u] count = %"PRIu64"\n", i, lock_count[i]);
-		total += lock_count[i];
+		printf("Core [%u] cost time = %"PRIu64" us\n",
+				i, time_count[i]);
+		total += time_count[i];
 	}
 
-	printf("Total count = %"PRIu64"\n", total);
+	printf("Total cost time = %"PRIu64" us\n", total);
+	memset(time_count, 0, sizeof(time_count));
 
 	return 0;
 }
-- 
2.7.4


^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [dpdk-stable] [PATCH v4 2/3] test/rwlock: add perf test case on all available cores
  2019-03-20  6:25   ` [dpdk-stable] [PATCH v4 2/3] test/rwlock: add perf test case on all available cores Joyce Kong
@ 2019-03-21 18:44     ` Ananyev, Konstantin
  0 siblings, 0 replies; 16+ messages in thread
From: Ananyev, Konstantin @ 2019-03-21 18:44 UTC (permalink / raw)
  To: Joyce Kong, dev
  Cc: nd, jerinj, chaozhu, Richardson, Bruce, thomas, hemant.agrawal,
	honnappa.nagarahalli, gavin.hu, stable



> 
> Add performance test on all available cores to benchmark
> the scaling up performance of rwlock.
> 
> Fixes: af75078fece3 ("first public release")
> Cc: stable@dpdk.org
> 
> Suggested-by: Gavin Hu <gavin.hu@arm.com>
> Signed-off-by: Joyce Kong <joyce.kong@arm.com>
> --

Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>

> 2.7.4


^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [dpdk-stable] [PATCH v4 3/3] test/rwlock: amortize the cost of getting time
  2019-03-20  6:25   ` [dpdk-stable] [PATCH v4 3/3] test/rwlock: amortize the cost of getting time Joyce Kong
@ 2019-03-21 18:44     ` Ananyev, Konstantin
  0 siblings, 0 replies; 16+ messages in thread
From: Ananyev, Konstantin @ 2019-03-21 18:44 UTC (permalink / raw)
  To: Joyce Kong, dev
  Cc: nd, jerinj, chaozhu, Richardson, Bruce, thomas, hemant.agrawal,
	honnappa.nagarahalli, gavin.hu, stable



> 
> Instead of getting timestamp per iteration, amortize its
> overhead can help to get more precise benchmarking results.
> 
> Fixes: af75078fece3 ("first public release")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Joyce Kong <joyce.kong@arm.com>
> Reviewed-by: Gavin Hu <gavin.hu@arm.com>
> Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
> Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
> ---
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>

> 2.7.4


^ permalink raw reply	[flat|nested] 16+ messages in thread

* [dpdk-stable] [PATCH v5 1/3] rwlock: reimplement with atomic builtins
  2018-12-13  3:37 ` [dpdk-stable] [PATCH v1 1/2] test/rwlock: add perf test case Joyce Kong
                     ` (4 preceding siblings ...)
  2019-03-20  6:25   ` [dpdk-stable] [PATCH v4 3/3] test/rwlock: amortize the cost of getting time Joyce Kong
@ 2019-03-25  9:14   ` Joyce Kong
  2019-03-25  9:14   ` [dpdk-stable] [PATCH v5 2/3] test/rwlock: add perf test case on all available cores Joyce Kong
  2019-03-25  9:14   ` [dpdk-stable] [PATCH v5 3/3] test/rwlock: amortize the cost of getting time Joyce Kong
  7 siblings, 0 replies; 16+ messages in thread
From: Joyce Kong @ 2019-03-25  9:14 UTC (permalink / raw)
  To: dev
  Cc: nd, jerinj, konstantin.ananyev, chaozhu, bruce.richardson,
	thomas, hemant.agrawal, honnappa.nagarahalli, gavin.hu, stable

The __sync builtin based implementation generates full memory
barriers ('dmb ish') on Arm platforms. Use C11 atomic builtins
to generate one-way barriers instead.

Here is the assembly code of __sync_compare_and_swap builtin.
__sync_bool_compare_and_swap(dst, exp, src);
   0x000000000090f1b0 <+16>:    e0 07 40 f9 ldr x0, [sp, #8]
   0x000000000090f1b4 <+20>:    e1 0f 40 79 ldrh    w1, [sp, #6]
   0x000000000090f1b8 <+24>:    e2 0b 40 79 ldrh    w2, [sp, #4]
   0x000000000090f1bc <+28>:    21 3c 00 12 and w1, w1, #0xffff
   0x000000000090f1c0 <+32>:    03 7c 5f 48 ldxrh   w3, [x0]
   0x000000000090f1c4 <+36>:    7f 00 01 6b cmp w3, w1
   0x000000000090f1c8 <+40>:    61 00 00 54 b.ne    0x90f1d4
<rte_atomic16_cmpset+52>  // b.any
   0x000000000090f1cc <+44>:    02 fc 04 48 stlxrh  w4, w2, [x0]
   0x000000000090f1d0 <+48>:    84 ff ff 35 cbnz    w4, 0x90f1c0
<rte_atomic16_cmpset+32>
   0x000000000090f1d4 <+52>:    bf 3b 03 d5 dmb ish
   0x000000000090f1d8 <+56>:    e0 17 9f 1a cset    w0, eq  // eq = none

Fixes: af75078fece3 ("first public release")
Cc: stable@dpdk.org

Signed-off-by: Gavin Hu <gavin.hu@arm.com>
Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Tested-by: Joyce Kong <joyce.kong@arm.com>
Acked-by: Jerin Jacob <jerinj@marvell.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
 lib/librte_eal/common/include/generic/rte_rwlock.h | 29 +++++++++++-----------
 1 file changed, 15 insertions(+), 14 deletions(-)

diff --git a/lib/librte_eal/common/include/generic/rte_rwlock.h b/lib/librte_eal/common/include/generic/rte_rwlock.h
index b05d85a..31608fa 100644
--- a/lib/librte_eal/common/include/generic/rte_rwlock.h
+++ b/lib/librte_eal/common/include/generic/rte_rwlock.h
@@ -64,14 +64,14 @@ rte_rwlock_read_lock(rte_rwlock_t *rwl)
 	int success = 0;
 
 	while (success == 0) {
-		x = rwl->cnt;
+		x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
 		/* write lock is held */
 		if (x < 0) {
 			rte_pause();
 			continue;
 		}
-		success = rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt,
-					      (uint32_t)x, (uint32_t)(x + 1));
+		success = __atomic_compare_exchange_n(&rwl->cnt, &x, x + 1, 1,
+					__ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
 	}
 }
 
@@ -95,13 +95,14 @@ rte_rwlock_read_trylock(rte_rwlock_t *rwl)
 	int success = 0;
 
 	while (success == 0) {
-		x = rwl->cnt;
+		x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
 		/* write lock is held */
 		if (x < 0)
 			return -EBUSY;
-		success = rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt,
-					      (uint32_t)x, (uint32_t)(x + 1));
+		success = __atomic_compare_exchange_n(&rwl->cnt, &x, x + 1, 1,
+					__ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
 	}
+
 	return 0;
 }
 
@@ -114,7 +115,7 @@ rte_rwlock_read_trylock(rte_rwlock_t *rwl)
 static inline void
 rte_rwlock_read_unlock(rte_rwlock_t *rwl)
 {
-	rte_atomic32_dec((rte_atomic32_t *)(intptr_t)&rwl->cnt);
+	__atomic_fetch_sub(&rwl->cnt, 1, __ATOMIC_RELEASE);
 }
 
 /**
@@ -135,9 +136,9 @@ rte_rwlock_write_trylock(rte_rwlock_t *rwl)
 {
 	int32_t x;
 
-	x = rwl->cnt;
-	if (x != 0 || rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt,
-				0, (uint32_t)-1) == 0)
+	x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
+	if (x != 0 || __atomic_compare_exchange_n(&rwl->cnt, &x, -1, 1,
+			      __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) == 0)
 		return -EBUSY;
 
 	return 0;
@@ -156,14 +157,14 @@ rte_rwlock_write_lock(rte_rwlock_t *rwl)
 	int success = 0;
 
 	while (success == 0) {
-		x = rwl->cnt;
+		x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
 		/* a lock is held */
 		if (x != 0) {
 			rte_pause();
 			continue;
 		}
-		success = rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt,
-					      0, (uint32_t)-1);
+		success = __atomic_compare_exchange_n(&rwl->cnt, &x, -1, 1,
+					__ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
 	}
 }
 
@@ -176,7 +177,7 @@ rte_rwlock_write_lock(rte_rwlock_t *rwl)
 static inline void
 rte_rwlock_write_unlock(rte_rwlock_t *rwl)
 {
-	rte_atomic32_inc((rte_atomic32_t *)(intptr_t)&rwl->cnt);
+	__atomic_store_n(&rwl->cnt, 0, __ATOMIC_RELEASE);
 }
 
 /**
-- 
2.7.4


^ permalink raw reply	[flat|nested] 16+ messages in thread

* [dpdk-stable] [PATCH v5 2/3] test/rwlock: add perf test case on all available cores
  2018-12-13  3:37 ` [dpdk-stable] [PATCH v1 1/2] test/rwlock: add perf test case Joyce Kong
                     ` (5 preceding siblings ...)
  2019-03-25  9:14   ` [dpdk-stable] [PATCH v5 1/3] rwlock: reimplement with atomic builtins Joyce Kong
@ 2019-03-25  9:14   ` Joyce Kong
  2019-03-25  9:14   ` [dpdk-stable] [PATCH v5 3/3] test/rwlock: amortize the cost of getting time Joyce Kong
  7 siblings, 0 replies; 16+ messages in thread
From: Joyce Kong @ 2019-03-25  9:14 UTC (permalink / raw)
  To: dev
  Cc: nd, jerinj, konstantin.ananyev, chaozhu, bruce.richardson,
	thomas, hemant.agrawal, honnappa.nagarahalli, gavin.hu, stable

Add performance test on all available cores to benchmark
the scaling up performance of rw_lock.

Fixes: af75078fece3 ("first public release")
Cc: stable@dpdk.org

Suggested-by: Gavin Hu <gavin.hu@arm.com>
Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
 app/test/test_rwlock.c | 75 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 75 insertions(+)

diff --git a/app/test/test_rwlock.c b/app/test/test_rwlock.c
index 224f0de..1d3774e 100644
--- a/app/test/test_rwlock.c
+++ b/app/test/test_rwlock.c
@@ -36,6 +36,7 @@
 
 static rte_rwlock_t sl;
 static rte_rwlock_t sl_tab[RTE_MAX_LCORE];
+static rte_atomic32_t synchro;
 
 enum {
 	LC_TYPE_RDLOCK,
@@ -83,6 +84,77 @@ test_rwlock_per_core(__attribute__((unused)) void *arg)
 	return 0;
 }
 
+static rte_rwlock_t lk = RTE_RWLOCK_INITIALIZER;
+static volatile uint64_t rwlock_data;
+static uint64_t lock_count[RTE_MAX_LCORE] = {0};
+
+#define TIME_MS 100
+#define TEST_RWLOCK_DEBUG 0
+
+static int
+load_loop_fn(__attribute__((unused)) void *arg)
+{
+	uint64_t time_diff = 0, begin;
+	uint64_t hz = rte_get_timer_hz();
+	uint64_t lcount = 0;
+	const unsigned int lcore = rte_lcore_id();
+
+	/* wait synchro for slaves */
+	if (lcore != rte_get_master_lcore())
+		while (rte_atomic32_read(&synchro) == 0)
+			;
+
+	begin = rte_rdtsc_precise();
+	while (time_diff < hz * TIME_MS / 1000) {
+		rte_rwlock_write_lock(&lk);
+		++rwlock_data;
+		rte_rwlock_write_unlock(&lk);
+
+		rte_rwlock_read_lock(&lk);
+		if (TEST_RWLOCK_DEBUG && !(lcount % 100))
+			printf("Core [%u] rwlock_data = %"PRIu64"\n",
+				lcore, rwlock_data);
+		rte_rwlock_read_unlock(&lk);
+
+		lcount++;
+		/* delay to make lock duty cycle slightly realistic */
+		rte_pause();
+		time_diff = rte_rdtsc_precise() - begin;
+	}
+
+	lock_count[lcore] = lcount;
+	return 0;
+}
+
+static int
+test_rwlock_perf(void)
+{
+	unsigned int i;
+	uint64_t total = 0;
+
+	printf("\nRwlock Perf Test on %u cores...\n", rte_lcore_count());
+
+	/* clear synchro and start slaves */
+	rte_atomic32_set(&synchro, 0);
+	if (rte_eal_mp_remote_launch(load_loop_fn, NULL, SKIP_MASTER) < 0)
+		return -1;
+
+	/* start synchro and launch test on master */
+	rte_atomic32_set(&synchro, 1);
+	load_loop_fn(NULL);
+
+	rte_eal_mp_wait_lcore();
+
+	RTE_LCORE_FOREACH(i) {
+		printf("Core [%u] count = %"PRIu64"\n", i, lock_count[i]);
+		total += lock_count[i];
+	}
+
+	printf("Total count = %"PRIu64"\n", total);
+
+	return 0;
+}
+
 /*
  * - There is a global rwlock and a table of rwlocks (one per lcore).
  *
@@ -132,6 +204,9 @@ rwlock_test1(void)
 
 	rte_eal_mp_wait_lcore();
 
+	if (test_rwlock_perf() < 0)
+		return -1;
+
 	return 0;
 }
 
-- 
2.7.4


^ permalink raw reply	[flat|nested] 16+ messages in thread

* [dpdk-stable] [PATCH v5 3/3] test/rwlock: amortize the cost of getting time
  2018-12-13  3:37 ` [dpdk-stable] [PATCH v1 1/2] test/rwlock: add perf test case Joyce Kong
                     ` (6 preceding siblings ...)
  2019-03-25  9:14   ` [dpdk-stable] [PATCH v5 2/3] test/rwlock: add perf test case on all available cores Joyce Kong
@ 2019-03-25  9:14   ` Joyce Kong
  7 siblings, 0 replies; 16+ messages in thread
From: Joyce Kong @ 2019-03-25  9:14 UTC (permalink / raw)
  To: dev
  Cc: nd, jerinj, konstantin.ananyev, chaozhu, bruce.richardson,
	thomas, hemant.agrawal, honnappa.nagarahalli, gavin.hu, stable

Instead of getting a timestamp per iteration, amortizing its
overhead can help to get more precise benchmarking results.

Fixes: af75078fece3 ("first public release")
Cc: stable@dpdk.org

Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
 app/test/test_rwlock.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/app/test/test_rwlock.c b/app/test/test_rwlock.c
index 1d3774e..c3d656a 100644
--- a/app/test/test_rwlock.c
+++ b/app/test/test_rwlock.c
@@ -86,9 +86,9 @@ test_rwlock_per_core(__attribute__((unused)) void *arg)
 
 static rte_rwlock_t lk = RTE_RWLOCK_INITIALIZER;
 static volatile uint64_t rwlock_data;
-static uint64_t lock_count[RTE_MAX_LCORE] = {0};
+static uint64_t time_count[RTE_MAX_LCORE] = {0};
 
-#define TIME_MS 100
+#define MAX_LOOP 10000
 #define TEST_RWLOCK_DEBUG 0
 
 static int
@@ -105,7 +105,7 @@ load_loop_fn(__attribute__((unused)) void *arg)
 			;
 
 	begin = rte_rdtsc_precise();
-	while (time_diff < hz * TIME_MS / 1000) {
+	while (lcount < MAX_LOOP) {
 		rte_rwlock_write_lock(&lk);
 		++rwlock_data;
 		rte_rwlock_write_unlock(&lk);
@@ -119,10 +119,10 @@ load_loop_fn(__attribute__((unused)) void *arg)
 		lcount++;
 		/* delay to make lock duty cycle slightly realistic */
 		rte_pause();
-		time_diff = rte_rdtsc_precise() - begin;
 	}
 
-	lock_count[lcore] = lcount;
+	time_diff = rte_rdtsc_precise() - begin;
+	time_count[lcore] = time_diff * 1000000 / hz;
 	return 0;
 }
 
@@ -146,11 +146,13 @@ test_rwlock_perf(void)
 	rte_eal_mp_wait_lcore();
 
 	RTE_LCORE_FOREACH(i) {
-		printf("Core [%u] count = %"PRIu64"\n", i, lock_count[i]);
-		total += lock_count[i];
+		printf("Core [%u] cost time = %"PRIu64" us\n",
+			i, time_count[i]);
+		total += time_count[i];
 	}
 
-	printf("Total count = %"PRIu64"\n", total);
+	printf("Total cost time = %"PRIu64" us\n", total);
+	memset(time_count, 0, sizeof(time_count));
 
 	return 0;
 }
-- 
2.7.4


^ permalink raw reply	[flat|nested] 16+ messages in thread

end of thread, other threads:[~2019-03-25  9:15 UTC | newest]

Thread overview: 16+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
     [not found] <1544672265-219262-1-git-send-email-joyce.kong@arm.com>
2018-12-13  3:37 ` [dpdk-stable] [PATCH v1 1/2] test/rwlock: add perf test case Joyce Kong
2018-12-19 23:34   ` [dpdk-stable] [dpdk-dev] " Ananyev, Konstantin
2018-12-20  1:01     ` Gavin Hu (Arm Technology China)
2018-12-20  1:45       ` Honnappa Nagarahalli
2019-03-14 13:15   ` [dpdk-stable] [PATCH v3 2/3] test/rwlock: add perf test case on all available cores Joyce Kong
2019-03-14 13:15   ` [dpdk-stable] [PATCH v3 3/3] test/rwlock: amortize the cost of getting time Joyce Kong
2019-03-14 15:02     ` Honnappa Nagarahalli
2019-03-20  6:25   ` [dpdk-stable] [PATCH v4 2/3] test/rwlock: add perf test case on all available cores Joyce Kong
2019-03-21 18:44     ` Ananyev, Konstantin
2019-03-20  6:25   ` [dpdk-stable] [PATCH v4 3/3] test/rwlock: amortize the cost of getting time Joyce Kong
2019-03-21 18:44     ` Ananyev, Konstantin
2019-03-25  9:14   ` [dpdk-stable] [PATCH v5 1/3] rwlock: reimplement with atomic builtins Joyce Kong
2019-03-25  9:14   ` [dpdk-stable] [PATCH v5 2/3] test/rwlock: add perf test case on all available cores Joyce Kong
2019-03-25  9:14   ` [dpdk-stable] [PATCH v5 3/3] test/rwlock: amortize the cost of getting time Joyce Kong
2018-12-13  3:37 ` [dpdk-stable] [PATCH v1 2/2] rwlock: reimplement with __atomic builtins Joyce Kong
2018-12-19 23:50   ` [dpdk-stable] [dpdk-dev] " Ananyev, Konstantin

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).