DPDK patches and discussions
From: Dharmik Thakkar <dharmik.thakkar@arm.com>
To: Yipeng Wang <yipeng1.wang@intel.com>,
	Sameh Gobriel <sameh.gobriel@intel.com>,
	Bruce Richardson <bruce.richardson@intel.com>,
	Vladimir Medvedkin <vladimir.medvedkin@intel.com>
Cc: dev@dpdk.org, nd@arm.com,
	Dharmik Thakkar <dharmik.thakkar@arm.com>,
	Joyce Kong <joyce.kong@arm.com>,
	Ruifeng Wang <ruifeng.wang@arm.com>
Subject: [dpdk-dev] [PATCH] test/hash: use compiler atomics for sync
Date: Wed, 22 Sep 2021 16:52:05 -0500	[thread overview]
Message-ID: <20210922215205.2638916-1-dharmik.thakkar@arm.com> (raw)

Convert rte_atomic usages to compiler atomic built-ins for
statistics synchronization.
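
The change maps each legacy rte_atomic64 call onto the corresponding
GCC/Clang __atomic built-in with relaxed ordering, which is sufficient
for these per-test statistics counters. A minimal sketch of the pattern
(illustrative only, not part of the diff below; the counter and helper
names are invented for the example):

	#include <stdint.h>

	/* before: static rte_atomic64_t counter; */
	static uint64_t counter;

	static void
	stats_add(uint64_t delta)
	{
		/* before: rte_atomic64_add(&counter, delta); */
		__atomic_fetch_add(&counter, delta, __ATOMIC_RELAXED);
	}

	static uint64_t
	stats_read(void)
	{
		/* before: rte_atomic64_read(&counter); */
		return __atomic_load_n(&counter, __ATOMIC_RELAXED);
	}

	static void
	stats_reset(void)
	{
		/* before: rte_atomic64_init(&counter); rte_atomic64_clear(&counter); */
		__atomic_store_n(&counter, 0, __ATOMIC_RELAXED);
	}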

Signed-off-by: Dharmik Thakkar <dharmik.thakkar@arm.com>
Reviewed-by: Joyce Kong <joyce.kong@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
---
 app/test/test_hash_multiwriter.c | 19 ++++----
 app/test/test_hash_readwrite.c   | 80 +++++++++++++++-----------------
 2 files changed, 45 insertions(+), 54 deletions(-)

diff --git a/app/test/test_hash_multiwriter.c b/app/test/test_hash_multiwriter.c
index afa3c7b93d85..0c5a8ca18607 100644
--- a/app/test/test_hash_multiwriter.c
+++ b/app/test/test_hash_multiwriter.c
@@ -43,8 +43,8 @@ const uint32_t nb_entries = 5*1024*1024;
 const uint32_t nb_total_tsx_insertion = 4.5*1024*1024;
 uint32_t rounded_nb_total_tsx_insertion;
 
-static rte_atomic64_t gcycles;
-static rte_atomic64_t ginsertions;
+static uint64_t gcycles;
+static uint64_t ginsertions;
 
 static int use_htm;
 
@@ -84,8 +84,8 @@ test_hash_multiwriter_worker(void *arg)
 	}
 
 	cycles = rte_rdtsc_precise() - begin;
-	rte_atomic64_add(&gcycles, cycles);
-	rte_atomic64_add(&ginsertions, i - offset);
+	__atomic_fetch_add(&gcycles, cycles, __ATOMIC_RELAXED);
+	__atomic_fetch_add(&ginsertions, i - offset, __ATOMIC_RELAXED);
 
 	for (; i < offset + tbl_multiwriter_test_params.nb_tsx_insertion; i++)
 		tbl_multiwriter_test_params.keys[i]
@@ -168,11 +168,8 @@ test_hash_multiwriter(void)
 
 	tbl_multiwriter_test_params.found = found;
 
-	rte_atomic64_init(&gcycles);
-	rte_atomic64_clear(&gcycles);
-
-	rte_atomic64_init(&ginsertions);
-	rte_atomic64_clear(&ginsertions);
+	__atomic_store_n(&gcycles, 0, __ATOMIC_RELAXED);
+	__atomic_store_n(&ginsertions, 0, __ATOMIC_RELAXED);
 
 	/* Get list of enabled cores */
 	i = 0;
@@ -238,8 +235,8 @@ test_hash_multiwriter(void)
 	printf("No key corrupted during multiwriter insertion.\n");
 
 	unsigned long long int cycles_per_insertion =
-		rte_atomic64_read(&gcycles)/
-		rte_atomic64_read(&ginsertions);
+		__atomic_load_n(&gcycles, __ATOMIC_RELAXED)/
+		__atomic_load_n(&ginsertions, __ATOMIC_RELAXED);
 
 	printf(" cycles per insertion: %llu\n", cycles_per_insertion);
 
diff --git a/app/test/test_hash_readwrite.c b/app/test/test_hash_readwrite.c
index 4860768a6491..9b192f2b5e7c 100644
--- a/app/test/test_hash_readwrite.c
+++ b/app/test/test_hash_readwrite.c
@@ -45,14 +45,14 @@ struct {
 	struct rte_hash *h;
 } tbl_rw_test_param;
 
-static rte_atomic64_t gcycles;
-static rte_atomic64_t ginsertions;
+static uint64_t gcycles;
+static uint64_t ginsertions;
 
-static rte_atomic64_t gread_cycles;
-static rte_atomic64_t gwrite_cycles;
+static uint64_t gread_cycles;
+static uint64_t gwrite_cycles;
 
-static rte_atomic64_t greads;
-static rte_atomic64_t gwrites;
+static uint64_t greads;
+static uint64_t gwrites;
 
 static int
 test_hash_readwrite_worker(__rte_unused void *arg)
@@ -110,8 +110,8 @@ test_hash_readwrite_worker(__rte_unused void *arg)
 	}
 
 	cycles = rte_rdtsc_precise() - begin;
-	rte_atomic64_add(&gcycles, cycles);
-	rte_atomic64_add(&ginsertions, i - offset);
+	__atomic_fetch_add(&gcycles, cycles, __ATOMIC_RELAXED);
+	__atomic_fetch_add(&ginsertions, i - offset, __ATOMIC_RELAXED);
 
 	for (; i < offset + tbl_rw_test_param.num_insert; i++)
 		tbl_rw_test_param.keys[i] = RTE_RWTEST_FAIL;
@@ -209,11 +209,8 @@ test_hash_readwrite_functional(int use_htm, int use_rw_lf, int use_ext)
 	int worker_cnt = rte_lcore_count() - 1;
 	uint32_t tot_insert = 0;
 
-	rte_atomic64_init(&gcycles);
-	rte_atomic64_clear(&gcycles);
-
-	rte_atomic64_init(&ginsertions);
-	rte_atomic64_clear(&ginsertions);
+	__atomic_store_n(&gcycles, 0, __ATOMIC_RELAXED);
+	__atomic_store_n(&ginsertions, 0, __ATOMIC_RELAXED);
 
 	if (init_params(use_ext, use_htm, use_rw_lf, use_jhash) != 0)
 		goto err;
@@ -272,8 +269,8 @@ test_hash_readwrite_functional(int use_htm, int use_rw_lf, int use_ext)
 	printf("No key corrupted during read-write test.\n");
 
 	unsigned long long int cycles_per_insertion =
-		rte_atomic64_read(&gcycles) /
-		rte_atomic64_read(&ginsertions);
+		__atomic_load_n(&gcycles, __ATOMIC_RELAXED) /
+		__atomic_load_n(&ginsertions, __ATOMIC_RELAXED);
 
 	printf("cycles per insertion and lookup: %llu\n", cycles_per_insertion);
 
@@ -313,8 +310,8 @@ test_rw_reader(void *arg)
 	}
 
 	cycles = rte_rdtsc_precise() - begin;
-	rte_atomic64_add(&gread_cycles, cycles);
-	rte_atomic64_add(&greads, i);
+	__atomic_fetch_add(&gread_cycles, cycles, __ATOMIC_RELAXED);
+	__atomic_fetch_add(&greads, i, __ATOMIC_RELAXED);
 	return 0;
 }
 
@@ -347,8 +344,9 @@ test_rw_writer(void *arg)
 	}
 
 	cycles = rte_rdtsc_precise() - begin;
-	rte_atomic64_add(&gwrite_cycles, cycles);
-	rte_atomic64_add(&gwrites, tbl_rw_test_param.num_insert);
+	__atomic_fetch_add(&gwrite_cycles, cycles, __ATOMIC_RELAXED);
+	__atomic_fetch_add(&gwrites, tbl_rw_test_param.num_insert,
+							__ATOMIC_RELAXED);
 	return 0;
 }
 
@@ -371,15 +369,11 @@ test_hash_readwrite_perf(struct perf *perf_results, int use_htm,
 
 	uint64_t start = 0, end = 0;
 
-	rte_atomic64_init(&greads);
-	rte_atomic64_init(&gwrites);
-	rte_atomic64_clear(&gwrites);
-	rte_atomic64_clear(&greads);
+	__atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
+	__atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
 
-	rte_atomic64_init(&gread_cycles);
-	rte_atomic64_clear(&gread_cycles);
-	rte_atomic64_init(&gwrite_cycles);
-	rte_atomic64_clear(&gwrite_cycles);
+	__atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+	__atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
 
 	if (init_params(0, use_htm, 0, use_jhash) != 0)
 		goto err;
@@ -436,10 +430,10 @@ test_hash_readwrite_perf(struct perf *perf_results, int use_htm,
 		if (tot_worker_lcore < core_cnt[n] * 2)
 			goto finish;
 
-		rte_atomic64_clear(&greads);
-		rte_atomic64_clear(&gread_cycles);
-		rte_atomic64_clear(&gwrites);
-		rte_atomic64_clear(&gwrite_cycles);
+		__atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
+		__atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+		__atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
+		__atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
 
 		rte_hash_reset(tbl_rw_test_param.h);
 
@@ -481,8 +475,8 @@ test_hash_readwrite_perf(struct perf *perf_results, int use_htm,
 
 		if (reader_faster) {
 			unsigned long long int cycles_per_insertion =
-				rte_atomic64_read(&gread_cycles) /
-				rte_atomic64_read(&greads);
+				__atomic_load_n(&gread_cycles, __ATOMIC_RELAXED) /
+				__atomic_load_n(&greads, __ATOMIC_RELAXED);
 			perf_results->read_only[n] = cycles_per_insertion;
 			printf("Reader only: cycles per lookup: %llu\n",
 							cycles_per_insertion);
@@ -490,17 +484,17 @@ test_hash_readwrite_perf(struct perf *perf_results, int use_htm,
 
 		else {
 			unsigned long long int cycles_per_insertion =
-				rte_atomic64_read(&gwrite_cycles) /
-				rte_atomic64_read(&gwrites);
+				__atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /
+				__atomic_load_n(&gwrites, __ATOMIC_RELAXED);
 			perf_results->write_only[n] = cycles_per_insertion;
 			printf("Writer only: cycles per writes: %llu\n",
 							cycles_per_insertion);
 		}
 
-		rte_atomic64_clear(&greads);
-		rte_atomic64_clear(&gread_cycles);
-		rte_atomic64_clear(&gwrites);
-		rte_atomic64_clear(&gwrite_cycles);
+		__atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
+		__atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
+		__atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
+		__atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
 
 		rte_hash_reset(tbl_rw_test_param.h);
 
@@ -575,8 +569,8 @@ test_hash_readwrite_perf(struct perf *perf_results, int use_htm,
 
 		if (reader_faster) {
 			unsigned long long int cycles_per_insertion =
-				rte_atomic64_read(&gread_cycles) /
-				rte_atomic64_read(&greads);
+				__atomic_load_n(&gread_cycles, __ATOMIC_RELAXED) /
+				__atomic_load_n(&greads, __ATOMIC_RELAXED);
 			perf_results->read_write_r[n] = cycles_per_insertion;
 			printf("Read-write cycles per lookup: %llu\n",
 							cycles_per_insertion);
@@ -584,8 +578,8 @@ test_hash_readwrite_perf(struct perf *perf_results, int use_htm,
 
 		else {
 			unsigned long long int cycles_per_insertion =
-				rte_atomic64_read(&gwrite_cycles) /
-				rte_atomic64_read(&gwrites);
+				__atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /
+				__atomic_load_n(&gwrites, __ATOMIC_RELAXED);
 			perf_results->read_write_w[n] = cycles_per_insertion;
 			printf("Read-write cycles per writes: %llu\n",
 							cycles_per_insertion);
-- 
2.25.1


Thread overview: 6+ messages
2021-09-22 21:52 Dharmik Thakkar [this message]
2021-10-02 15:15 ` David Marchand
2021-10-04 16:37   ` Wang, Yipeng1
2021-10-17 14:09     ` David Marchand
2021-10-18 18:04 ` David Christensen
2021-10-19 14:29 ` David Marchand
