DPDK patches and discussions
* [dpdk-dev] [PATCH] rcu/test: make global variable per core
@ 2019-05-15 20:42 Honnappa Nagarahalli
  2019-05-15 20:42 ` Honnappa Nagarahalli
  2019-05-16  1:14 ` [dpdk-dev] [PATCH v2] " Honnappa Nagarahalli
  0 siblings, 2 replies; 8+ messages in thread
From: Honnappa Nagarahalli @ 2019-05-15 20:42 UTC (permalink / raw)
  To: honnappa.nagarahalli, david.marchand, dev; +Cc: dharmik.thakkar, nd

Each hash entry has a pointer to a single uint32_t memory
location. However, all the reader threads increment the same
location, causing race conditions. Allocate one memory location
per thread instead, so that each thread increments its own
location.
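
For illustration, a minimal before/after sketch of the reader
loop (the identifiers and the allocation size are taken from the
test code changed below):

    /* Before: every reader increments the same location -> data race */
    uint32_t *pdata;
    if (rte_hash_lookup_data(hash, keys + i, (void **)&pdata) != -ENOENT) {
            *pdata = 0;
            while (*pdata < COUNTER_VALUE)
                    ++*pdata;
    }

    /* After: one counter slot per lcore, allocated once per entry as
     * rte_zmalloc(NULL, sizeof(uint32_t) * TEST_RCU_MAX_LCORE, 0),
     * so each reader touches only its own slot */
    pdata[lcore_id] = 0;
    while (pdata[lcore_id] < COUNTER_VALUE)
            pdata[lcore_id]++;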

Reported-by: David Marchand <david.marchand@redhat.com>
Signed-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Reviewed-by: Dharmik Thakkar <dharmik.thakkar@arm.com>
Tested-by: David Marchand <david.marchand@redhat.com>
---

This patch depends on http://patchwork.dpdk.org/patch/53421

 app/test/test_rcu_qsbr.c      | 163 ++++++++++++++++++++++------------
 app/test/test_rcu_qsbr_perf.c | 105 +++++++++++-----------
 2 files changed, 160 insertions(+), 108 deletions(-)
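
The functional test now partitions the enabled lcores into groups
of four (two readers, two writers) that share one QSBR variable
and one hash. A sketch of the launch pattern, using the names
introduced in the diff below:

    for (i = 0; i < test_cores / 4; i++) {
            int j = i * 4;
            /* group i shares RCU variable t[i] and hash h[i] */
            thread_info[i].ir = i;
            thread_info[i].ih = i;
            /* record the reader lcores so the writers know which
             * counter slots to validate */
            thread_info[i].r_core_ids[0] = enabled_core_ids[j];
            thread_info[i].r_core_ids[1] = enabled_core_ids[j + 1];
            rte_eal_remote_launch(test_rcu_qsbr_reader, &thread_info[i],
                                  enabled_core_ids[j]);
            rte_eal_remote_launch(test_rcu_qsbr_reader, &thread_info[i],
                                  enabled_core_ids[j + 1]);
            rte_eal_remote_launch(test_rcu_qsbr_writer, &thread_info[i],
                                  enabled_core_ids[j + 2]);
            rte_eal_remote_launch(test_rcu_qsbr_writer, &thread_info[i],
                                  enabled_core_ids[j + 3]);
    }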

diff --git a/app/test/test_rcu_qsbr.c b/app/test/test_rcu_qsbr.c
index 92ab0c20a..5f0b1383e 100644
--- a/app/test/test_rcu_qsbr.c
+++ b/app/test/test_rcu_qsbr.c
@@ -40,6 +40,16 @@ static struct rte_rcu_qsbr *t[TEST_RCU_MAX_LCORE];
 struct rte_hash *h[TEST_RCU_MAX_LCORE];
 char hash_name[TEST_RCU_MAX_LCORE][8];
 
+struct test_rcu_thread_info {
+	/* Index in RCU array */
+	int ir;
+	/* Index in hash array */
+	int ih;
+	/* lcore IDs registered on the RCU variable */
+	uint16_t r_core_ids[2];
+};
+struct test_rcu_thread_info thread_info[TEST_RCU_MAX_LCORE/4];
+
 static inline int
 get_enabled_cores_mask(void)
 {
@@ -629,11 +639,12 @@ test_rcu_qsbr_reader(void *arg)
 	struct rte_hash *hash = NULL;
 	int i;
 	uint32_t lcore_id = rte_lcore_id();
-	uint8_t read_type = (uint8_t)((uintptr_t)arg);
+	struct test_rcu_thread_info *ti;
 	uint32_t *pdata;
 
-	temp = t[read_type];
-	hash = h[read_type];
+	ti = (struct test_rcu_thread_info *)arg;
+	temp = t[ti->ir];
+	hash = h[ti->ih];
 
 	do {
 		rte_rcu_qsbr_thread_register(temp, lcore_id);
@@ -642,9 +653,9 @@ test_rcu_qsbr_reader(void *arg)
 			rte_rcu_qsbr_lock(temp, lcore_id);
 			if (rte_hash_lookup_data(hash, keys+i,
 					(void **)&pdata) != -ENOENT) {
-				*pdata = 0;
-				while (*pdata < COUNTER_VALUE)
-					++*pdata;
+				pdata[lcore_id] = 0;
+				while (pdata[lcore_id] < COUNTER_VALUE)
+					pdata[lcore_id]++;
 			}
 			rte_rcu_qsbr_unlock(temp, lcore_id);
 		}
@@ -661,44 +672,42 @@ static int
 test_rcu_qsbr_writer(void *arg)
 {
 	uint64_t token;
-	int32_t pos;
+	int32_t i, pos, del;
+	uint32_t c;
 	struct rte_rcu_qsbr *temp;
 	struct rte_hash *hash = NULL;
-	uint8_t writer_type = (uint8_t)((uintptr_t)arg);
+	struct test_rcu_thread_info *ti;
 
-	temp = t[(writer_type/2) % TEST_RCU_MAX_LCORE];
-	hash = h[(writer_type/2) % TEST_RCU_MAX_LCORE];
+	ti = (struct test_rcu_thread_info *)arg;
+	temp = t[ti->ir];
+	hash = h[ti->ih];
 
 	/* Delete element from the shared data structure */
-	pos = rte_hash_del_key(hash, keys + (writer_type % TOTAL_ENTRY));
+	del = rte_lcore_id() % TOTAL_ENTRY;
+	pos = rte_hash_del_key(hash, keys + del);
 	if (pos < 0) {
-		printf("Delete key failed #%d\n",
-		       keys[writer_type % TOTAL_ENTRY]);
+		printf("Delete key failed #%d\n", keys[del]);
 		return -1;
 	}
 	/* Start the quiescent state query process */
 	token = rte_rcu_qsbr_start(temp);
 	/* Check the quiescent state status */
 	rte_rcu_qsbr_check(temp, token, true);
-	if (*hash_data[(writer_type/2) % TEST_RCU_MAX_LCORE]
-	    [writer_type % TOTAL_ENTRY] != COUNTER_VALUE &&
-	    *hash_data[(writer_type/2) % TEST_RCU_MAX_LCORE]
-	    [writer_type % TOTAL_ENTRY] != 0) {
-		printf("Reader did not complete #%d = %d\t", writer_type,
-			*hash_data[(writer_type/2) % TEST_RCU_MAX_LCORE]
-				[writer_type % TOTAL_ENTRY]);
-		return -1;
+	for (i = 0; i < 2; i++) {
+		c = hash_data[ti->ih][del][ti->r_core_ids[i]];
+		if (c != COUNTER_VALUE && c != 0) {
+			printf("Reader lcore id %u did not complete = %u\t",
+				rte_lcore_id(), c);
+			return -1;
+		}
 	}
 
 	if (rte_hash_free_key_with_position(hash, pos) < 0) {
-		printf("Failed to free the key #%d\n",
-		       keys[writer_type % TOTAL_ENTRY]);
+		printf("Failed to free the key #%d\n", keys[del]);
 		return -1;
 	}
-	rte_free(hash_data[(writer_type/2) % TEST_RCU_MAX_LCORE]
-				[writer_type % TOTAL_ENTRY]);
-	hash_data[(writer_type/2) % TEST_RCU_MAX_LCORE]
-			[writer_type % TOTAL_ENTRY] = NULL;
+	rte_free(hash_data[ti->ih][del]);
+	hash_data[ti->ih][del] = NULL;
 
 	return 0;
 }
@@ -728,7 +737,9 @@ init_hash(int hash_id)
 	}
 
 	for (i = 0; i < TOTAL_ENTRY; i++) {
-		hash_data[hash_id][i] = rte_zmalloc(NULL, sizeof(uint32_t), 0);
+		hash_data[hash_id][i] =
+			rte_zmalloc(NULL,
+				sizeof(uint32_t) * TEST_RCU_MAX_LCORE, 0);
 		if (hash_data[hash_id][i] == NULL) {
 			printf("No memory\n");
 			return NULL;
@@ -762,6 +773,7 @@ static int
 test_rcu_qsbr_sw_sv_3qs(void)
 {
 	uint64_t token[3];
+	uint32_t c;
 	int i;
 	int32_t pos[3];
 
@@ -778,9 +790,15 @@ test_rcu_qsbr_sw_sv_3qs(void)
 		goto error;
 	}
 
+	/* No need to fill the registered core IDs as the writer
+	 * thread is not launched.
+	 */
+	thread_info[0].ir = 0;
+	thread_info[0].ih = 0;
+
 	/* Reader threads are launched */
 	for (i = 0; i < 4; i++)
-		rte_eal_remote_launch(test_rcu_qsbr_reader, NULL,
+		rte_eal_remote_launch(test_rcu_qsbr_reader, &thread_info[0],
 					enabled_core_ids[i]);
 
 	/* Delete element from the shared data structure */
@@ -812,9 +830,13 @@ test_rcu_qsbr_sw_sv_3qs(void)
 
 	/* Check the quiescent state status */
 	rte_rcu_qsbr_check(t[0], token[0], true);
-	if (*hash_data[0][0] != COUNTER_VALUE && *hash_data[0][0] != 0) {
-		printf("Reader did not complete #0 = %d\n", *hash_data[0][0]);
-		goto error;
+	for (i = 0; i < 4; i++) {
+		c = hash_data[0][0][enabled_core_ids[i]];
+		if (c != COUNTER_VALUE && c != 0) {
+			printf("Reader lcore %d did not complete #0 = %d\n",
+				enabled_core_ids[i], c);
+			goto error;
+		}
 	}
 
 	if (rte_hash_free_key_with_position(h[0], pos[0]) < 0) {
@@ -826,9 +848,13 @@ test_rcu_qsbr_sw_sv_3qs(void)
 
 	/* Check the quiescent state status */
 	rte_rcu_qsbr_check(t[0], token[1], true);
-	if (*hash_data[0][3] != COUNTER_VALUE && *hash_data[0][3] != 0) {
-		printf("Reader did not complete #3 = %d\n", *hash_data[0][3]);
-		goto error;
+	for (i = 0; i < 4; i++) {
+		c = hash_data[0][3][enabled_core_ids[i]];
+		if (c != COUNTER_VALUE && c != 0) {
+			printf("Reader lcore %d did not complete #3 = %d\n",
+				enabled_core_ids[i], c);
+			goto error;
+		}
 	}
 
 	if (rte_hash_free_key_with_position(h[0], pos[1]) < 0) {
@@ -840,9 +866,13 @@ test_rcu_qsbr_sw_sv_3qs(void)
 
 	/* Check the quiescent state status */
 	rte_rcu_qsbr_check(t[0], token[2], true);
-	if (*hash_data[0][6] != COUNTER_VALUE && *hash_data[0][6] != 0) {
-		printf("Reader did not complete #6 = %d\n", *hash_data[0][6]);
-		goto error;
+	for (i = 0; i < 4; i++) {
+		c = hash_data[0][6][enabled_core_ids[i]];
+		if (c != COUNTER_VALUE && c != 0) {
+			printf("Reader lcore %d did not complete #6 = %d\n",
+				enabled_core_ids[i], c);
+			goto error;
+		}
 	}
 
 	if (rte_hash_free_key_with_position(h[0], pos[2]) < 0) {
@@ -889,42 +919,61 @@ test_rcu_qsbr_mw_mv_mqs(void)
 	test_cores = num_cores / 4;
 	test_cores = test_cores * 4;
 
-	printf("Test: %d writers, %d QSBR variable, simultaneous QSBR queries\n"
-	       , test_cores / 2, test_cores / 4);
+	printf("Test: %d writers, %d QSBR variable, simultaneous QSBR queries\n",
+	       test_cores / 2, test_cores / 4);
 
-	for (i = 0; i < num_cores / 4; i++) {
+	for (i = 0; i < test_cores / 4; i++) {
+		j = i * 4;
 		rte_rcu_qsbr_init(t[i], TEST_RCU_MAX_LCORE);
 		h[i] = init_hash(i);
 		if (h[i] == NULL) {
 			printf("Hash init failed\n");
 			goto error;
 		}
-	}
+		thread_info[i].ir = i;
+		thread_info[i].ih = i;
+		thread_info[i].r_core_ids[0] = enabled_core_ids[j];
+		thread_info[i].r_core_ids[1] = enabled_core_ids[j + 1];
 
-	/* Reader threads are launched */
-	for (i = 0; i < test_cores / 2; i++)
+		/* Reader threads are launched */
 		rte_eal_remote_launch(test_rcu_qsbr_reader,
-				      (void *)(uintptr_t)(i / 2),
-					enabled_core_ids[i]);
+					(void *)&thread_info[i],
+					enabled_core_ids[j]);
+		rte_eal_remote_launch(test_rcu_qsbr_reader,
+					(void *)&thread_info[i],
+					enabled_core_ids[j + 1]);
 
-	/* Writer threads are launched */
-	for (; i < test_cores; i++)
+		/* Writer threads are launched */
 		rte_eal_remote_launch(test_rcu_qsbr_writer,
-				      (void *)(uintptr_t)(i - (test_cores / 2)),
-					enabled_core_ids[i]);
+					(void *)&thread_info[i],
+					enabled_core_ids[j + 2]);
+		rte_eal_remote_launch(test_rcu_qsbr_writer,
+					(void *)&thread_info[i],
+					enabled_core_ids[j + 3]);
+	}
+
 	/* Wait and check return value from writer threads */
-	for (i = test_cores / 2; i < test_cores;  i++)
-		if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
+	for (i = 0; i < test_cores / 4; i++) {
+		j = i * 4;
+		if (rte_eal_wait_lcore(enabled_core_ids[j + 2]) < 0)
 			goto error;
 
+		if (rte_eal_wait_lcore(enabled_core_ids[j + 3]) < 0)
+			goto error;
+	}
 	writer_done = 1;
 
 	/* Wait and check return value from reader threads */
-	for (i = 0; i < test_cores / 2; i++)
-		if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
+	for (i = 0; i < test_cores / 4; i++) {
+		j = i * 4;
+		if (rte_eal_wait_lcore(enabled_core_ids[j]) < 0)
 			goto error;
 
-	for (i = 0; i < num_cores / 4; i++)
+		if (rte_eal_wait_lcore(enabled_core_ids[j + 1]) < 0)
+			goto error;
+	}
+
+	for (i = 0; i < test_cores / 4; i++)
 		rte_hash_free(h[i]);
 
 	rte_free(keys);
@@ -936,10 +985,10 @@ test_rcu_qsbr_mw_mv_mqs(void)
 	/* Wait until all readers and writers have exited */
 	rte_eal_mp_wait_lcore();
 
-	for (i = 0; i < num_cores / 4; i++)
+	for (i = 0; i < test_cores / 4; i++)
 		rte_hash_free(h[i]);
 	rte_free(keys);
-	for (j = 0; j < TEST_RCU_MAX_LCORE; j++)
+	for (j = 0; j < test_cores / 4; j++)
 		for (i = 0; i < TOTAL_ENTRY; i++)
 			rte_free(hash_data[j][i]);
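
The perf test below applies the same fix, except that readers
index the per-thread array by their registered RCU thread ID
rather than the lcore ID, and the writer validates every slot up
to the number of registered threads. A sketch with names from the
diff that follows:

    /* reader: thread_id was registered via
     * rte_rcu_qsbr_thread_register(temp, thread_id) */
    pdata[thread_id] = 0;
    while (pdata[thread_id] < COUNTER_VALUE)
            pdata[thread_id]++;

    /* writer: after rte_rcu_qsbr_check(), each slot holds either
     * COUNTER_VALUE (the reader finished) or 0 (the reader never
     * saw the key) */
    for (j = 0; j < tmp_num_cores; j++)
            if (hash_data[i][j] != COUNTER_VALUE && hash_data[i][j] != 0)
                    goto error;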
 
diff --git a/app/test/test_rcu_qsbr_perf.c b/app/test/test_rcu_qsbr_perf.c
index 6b1912c0c..33ca36c63 100644
--- a/app/test/test_rcu_qsbr_perf.c
+++ b/app/test/test_rcu_qsbr_perf.c
@@ -23,14 +23,14 @@ static uint8_t num_cores;
 static uint32_t *keys;
 #define TOTAL_ENTRY (1024 * 8)
 #define COUNTER_VALUE 4096
-static uint32_t *hash_data[TEST_RCU_MAX_LCORE][TOTAL_ENTRY];
+static uint32_t *hash_data[TOTAL_ENTRY];
 static volatile uint8_t writer_done;
 static volatile uint8_t all_registered;
 static volatile uint32_t thr_id;
 
 static struct rte_rcu_qsbr *t[TEST_RCU_MAX_LCORE];
-static struct rte_hash *h[TEST_RCU_MAX_LCORE];
-static char hash_name[TEST_RCU_MAX_LCORE][8];
+static struct rte_hash *h;
+static char hash_name[8];
 static rte_atomic64_t updates, checks;
 static rte_atomic64_t update_cycles, check_cycles;
 
@@ -309,7 +309,7 @@ test_rcu_qsbr_hash_reader(void *arg)
 	uint32_t *pdata;
 
 	temp = t[read_type];
-	hash = h[read_type];
+	hash = h;
 
 	rte_rcu_qsbr_thread_register(temp, thread_id);
 
@@ -319,11 +319,11 @@ test_rcu_qsbr_hash_reader(void *arg)
 		rte_rcu_qsbr_thread_online(temp, thread_id);
 		for (i = 0; i < TOTAL_ENTRY; i++) {
 			rte_rcu_qsbr_lock(temp, thread_id);
-			if (rte_hash_lookup_data(hash, keys+i,
+			if (rte_hash_lookup_data(hash, keys + i,
 					(void **)&pdata) != -ENOENT) {
-				*pdata = 0;
-				while (*pdata < COUNTER_VALUE)
-					++*pdata;
+				pdata[thread_id] = 0;
+				while (pdata[thread_id] < COUNTER_VALUE)
+					pdata[thread_id]++;
 			}
 			rte_rcu_qsbr_unlock(temp, thread_id);
 		}
@@ -342,13 +342,12 @@ test_rcu_qsbr_hash_reader(void *arg)
 	return 0;
 }
 
-static struct rte_hash *
-init_hash(int hash_id)
+static struct rte_hash *init_hash(void)
 {
 	int i;
-	struct rte_hash *h = NULL;
+	struct rte_hash *hash = NULL;
 
-	sprintf(hash_name[hash_id], "hash%d", hash_id);
+	snprintf(hash_name, 8, "hash");
 	struct rte_hash_parameters hash_params = {
 		.entries = TOTAL_ENTRY,
 		.key_len = sizeof(uint32_t),
@@ -357,18 +356,19 @@ init_hash(int hash_id)
 		.hash_func = rte_hash_crc,
 		.extra_flag =
 			RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF,
-		.name = hash_name[hash_id],
+		.name = hash_name,
 	};
 
-	h = rte_hash_create(&hash_params);
-	if (h == NULL) {
+	hash = rte_hash_create(&hash_params);
+	if (hash == NULL) {
 		printf("Hash create Failed\n");
 		return NULL;
 	}
 
 	for (i = 0; i < TOTAL_ENTRY; i++) {
-		hash_data[hash_id][i] = rte_zmalloc(NULL, sizeof(uint32_t), 0);
-		if (hash_data[hash_id][i] == NULL) {
+		hash_data[i] = rte_zmalloc(NULL,
+				sizeof(uint32_t) * TEST_RCU_MAX_LCORE, 0);
+		if (hash_data[i] == NULL) {
 			printf("No memory\n");
 			return NULL;
 		}
@@ -383,14 +383,13 @@ init_hash(int hash_id)
 		keys[i] = i;
 
 	for (i = 0; i < TOTAL_ENTRY; i++) {
-		if (rte_hash_add_key_data(h, keys + i,
-				(void *)((uintptr_t)hash_data[hash_id][i]))
-				< 0) {
+		if (rte_hash_add_key_data(hash, keys + i,
+				(void *)((uintptr_t)hash_data[i])) < 0) {
 			printf("Hash key add Failed #%d\n", i);
 			return NULL;
 		}
 	}
-	return h;
+	return hash;
 }
 
 /*
@@ -401,7 +400,7 @@ static int
 test_rcu_qsbr_sw_sv_1qs(void)
 {
 	uint64_t token, begin, cycles;
-	int i, tmp_num_cores, sz;
+	int i, j, tmp_num_cores, sz;
 	int32_t pos;
 
 	writer_done = 0;
@@ -427,8 +426,8 @@ test_rcu_qsbr_sw_sv_1qs(void)
 	rte_rcu_qsbr_init(t[0], tmp_num_cores);
 
 	/* Shared data structure created */
-	h[0] = init_hash(0);
-	if (h[0] == NULL) {
+	h = init_hash();
+	if (h == NULL) {
 		printf("Hash init failed\n");
 		goto error;
 	}
@@ -442,7 +441,7 @@ test_rcu_qsbr_sw_sv_1qs(void)
 
 	for (i = 0; i < TOTAL_ENTRY; i++) {
 		/* Delete elements from the shared data structure */
-		pos = rte_hash_del_key(h[0], keys + i);
+		pos = rte_hash_del_key(h, keys + i);
 		if (pos < 0) {
 			printf("Delete key failed #%d\n", keys[i]);
 			goto error;
@@ -452,19 +451,21 @@ test_rcu_qsbr_sw_sv_1qs(void)
 
 		/* Check the quiescent state status */
 		rte_rcu_qsbr_check(t[0], token, true);
-		if (*hash_data[0][i] != COUNTER_VALUE &&
-			*hash_data[0][i] != 0) {
-			printf("Reader did not complete #%d =  %d\n", i,
-							*hash_data[0][i]);
-			goto error;
+		for (j = 0; j < tmp_num_cores; j++) {
+			if (hash_data[i][j] != COUNTER_VALUE &&
+				hash_data[i][j] != 0) {
+				printf("Reader thread ID %u did not complete #%d =  %d\n",
+					j, i, hash_data[i][j]);
+				goto error;
+			}
 		}
 
-		if (rte_hash_free_key_with_position(h[0], pos) < 0) {
+		if (rte_hash_free_key_with_position(h, pos) < 0) {
 			printf("Failed to free the key #%d\n", keys[i]);
 			goto error;
 		}
-		rte_free(hash_data[0][i]);
-		hash_data[0][i] = NULL;
+		rte_free(hash_data[i]);
+		hash_data[i] = NULL;
 	}
 
 	cycles = rte_rdtsc_precise() - begin;
@@ -477,7 +478,7 @@ test_rcu_qsbr_sw_sv_1qs(void)
 	for (i = 0; i < num_cores; i++)
 		if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
 			goto error;
-	rte_hash_free(h[0]);
+	rte_hash_free(h);
 	rte_free(keys);
 
 	printf("Following numbers include calls to rte_hash functions\n");
@@ -498,10 +499,10 @@ test_rcu_qsbr_sw_sv_1qs(void)
 	/* Wait until all readers have exited */
 	rte_eal_mp_wait_lcore();
 
-	rte_hash_free(h[0]);
+	rte_hash_free(h);
 	rte_free(keys);
 	for (i = 0; i < TOTAL_ENTRY; i++)
-		rte_free(hash_data[0][i]);
+		rte_free(hash_data[i]);
 
 	rte_free(t[0]);
 
@@ -517,7 +518,7 @@ static int
 test_rcu_qsbr_sw_sv_1qs_non_blocking(void)
 {
 	uint64_t token, begin, cycles;
-	int i, ret, tmp_num_cores, sz;
+	int i, j, ret, tmp_num_cores, sz;
 	int32_t pos;
 
 	writer_done = 0;
@@ -538,8 +539,8 @@ test_rcu_qsbr_sw_sv_1qs_non_blocking(void)
 	rte_rcu_qsbr_init(t[0], tmp_num_cores);
 
 	/* Shared data structure created */
-	h[0] = init_hash(0);
-	if (h[0] == NULL) {
+	h = init_hash();
+	if (h == NULL) {
 		printf("Hash init failed\n");
 		goto error;
 	}
@@ -553,7 +554,7 @@ test_rcu_qsbr_sw_sv_1qs_non_blocking(void)
 
 	for (i = 0; i < TOTAL_ENTRY; i++) {
 		/* Delete elements from the shared data structure */
-		pos = rte_hash_del_key(h[0], keys + i);
+		pos = rte_hash_del_key(h, keys + i);
 		if (pos < 0) {
 			printf("Delete key failed #%d\n", keys[i]);
 			goto error;
@@ -565,19 +566,21 @@ test_rcu_qsbr_sw_sv_1qs_non_blocking(void)
 		do {
 			ret = rte_rcu_qsbr_check(t[0], token, false);
 		} while (ret == 0);
-		if (*hash_data[0][i] != COUNTER_VALUE &&
-			*hash_data[0][i] != 0) {
-			printf("Reader did not complete  #%d = %d\n", i,
-							*hash_data[0][i]);
-			goto error;
+		for (j = 0; j < tmp_num_cores; j++) {
+			if (hash_data[i][j] != COUNTER_VALUE &&
+				hash_data[i][j] != 0) {
+				printf("Reader thread ID %u did not complete #%d =  %d\n",
+					j, i, hash_data[i][j]);
+				goto error;
+			}
 		}
 
-		if (rte_hash_free_key_with_position(h[0], pos) < 0) {
+		if (rte_hash_free_key_with_position(h, pos) < 0) {
 			printf("Failed to free the key #%d\n", keys[i]);
 			goto error;
 		}
-		rte_free(hash_data[0][i]);
-		hash_data[0][i] = NULL;
+		rte_free(hash_data[i]);
+		hash_data[i] = NULL;
 	}
 
 	cycles = rte_rdtsc_precise() - begin;
@@ -589,7 +592,7 @@ test_rcu_qsbr_sw_sv_1qs_non_blocking(void)
 	for (i = 0; i < num_cores; i++)
 		if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
 			goto error;
-	rte_hash_free(h[0]);
+	rte_hash_free(h);
 	rte_free(keys);
 
 	printf("Following numbers include calls to rte_hash functions\n");
@@ -610,10 +613,10 @@ test_rcu_qsbr_sw_sv_1qs_non_blocking(void)
 	/* Wait until all readers have exited */
 	rte_eal_mp_wait_lcore();
 
-	rte_hash_free(h[0]);
+	rte_hash_free(h);
 	rte_free(keys);
 	for (i = 0; i < TOTAL_ENTRY; i++)
-		rte_free(hash_data[0][i]);
+		rte_free(hash_data[i]);
 
 	rte_free(t[0]);
 
-- 
2.17.1
