From: Yipeng Wang <yipeng1.wang@intel.com>
To: bruce.richardson@intel.com
Cc: konstantin.ananyev@intel.com, dev@dpdk.org,
yipeng1.wang@intel.com, honnappa.nagarahalli@arm.com,
sameh.gobriel@intel.com, dharmik.thakkar@arm.com,
qiaobinf@bu.edu, michel@digirati.com.br
Subject: [dpdk-dev] [PATCH v7 3/4] test/hash: implement extendable bucket hash test
Date: Wed, 10 Oct 2018 14:27:41 -0700
Message-ID: <1539206862-306341-4-git-send-email-yipeng1.wang@intel.com>
In-Reply-To: <1539206862-306341-1-git-send-email-yipeng1.wang@intel.com>
This commit changes the current rte_hash unit tests to exercise the
extendable bucket table feature, covering both functionality
(test_hash.c) and performance (test_hash_perf.c).
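For reference, the new tests rely on the flag introduced in patch 2/4 of
this series. A minimal, illustrative sketch of how a table is created with
that flag (the name, size and key type below are examples only, not code
from this patch):

    #include <rte_hash.h>
    #include <rte_jhash.h>

    struct rte_hash_parameters params = {
            .name = "ext_example",    /* example name only */
            .entries = 64,            /* small table, so inserts soon spill
                                       * into the extendable buckets */
            .key_len = sizeof(uint32_t),
            .hash_func = rte_jhash,
            .hash_func_init_val = 0,
            .socket_id = 0,
            .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
    };
    struct rte_hash *h = rte_hash_create(&params);
    /* With the flag set, rte_hash_add_key() keeps succeeding until the
     * requested number of entries is reached, instead of failing once
     * the cuckoo buckets are full; test_extendable_bucket() below
     * verifies exactly that for all 64 keys. */
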
Signed-off-by: Yipeng Wang <yipeng1.wang@intel.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Acked-by: Dharmik Thakkar <dharmik.thakkar@arm.com>
---
test/test/test_hash.c | 159 +++++++++++++++++++++++++++++++++++++++++++--
test/test/test_hash_perf.c | 114 +++++++++++++++++++++++---------
2 files changed, 238 insertions(+), 35 deletions(-)
diff --git a/test/test/test_hash.c b/test/test/test_hash.c
index b3db9fd..815c734 100644
--- a/test/test/test_hash.c
+++ b/test/test/test_hash.c
@@ -660,6 +660,116 @@ static int test_full_bucket(void)
return 0;
}
+/*
+ * Similar to the test above (full bucket test), but for extendable buckets.
+ */
+static int test_extendable_bucket(void)
+{
+ struct rte_hash_parameters params_pseudo_hash = {
+ .name = "test5",
+ .entries = 64,
+ .key_len = sizeof(struct flow_key), /* 13 */
+ .hash_func = pseudo_hash,
+ .hash_func_init_val = 0,
+ .socket_id = 0,
+ .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE
+ };
+ struct rte_hash *handle;
+ int pos[64];
+ int expected_pos[64];
+ unsigned int i;
+ struct flow_key rand_keys[64];
+
+ for (i = 0; i < 64; i++) {
+ rand_keys[i].port_dst = i;
+ rand_keys[i].port_src = i+1;
+ }
+
+ handle = rte_hash_create(&params_pseudo_hash);
+ RETURN_IF_ERROR(handle == NULL, "hash creation failed");
+
+ /* Fill bucket */
+ for (i = 0; i < 64; i++) {
+ pos[i] = rte_hash_add_key(handle, &rand_keys[i]);
+ print_key_info("Add", &rand_keys[i], pos[i]);
+ RETURN_IF_ERROR(pos[i] < 0,
+ "failed to add key (pos[%u]=%d)", i, pos[i]);
+ expected_pos[i] = pos[i];
+ }
+
+ /* Lookup */
+ for (i = 0; i < 64; i++) {
+ pos[i] = rte_hash_lookup(handle, &rand_keys[i]);
+ print_key_info("Lkp", &rand_keys[i], pos[i]);
+ RETURN_IF_ERROR(pos[i] != expected_pos[i],
+ "failed to find key (pos[%u]=%d)", i, pos[i]);
+ }
+
+ /* Add - update */
+ for (i = 0; i < 64; i++) {
+ pos[i] = rte_hash_add_key(handle, &rand_keys[i]);
+ print_key_info("Add", &rand_keys[i], pos[i]);
+ RETURN_IF_ERROR(pos[i] != expected_pos[i],
+ "failed to add key (pos[%u]=%d)", i, pos[i]);
+ }
+
+ /* Lookup */
+ for (i = 0; i < 64; i++) {
+ pos[i] = rte_hash_lookup(handle, &rand_keys[i]);
+ print_key_info("Lkp", &rand_keys[i], pos[i]);
+ RETURN_IF_ERROR(pos[i] != expected_pos[i],
+ "failed to find key (pos[%u]=%d)", i, pos[i]);
+ }
+
+ /* Delete 1 key, check other keys are still found */
+ pos[35] = rte_hash_del_key(handle, &rand_keys[35]);
+ print_key_info("Del", &rand_keys[35], pos[35]);
+ RETURN_IF_ERROR(pos[35] != expected_pos[35],
+ "failed to delete key (pos[1]=%d)", pos[35]);
+ pos[20] = rte_hash_lookup(handle, &rand_keys[20]);
+ print_key_info("Lkp", &rand_keys[20], pos[20]);
+ RETURN_IF_ERROR(pos[20] != expected_pos[20],
+ "failed lookup after deleting key from same bucket "
+ "(pos[20]=%d)", pos[20]);
+
+ /* Go back to previous state */
+ pos[35] = rte_hash_add_key(handle, &rand_keys[35]);
+ print_key_info("Add", &rand_keys[35], pos[35]);
+ expected_pos[35] = pos[35];
+ RETURN_IF_ERROR(pos[35] < 0, "failed to add key (pos[35]=%d)", pos[35]);
+
+ /* Delete */
+ for (i = 0; i < 64; i++) {
+ pos[i] = rte_hash_del_key(handle, &rand_keys[i]);
+ print_key_info("Del", &rand_keys[i], pos[i]);
+ RETURN_IF_ERROR(pos[i] != expected_pos[i],
+ "failed to delete key (pos[%u]=%d)", i, pos[i]);
+ }
+
+ /* Lookup */
+ for (i = 0; i < 64; i++) {
+ pos[i] = rte_hash_lookup(handle, &rand_keys[i]);
+ print_key_info("Lkp", &rand_keys[i], pos[i]);
+ RETURN_IF_ERROR(pos[i] != -ENOENT,
+ "fail: found non-existent key (pos[%u]=%d)", i, pos[i]);
+ }
+
+ /* Add again */
+ for (i = 0; i < 64; i++) {
+ pos[i] = rte_hash_add_key(handle, &rand_keys[i]);
+ print_key_info("Add", &rand_keys[i], pos[i]);
+ RETURN_IF_ERROR(pos[i] < 0,
+ "failed to add key (pos[%u]=%d)", i, pos[i]);
+ expected_pos[i] = pos[i];
+ }
+
+ rte_hash_free(handle);
+
+ /* Cover the NULL case. */
+ rte_hash_free(0);
+ return 0;
+}
+
/******************************************************************************/
static int
fbk_hash_unit_test(void)
@@ -1096,7 +1206,7 @@ test_hash_creation_with_good_parameters(void)
* Test to see the average table utilization (entries added/max entries)
* before hitting a random entry that cannot be added
*/
-static int test_average_table_utilization(void)
+static int test_average_table_utilization(uint32_t ext_table)
{
struct rte_hash *handle;
uint8_t simple_key[MAX_KEYSIZE];
@@ -1107,12 +1217,23 @@ static int test_average_table_utilization(void)
printf("\n# Running test to determine average utilization"
"\n before adding elements begins to fail\n");
+ if (ext_table)
+ printf("ext table is enabled\n");
+ else
+ printf("ext table is disabled\n");
+
printf("Measuring performance, please wait");
fflush(stdout);
ut_params.entries = 1 << 16;
ut_params.name = "test_average_utilization";
ut_params.hash_func = rte_jhash;
+ if (ext_table)
+ ut_params.extra_flag |= RTE_HASH_EXTRA_FLAGS_EXT_TABLE;
+ else
+ ut_params.extra_flag &= ~RTE_HASH_EXTRA_FLAGS_EXT_TABLE;
+
handle = rte_hash_create(&ut_params);
+
RETURN_IF_ERROR(handle == NULL, "hash creation failed");
for (j = 0; j < ITERATIONS; j++) {
@@ -1139,6 +1260,14 @@ static int test_average_table_utilization(void)
rte_hash_free(handle);
return -1;
}
+ if (ext_table) {
+ if (cnt != ut_params.entries) {
+ printf("rte_hash_count returned wrong value "
+ "%u, %u, %u\n", j, added_keys, cnt);
+ rte_hash_free(handle);
+ return -1;
+ }
+ }
average_keys_added += added_keys;
@@ -1161,7 +1290,7 @@ static int test_average_table_utilization(void)
}
#define NUM_ENTRIES 256
-static int test_hash_iteration(void)
+static int test_hash_iteration(uint32_t ext_table)
{
struct rte_hash *handle;
unsigned i;
@@ -1177,6 +1306,11 @@ static int test_hash_iteration(void)
ut_params.name = "test_hash_iteration";
ut_params.hash_func = rte_jhash;
ut_params.key_len = 16;
+ if (ext_table)
+ ut_params.extra_flag |= RTE_HASH_EXTRA_FLAGS_EXT_TABLE;
+ else
+ ut_params.extra_flag &= ~RTE_HASH_EXTRA_FLAGS_EXT_TABLE;
+
handle = rte_hash_create(&ut_params);
RETURN_IF_ERROR(handle == NULL, "hash creation failed");
@@ -1186,8 +1320,13 @@ static int test_hash_iteration(void)
for (i = 0; i < ut_params.key_len; i++)
keys[added_keys][i] = rte_rand() % 255;
ret = rte_hash_add_key_data(handle, keys[added_keys], data[added_keys]);
- if (ret < 0)
+ if (ret < 0) {
+ if (ext_table) {
+ printf("Insertion failed for ext table\n");
+ goto err;
+ }
break;
+ }
}
/* Iterate through the hash table */
@@ -1474,6 +1613,8 @@ test_hash(void)
return -1;
if (test_full_bucket() < 0)
return -1;
+ if (test_extendable_bucket() < 0)
+ return -1;
if (test_fbk_hash_find_existing() < 0)
return -1;
@@ -1483,9 +1624,17 @@ test_hash(void)
return -1;
if (test_hash_creation_with_good_parameters() < 0)
return -1;
- if (test_average_table_utilization() < 0)
+
+ /* ext table disabled */
+ if (test_average_table_utilization(0) < 0)
+ return -1;
+ if (test_hash_iteration(0) < 0)
+ return -1;
+
+ /* ext table enabled */
+ if (test_average_table_utilization(1) < 0)
return -1;
- if (test_hash_iteration() < 0)
+ if (test_hash_iteration(1) < 0)
return -1;
run_hash_func_tests();
diff --git a/test/test/test_hash_perf.c b/test/test/test_hash_perf.c
index 0d39e10..5252111 100644
--- a/test/test/test_hash_perf.c
+++ b/test/test/test_hash_perf.c
@@ -18,7 +18,8 @@
#include "test.h"
#define MAX_ENTRIES (1 << 19)
-#define KEYS_TO_ADD (MAX_ENTRIES * 3 / 4) /* 75% table utilization */
+#define KEYS_TO_ADD (MAX_ENTRIES)
+#define ADD_PERCENT 0.75 /* 75% table utilization */
#define NUM_LOOKUPS (KEYS_TO_ADD * 5) /* Loop among keys added, several times */
/* BUCKET_SIZE should be same as RTE_HASH_BUCKET_ENTRIES in rte_hash library */
#define BUCKET_SIZE 8
@@ -78,7 +79,7 @@ static struct rte_hash_parameters ut_params = {
static int
create_table(unsigned int with_data, unsigned int table_index,
- unsigned int with_locks)
+ unsigned int with_locks, unsigned int ext)
{
char name[RTE_HASH_NAMESIZE];
@@ -96,6 +97,9 @@ create_table(unsigned int with_data, unsigned int table_index,
else
ut_params.extra_flag = 0;
+ if (ext)
+ ut_params.extra_flag |= RTE_HASH_EXTRA_FLAGS_EXT_TABLE;
+
ut_params.name = name;
ut_params.key_len = hashtest_key_lens[table_index];
ut_params.socket_id = rte_socket_id();
@@ -117,15 +121,21 @@ create_table(unsigned int with_data, unsigned int table_index,
/* Shuffle the keys that have been added, so lookups will be totally random */
static void
-shuffle_input_keys(unsigned table_index)
+shuffle_input_keys(unsigned int table_index, unsigned int ext)
{
unsigned i;
uint32_t swap_idx;
uint8_t temp_key[MAX_KEYSIZE];
hash_sig_t temp_signature;
int32_t temp_position;
+ unsigned int keys_to_add;
+
+ if (!ext)
+ keys_to_add = KEYS_TO_ADD * ADD_PERCENT;
+ else
+ keys_to_add = KEYS_TO_ADD;
- for (i = KEYS_TO_ADD - 1; i > 0; i--) {
+ for (i = keys_to_add - 1; i > 0; i--) {
swap_idx = rte_rand() % i;
memcpy(temp_key, keys[i], hashtest_key_lens[table_index]);
@@ -147,14 +157,20 @@ shuffle_input_keys(unsigned table_index)
* ALL can fit in hash table (no errors)
*/
static int
-get_input_keys(unsigned with_pushes, unsigned table_index)
+get_input_keys(unsigned int with_pushes, unsigned int table_index,
+ unsigned int ext)
{
unsigned i, j;
unsigned bucket_idx, incr, success = 1;
uint8_t k = 0;
int32_t ret;
const uint32_t bucket_bitmask = NUM_BUCKETS - 1;
+ unsigned int keys_to_add;
+ if (!ext)
+ keys_to_add = KEYS_TO_ADD * ADD_PERCENT;
+ else
+ keys_to_add = KEYS_TO_ADD;
/* Reset all arrays */
for (i = 0; i < MAX_ENTRIES; i++)
slot_taken[i] = 0;
@@ -171,7 +187,7 @@ get_input_keys(unsigned with_pushes, unsigned table_index)
* Regardless a key has been added correctly or not (success),
* the next one to try will be increased by 1.
*/
- for (i = 0; i < KEYS_TO_ADD;) {
+ for (i = 0; i < keys_to_add;) {
incr = 0;
if (i != 0) {
keys[i][0] = ++k;
@@ -235,14 +251,20 @@ get_input_keys(unsigned with_pushes, unsigned table_index)
}
static int
-timed_adds(unsigned with_hash, unsigned with_data, unsigned table_index)
+timed_adds(unsigned int with_hash, unsigned int with_data,
+ unsigned int table_index, unsigned int ext)
{
unsigned i;
const uint64_t start_tsc = rte_rdtsc();
void *data;
int32_t ret;
+ unsigned int keys_to_add;
+ if (!ext)
+ keys_to_add = KEYS_TO_ADD * ADD_PERCENT;
+ else
+ keys_to_add = KEYS_TO_ADD;
- for (i = 0; i < KEYS_TO_ADD; i++) {
+ for (i = 0; i < keys_to_add; i++) {
data = (void *) ((uintptr_t) signatures[i]);
if (with_hash && with_data) {
ret = rte_hash_add_key_with_hash_data(h[table_index],
@@ -284,22 +306,31 @@ timed_adds(unsigned with_hash, unsigned with_data, unsigned table_index)
const uint64_t end_tsc = rte_rdtsc();
const uint64_t time_taken = end_tsc - start_tsc;
- cycles[table_index][ADD][with_hash][with_data] = time_taken/KEYS_TO_ADD;
+ cycles[table_index][ADD][with_hash][with_data] = time_taken/keys_to_add;
return 0;
}
static int
-timed_lookups(unsigned with_hash, unsigned with_data, unsigned table_index)
+timed_lookups(unsigned int with_hash, unsigned int with_data,
+ unsigned int table_index, unsigned int ext)
{
unsigned i, j;
const uint64_t start_tsc = rte_rdtsc();
void *ret_data;
void *expected_data;
int32_t ret;
-
- for (i = 0; i < NUM_LOOKUPS/KEYS_TO_ADD; i++) {
- for (j = 0; j < KEYS_TO_ADD; j++) {
+ unsigned int keys_to_add, num_lookups;
+
+ if (!ext) {
+ keys_to_add = KEYS_TO_ADD * ADD_PERCENT;
+ num_lookups = NUM_LOOKUPS * ADD_PERCENT;
+ } else {
+ keys_to_add = KEYS_TO_ADD;
+ num_lookups = NUM_LOOKUPS;
+ }
+ for (i = 0; i < num_lookups / keys_to_add; i++) {
+ for (j = 0; j < keys_to_add; j++) {
if (with_hash && with_data) {
ret = rte_hash_lookup_with_hash_data(h[table_index],
(const void *) keys[j],
@@ -352,13 +383,14 @@ timed_lookups(unsigned with_hash, unsigned with_data, unsigned table_index)
const uint64_t end_tsc = rte_rdtsc();
const uint64_t time_taken = end_tsc - start_tsc;
- cycles[table_index][LOOKUP][with_hash][with_data] = time_taken/NUM_LOOKUPS;
+ cycles[table_index][LOOKUP][with_hash][with_data] = time_taken/num_lookups;
return 0;
}
static int
-timed_lookups_multi(unsigned with_data, unsigned table_index)
+timed_lookups_multi(unsigned int with_data, unsigned int table_index,
+ unsigned int ext)
{
unsigned i, j, k;
int32_t positions_burst[BURST_SIZE];
@@ -367,11 +399,20 @@ timed_lookups_multi(unsigned with_data, unsigned table_index)
void *ret_data[BURST_SIZE];
uint64_t hit_mask;
int ret;
+ unsigned int keys_to_add, num_lookups;
+
+ if (!ext) {
+ keys_to_add = KEYS_TO_ADD * ADD_PERCENT;
+ num_lookups = NUM_LOOKUPS * ADD_PERCENT;
+ } else {
+ keys_to_add = KEYS_TO_ADD;
+ num_lookups = NUM_LOOKUPS;
+ }
const uint64_t start_tsc = rte_rdtsc();
- for (i = 0; i < NUM_LOOKUPS/KEYS_TO_ADD; i++) {
- for (j = 0; j < KEYS_TO_ADD/BURST_SIZE; j++) {
+ for (i = 0; i < num_lookups/keys_to_add; i++) {
+ for (j = 0; j < keys_to_add/BURST_SIZE; j++) {
for (k = 0; k < BURST_SIZE; k++)
keys_burst[k] = keys[j * BURST_SIZE + k];
if (with_data) {
@@ -419,19 +460,25 @@ timed_lookups_multi(unsigned with_data, unsigned table_index)
const uint64_t end_tsc = rte_rdtsc();
const uint64_t time_taken = end_tsc - start_tsc;
- cycles[table_index][LOOKUP_MULTI][0][with_data] = time_taken/NUM_LOOKUPS;
+ cycles[table_index][LOOKUP_MULTI][0][with_data] = time_taken/num_lookups;
return 0;
}
static int
-timed_deletes(unsigned with_hash, unsigned with_data, unsigned table_index)
+timed_deletes(unsigned int with_hash, unsigned int with_data,
+ unsigned int table_index, unsigned int ext)
{
unsigned i;
const uint64_t start_tsc = rte_rdtsc();
int32_t ret;
+ unsigned int keys_to_add;
+ if (!ext)
+ keys_to_add = KEYS_TO_ADD * ADD_PERCENT;
+ else
+ keys_to_add = KEYS_TO_ADD;
- for (i = 0; i < KEYS_TO_ADD; i++) {
+ for (i = 0; i < keys_to_add; i++) {
/* There are no delete functions with data, so just call two functions */
if (with_hash)
ret = rte_hash_del_key_with_hash(h[table_index],
@@ -451,7 +498,7 @@ timed_deletes(unsigned with_hash, unsigned with_data, unsigned table_index)
const uint64_t end_tsc = rte_rdtsc();
const uint64_t time_taken = end_tsc - start_tsc;
- cycles[table_index][DELETE][with_hash][with_data] = time_taken/KEYS_TO_ADD;
+ cycles[table_index][DELETE][with_hash][with_data] = time_taken/keys_to_add;
return 0;
}
@@ -469,7 +516,8 @@ reset_table(unsigned table_index)
}
static int
-run_all_tbl_perf_tests(unsigned int with_pushes, unsigned int with_locks)
+run_all_tbl_perf_tests(unsigned int with_pushes, unsigned int with_locks,
+ unsigned int ext)
{
unsigned i, j, with_data, with_hash;
@@ -478,25 +526,25 @@ run_all_tbl_perf_tests(unsigned int with_pushes, unsigned int with_locks)
for (with_data = 0; with_data <= 1; with_data++) {
for (i = 0; i < NUM_KEYSIZES; i++) {
- if (create_table(with_data, i, with_locks) < 0)
+ if (create_table(with_data, i, with_locks, ext) < 0)
return -1;
- if (get_input_keys(with_pushes, i) < 0)
+ if (get_input_keys(with_pushes, i, ext) < 0)
return -1;
for (with_hash = 0; with_hash <= 1; with_hash++) {
- if (timed_adds(with_hash, with_data, i) < 0)
+ if (timed_adds(with_hash, with_data, i, ext) < 0)
return -1;
for (j = 0; j < NUM_SHUFFLES; j++)
- shuffle_input_keys(i);
+ shuffle_input_keys(i, ext);
- if (timed_lookups(with_hash, with_data, i) < 0)
+ if (timed_lookups(with_hash, with_data, i, ext) < 0)
return -1;
- if (timed_lookups_multi(with_data, i) < 0)
+ if (timed_lookups_multi(with_data, i, ext) < 0)
return -1;
- if (timed_deletes(with_hash, with_data, i) < 0)
+ if (timed_deletes(with_hash, with_data, i, ext) < 0)
return -1;
/* Print a dot to show progress on operations */
@@ -632,10 +680,16 @@ test_hash_perf(void)
printf("\nALL ELEMENTS IN PRIMARY LOCATION\n");
else
printf("\nELEMENTS IN PRIMARY OR SECONDARY LOCATION\n");
- if (run_all_tbl_perf_tests(with_pushes, with_locks) < 0)
+ if (run_all_tbl_perf_tests(with_pushes, with_locks, 0) < 0)
return -1;
}
}
+
+ printf("\n EXTENDABLE BUCKETS PERFORMANCE\n");
+
+ if (run_all_tbl_perf_tests(1, 0, 1) < 0)
+ return -1;
+
if (fbk_hash_perf_test() < 0)
return -1;
--
2.7.4