* [dpdk-dev] [PATCH 1/2] hash: add hash bulk lookup with hash signatures array
@ 2020-03-09 12:44 Vladimir Medvedkin
2020-03-09 12:44 ` [dpdk-dev] [PATCH 2/2] test: update hash performance tests Vladimir Medvedkin
` (3 more replies)
0 siblings, 4 replies; 19+ messages in thread
From: Vladimir Medvedkin @ 2020-03-09 12:44 UTC (permalink / raw)
To: dev; +Cc: yipeng1.wang, sameh.gobriel, bruce.richardson
Implement rte_hash_lookup_with_hash_bulk_data() - lookup
function with precomputed hash signatures.
Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
---
lib/librte_hash/rte_cuckoo_hash.c | 402 +++++++++++++++++++++++++++++++++++
lib/librte_hash/rte_hash.h | 27 +++
lib/librte_hash/rte_hash_version.map | 1 +
3 files changed, 430 insertions(+)
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 6af8ca4..9a054eb 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -2165,6 +2165,408 @@ rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
return __builtin_popcountl(*hit_mask);
}
+static inline void
+__rte_hash_lookup_with_hash_bulk_l(const struct rte_hash *h,
+ const void **keys, hash_sig_t *prim_hash,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ uint64_t hits = 0;
+ int32_t i;
+ int32_t ret;
+ uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
+ uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
+ struct rte_hash_bucket *cur_bkt, *next_bkt;
+
+ /*
+ * Prefetch keys, calculate primary and
+ * secondary bucket and prefetch them
+ */
+ for (i = 0; i < num_keys; i++) {
+ rte_prefetch0(keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+
+ __hash_rw_reader_lock(h);
+
+ /* Compare signatures and prefetch key slot of first hit */
+ for (i = 0; i < num_keys; i++) {
+ compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
+ primary_bkt[i], secondary_bkt[i],
+ sig[i], h->sig_cmp_fn);
+
+ if (prim_hitmask[i]) {
+ uint32_t first_hit =
+ __builtin_ctzl(prim_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ primary_bkt[i]->key_idx[first_hit];
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+ rte_prefetch0(key_slot);
+ continue;
+ }
+
+ if (sec_hitmask[i]) {
+ uint32_t first_hit =
+ __builtin_ctzl(sec_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ secondary_bkt[i]->key_idx[first_hit];
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+ rte_prefetch0(key_slot);
+ }
+ }
+
+ /* Compare keys, first hits in primary first */
+ for (i = 0; i < num_keys; i++) {
+ positions[i] = -ENOENT;
+ while (prim_hitmask[i]) {
+ uint32_t hit_index =
+ __builtin_ctzl(prim_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ primary_bkt[i]->key_idx[hit_index];
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+
+ /*
+ * If key index is 0, do not compare key,
+ * as it is checking the dummy slot
+ */
+ if (!!key_idx &
+ !rte_hash_cmp_eq(
+ key_slot->key, keys[i], h)) {
+ if (data != NULL)
+ data[i] = key_slot->pdata;
+
+ hits |= 1ULL << i;
+ positions[i] = key_idx - 1;
+ goto next_key;
+ }
+ prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
+ }
+
+ while (sec_hitmask[i]) {
+ uint32_t hit_index =
+ __builtin_ctzl(sec_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ secondary_bkt[i]->key_idx[hit_index];
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+
+ /*
+ * If key index is 0, do not compare key,
+ * as it is checking the dummy slot
+ */
+
+ if (!!key_idx &
+ !rte_hash_cmp_eq(
+ key_slot->key, keys[i], h)) {
+ if (data != NULL)
+ data[i] = key_slot->pdata;
+
+ hits |= 1ULL << i;
+ positions[i] = key_idx - 1;
+ goto next_key;
+ }
+ sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
+ }
+next_key:
+ continue;
+ }
+
+ /* all found, do not need to go through ext bkt */
+ if ((hits == ((1ULL << num_keys) - 1)) || !h->ext_table_support) {
+ if (hit_mask != NULL)
+ *hit_mask = hits;
+ __hash_rw_reader_unlock(h);
+ return;
+ }
+
+ /* need to check ext buckets for match */
+ for (i = 0; i < num_keys; i++) {
+ if ((hits & (1ULL << i)) != 0)
+ continue;
+ next_bkt = secondary_bkt[i]->next;
+ FOR_EACH_BUCKET(cur_bkt, next_bkt) {
+ if (data != NULL)
+ ret = search_one_bucket_l(h, keys[i],
+ sig[i], &data[i], cur_bkt);
+ else
+ ret = search_one_bucket_l(h, keys[i],
+ sig[i], NULL, cur_bkt);
+ if (ret != -1) {
+ positions[i] = ret;
+ hits |= 1ULL << i;
+ break;
+ }
+ }
+ }
+
+ __hash_rw_reader_unlock(h);
+
+ if (hit_mask != NULL)
+ *hit_mask = hits;
+}
+
+static inline void
+__rte_hash_lookup_with_hash_bulk_lf(const struct rte_hash *h,
+ const void **keys, hash_sig_t *prim_hash,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ uint64_t hits = 0;
+ int32_t i;
+ int32_t ret;
+ uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
+ uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
+ struct rte_hash_bucket *cur_bkt, *next_bkt;
+ uint32_t cnt_b, cnt_a;
+
+ /*
+ * Prefetch keys, calculate primary and
+ * secondary bucket and prefetch them
+ */
+ for (i = 0; i < num_keys; i++) {
+ rte_prefetch0(keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+
+ for (i = 0; i < num_keys; i++)
+ positions[i] = -ENOENT;
+
+ do {
+ /* Load the table change counter before the lookup
+ * starts. Acquire semantics will make sure that
+ * loads in compare_signatures are not hoisted.
+ */
+ cnt_b = __atomic_load_n(h->tbl_chng_cnt,
+ __ATOMIC_ACQUIRE);
+
+ /* Compare signatures and prefetch key slot of first hit */
+ for (i = 0; i < num_keys; i++) {
+ compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
+ primary_bkt[i], secondary_bkt[i],
+ sig[i], h->sig_cmp_fn);
+
+ if (prim_hitmask[i]) {
+ uint32_t first_hit =
+ __builtin_ctzl(prim_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ primary_bkt[i]->key_idx[first_hit];
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+ rte_prefetch0(key_slot);
+ continue;
+ }
+
+ if (sec_hitmask[i]) {
+ uint32_t first_hit =
+ __builtin_ctzl(sec_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ secondary_bkt[i]->key_idx[first_hit];
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+ rte_prefetch0(key_slot);
+ }
+ }
+
+ /* Compare keys, first hits in primary first */
+ for (i = 0; i < num_keys; i++) {
+ while (prim_hitmask[i]) {
+ uint32_t hit_index =
+ __builtin_ctzl(prim_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ __atomic_load_n(
+ &primary_bkt[i]->key_idx[hit_index],
+ __ATOMIC_ACQUIRE);
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+
+ /*
+ * If key index is 0, do not compare key,
+ * as it is checking the dummy slot
+ */
+ if (!!key_idx &
+ !rte_hash_cmp_eq(
+ key_slot->key, keys[i], h)) {
+ if (data != NULL)
+ data[i] = __atomic_load_n(
+ &key_slot->pdata,
+ __ATOMIC_ACQUIRE);
+
+ hits |= 1ULL << i;
+ positions[i] = key_idx - 1;
+ goto next_key;
+ }
+ prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
+ }
+
+ while (sec_hitmask[i]) {
+ uint32_t hit_index =
+ __builtin_ctzl(sec_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ __atomic_load_n(
+ &secondary_bkt[i]->key_idx[hit_index],
+ __ATOMIC_ACQUIRE);
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+
+ /*
+ * If key index is 0, do not compare key,
+ * as it is checking the dummy slot
+ */
+
+ if (!!key_idx &
+ !rte_hash_cmp_eq(
+ key_slot->key, keys[i], h)) {
+ if (data != NULL)
+ data[i] = __atomic_load_n(
+ &key_slot->pdata,
+ __ATOMIC_ACQUIRE);
+
+ hits |= 1ULL << i;
+ positions[i] = key_idx - 1;
+ goto next_key;
+ }
+ sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
+ }
+next_key:
+ continue;
+ }
+
+ /* all found, do not need to go through ext bkt */
+ if (hits == ((1ULL << num_keys) - 1)) {
+ if (hit_mask != NULL)
+ *hit_mask = hits;
+ return;
+ }
+ /* need to check ext buckets for match */
+ if (h->ext_table_support) {
+ for (i = 0; i < num_keys; i++) {
+ if ((hits & (1ULL << i)) != 0)
+ continue;
+ next_bkt = secondary_bkt[i]->next;
+ FOR_EACH_BUCKET(cur_bkt, next_bkt) {
+ if (data != NULL)
+ ret = search_one_bucket_lf(h,
+ keys[i], sig[i],
+ &data[i], cur_bkt);
+ else
+ ret = search_one_bucket_lf(h,
+ keys[i], sig[i],
+ NULL, cur_bkt);
+ if (ret != -1) {
+ positions[i] = ret;
+ hits |= 1ULL << i;
+ break;
+ }
+ }
+ }
+ }
+ /* The loads of sig_current in compare_signatures
+ * should not move below the load from tbl_chng_cnt.
+ */
+ __atomic_thread_fence(__ATOMIC_ACQUIRE);
+ /* Re-read the table change counter to check if the
+ * table has changed during search. If yes, re-do
+ * the search.
+ * This load should not get hoisted. The load
+ * acquires on cnt_b, primary key index and secondary
+ * key index will make sure that it does not get
+ * hoisted.
+ */
+ cnt_a = __atomic_load_n(h->tbl_chng_cnt,
+ __ATOMIC_ACQUIRE);
+ } while (cnt_b != cnt_a);
+
+ if (hit_mask != NULL)
+ *hit_mask = hits;
+}
+
+static inline void
+__rte_hash_lookup_with_hash_bulk(const struct rte_hash *h, const void **keys,
+ hash_sig_t *prim_hash, int32_t num_keys,
+ int32_t *positions, uint64_t *hit_mask, void *data[])
+{
+ if (h->readwrite_concur_lf_support)
+ __rte_hash_lookup_with_hash_bulk_lf(h, keys, prim_hash,
+ num_keys, positions, hit_mask, data);
+ else
+ __rte_hash_lookup_with_hash_bulk_l(h, keys, prim_hash,
+ num_keys, positions, hit_mask, data);
+}
+
+int
+rte_hash_lookup_with_hash_bulk_data(const struct rte_hash *h,
+ const void **keys, hash_sig_t *prim_hash,
+ uint32_t num_keys, uint64_t *hit_mask, void *data[])
+{
+ RETURN_IF_TRUE(((h == NULL) || (keys == NULL) ||
+ (prim_hash == NULL) || (num_keys == 0) ||
+ (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
+ (hit_mask == NULL)), -EINVAL);
+
+ int32_t positions[num_keys];
+
+ __rte_hash_lookup_with_hash_bulk(h, keys, prim_hash, num_keys,
+ positions, hit_mask, data);
+
+ /* Return number of hits */
+ return __builtin_popcountl(*hit_mask);
+}
+
int32_t
rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
{
diff --git a/lib/librte_hash/rte_hash.h b/lib/librte_hash/rte_hash.h
index ed0673b..9d3f0fa 100644
--- a/lib/librte_hash/rte_hash.h
+++ b/lib/librte_hash/rte_hash.h
@@ -528,6 +528,33 @@ rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
* Hash table to look in.
* @param keys
* A pointer to a list of keys to look for.
+ * @param sig
+ * A pointer to a list of precomputed hash values for keys.
+ * @param num_keys
+ * How many keys are in the keys list (less than RTE_HASH_LOOKUP_BULK_MAX).
+ * @param hit_mask
+ * Output containing a bitmask with all successful lookups.
+ * @param data
+ * Output containing array of data returned from all the successful lookups.
+ * @return
+ * -EINVAL if there's an error, otherwise number of successful lookups.
+ */
+__rte_experimental
+int
+rte_hash_lookup_with_hash_bulk_data(const struct rte_hash *h,
+ const void **keys, hash_sig_t *prim_hash,
+ uint32_t num_keys, uint64_t *hit_mask, void *data[]);
+
+/**
+ * Find multiple keys in the hash table.
+ * This operation is multi-thread safe with regard to other lookup threads.
+ * Read-write concurrency can be enabled by setting flag during
+ * table creation.
+ *
+ * @param h
+ * Hash table to look in.
+ * @param keys
+ * A pointer to a list of keys to look for.
* @param num_keys
* How many keys are in the keys list (less than RTE_HASH_LOOKUP_BULK_MAX).
* @param positions
diff --git a/lib/librte_hash/rte_hash_version.map b/lib/librte_hash/rte_hash_version.map
index a8fbbc3..fc3cb60 100644
--- a/lib/librte_hash/rte_hash_version.map
+++ b/lib/librte_hash/rte_hash_version.map
@@ -33,6 +33,7 @@ EXPERIMENTAL {
global:
rte_hash_free_key_with_position;
+ rte_hash_lookup_with_hash_bulk_data;
rte_hash_max_key_id;
};
--
2.7.4
^ permalink raw reply [flat|nested] 19+ messages in thread
* [dpdk-dev] [PATCH 2/2] test: update hash performance tests
2020-03-09 12:44 [dpdk-dev] [PATCH 1/2] hash: add hash bulk lookup with hash signatures array Vladimir Medvedkin
@ 2020-03-09 12:44 ` Vladimir Medvedkin
2020-03-17 17:27 ` [dpdk-dev] [PATCH 1/2] hash: add hash bulk lookup with hash signatures array Wang, Yipeng1
` (2 subsequent siblings)
3 siblings, 0 replies; 19+ messages in thread
From: Vladimir Medvedkin @ 2020-03-09 12:44 UTC (permalink / raw)
To: dev; +Cc: yipeng1.wang, sameh.gobriel, bruce.richardson
Add performance test for rte_hash_lookup_with_hash_bulk_data()
Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
---
app/test/test_hash_perf.c | 45 ++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 40 insertions(+), 5 deletions(-)
diff --git a/app/test/test_hash_perf.c b/app/test/test_hash_perf.c
index a438eae..e0d1600 100644
--- a/app/test/test_hash_perf.c
+++ b/app/test/test_hash_perf.c
@@ -391,8 +391,8 @@ timed_lookups(unsigned int with_hash, unsigned int with_data,
}
static int
-timed_lookups_multi(unsigned int with_data, unsigned int table_index,
- unsigned int ext)
+timed_lookups_multi(unsigned int with_hash, unsigned int with_data,
+ unsigned int table_index, unsigned int ext)
{
unsigned i, j, k;
int32_t positions_burst[BURST_SIZE];
@@ -417,7 +417,7 @@ timed_lookups_multi(unsigned int with_data, unsigned int table_index,
for (j = 0; j < keys_to_add/BURST_SIZE; j++) {
for (k = 0; k < BURST_SIZE; k++)
keys_burst[k] = keys[j * BURST_SIZE + k];
- if (with_data) {
+ if (!with_hash && with_data) {
ret = rte_hash_lookup_bulk_data(h[table_index],
(const void **) keys_burst,
BURST_SIZE,
@@ -442,6 +442,39 @@ timed_lookups_multi(unsigned int with_data, unsigned int table_index,
return -1;
}
}
+ } else if (with_hash && with_data) {
+ ret = rte_hash_lookup_with_hash_bulk_data(
+ h[table_index],
+ (const void **)keys_burst,
+ &signatures[j * BURST_SIZE],
+ BURST_SIZE, &hit_mask, ret_data);
+ if (ret != BURST_SIZE) {
+ printf("Expect to find %u keys,"
+ " but found %d\n",
+ BURST_SIZE, ret);
+ return -1;
+ }
+ for (k = 0; k < BURST_SIZE; k++) {
+ if ((hit_mask & (1ULL << k)) == 0) {
+ printf("Key number %u"
+ " not found\n",
+ j * BURST_SIZE + k);
+ return -1;
+ }
+ expected_data[k] =
+ (void *)((uintptr_t)signatures[
+ j * BURST_SIZE + k]);
+ if (ret_data[k] != expected_data[k]) {
+ printf("Data returned for key"
+ " number %u is %p,"
+ " but should be %p\n",
+ j * BURST_SIZE + k,
+ ret_data[k],
+ expected_data[k]);
+ return -1;
+ }
+ }
+
} else {
rte_hash_lookup_bulk(h[table_index],
(const void **) keys_burst,
@@ -462,7 +495,8 @@ timed_lookups_multi(unsigned int with_data, unsigned int table_index,
const uint64_t end_tsc = rte_rdtsc();
const uint64_t time_taken = end_tsc - start_tsc;
- cycles[table_index][LOOKUP_MULTI][0][with_data] = time_taken/num_lookups;
+ cycles[table_index][LOOKUP_MULTI][with_hash][with_data] =
+ time_taken/num_lookups;
return 0;
}
@@ -543,7 +577,8 @@ run_all_tbl_perf_tests(unsigned int with_pushes, unsigned int with_locks,
if (timed_lookups(with_hash, with_data, i, ext) < 0)
return -1;
- if (timed_lookups_multi(with_data, i, ext) < 0)
+ if (timed_lookups_multi(with_hash, with_data,
+ i, ext) < 0)
return -1;
if (timed_deletes(with_hash, with_data, i, ext) < 0)
--
2.7.4
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [dpdk-dev] [PATCH 1/2] hash: add hash bulk lookup with hash signatures array
2020-03-09 12:44 [dpdk-dev] [PATCH 1/2] hash: add hash bulk lookup with hash signatures array Vladimir Medvedkin
2020-03-09 12:44 ` [dpdk-dev] [PATCH 2/2] test: update hash performance tests Vladimir Medvedkin
@ 2020-03-17 17:27 ` Wang, Yipeng1
2020-03-26 14:16 ` Medvedkin, Vladimir
2020-03-26 16:47 ` [dpdk-dev] [PATCH v2 " Vladimir Medvedkin
2020-03-26 16:47 ` [dpdk-dev] [PATCH v2 " Vladimir Medvedkin
3 siblings, 1 reply; 19+ messages in thread
From: Wang, Yipeng1 @ 2020-03-17 17:27 UTC (permalink / raw)
To: Medvedkin, Vladimir, dev; +Cc: Gobriel, Sameh, Richardson, Bruce
> -----Original Message-----
> From: Medvedkin, Vladimir <vladimir.medvedkin@intel.com>
> Sent: Monday, March 9, 2020 5:44 AM
> To: dev@dpdk.org
> Cc: Wang, Yipeng1 <yipeng1.wang@intel.com>; Gobriel, Sameh
> <sameh.gobriel@intel.com>; Richardson, Bruce
> <bruce.richardson@intel.com>
> Subject: [PATCH 1/2] hash: add hash bulk lookup with hash signatures array
>
> Implement rte_hash_lookup_with_hash_bulk_data() - lookup function with
> precomputed hash signatures.
>
> Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
> ---
> --- a/lib/librte_hash/rte_hash.h
> +++ b/lib/librte_hash/rte_hash.h
> @@ -528,6 +528,33 @@ rte_hash_lookup_bulk_data(const struct rte_hash
> *h, const void **keys,
> * Hash table to look in.
> * @param keys
> * A pointer to a list of keys to look for.
> + * @param sig
> + * A pointer to a list of precomputed hash values for keys.
> + * @param num_keys
> + * How many keys are in the keys list (less than
> RTE_HASH_LOOKUP_BULK_MAX).
> + * @param hit_mask
> + * Output containing a bitmask with all successful lookups.
> + * @param data
> + * Output containing array of data returned from all the successful lookups.
> + * @return
> + * -EINVAL if there's an error, otherwise number of successful lookups.
> + */
> +__rte_experimental
> +int
> +rte_hash_lookup_with_hash_bulk_data(const struct rte_hash *h,
> + const void **keys, hash_sig_t *prim_hash,
[Wang, Yipeng] hash_sig_t *sig
> + uint32_t num_keys, uint64_t *hit_mask, void *data[]);
> +
> +/**
> + * Find multiple keys in the hash table.
[Wang, Yipeng] ...with precomputed hash value array.
> + * This operation is multi-thread safe with regarding to other lookup threads.
> + * Read-write concurrency can be enabled by setting flag during
> + * table creation.
> + *
[Wang, Yipeng]
Hi, Vladimir, thanks for the patch!
Besides the minor comments above, my major concern is the code duplication here.
It is after all hundred's lines of code that in future if we want to extend features, we need
to modify both function blocks.
Have you tried to just do if-else and reuse the current code block, and measure the performance?
If that causes performance difference, then we may justify adding the duplication, but we still
Need to optimize the software pipelining accordingly.
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [dpdk-dev] [PATCH 1/2] hash: add hash bulk lookup with hash signatures array
2020-03-17 17:27 ` [dpdk-dev] [PATCH 1/2] hash: add hash bulk lookup with hash signatures array Wang, Yipeng1
@ 2020-03-26 14:16 ` Medvedkin, Vladimir
0 siblings, 0 replies; 19+ messages in thread
From: Medvedkin, Vladimir @ 2020-03-26 14:16 UTC (permalink / raw)
To: Wang, Yipeng1, dev; +Cc: Gobriel, Sameh, Richardson, Bruce
Hi Yipeng,
On 17/03/2020 17:27, Wang, Yipeng1 wrote:
>> -----Original Message-----
>> From: Medvedkin, Vladimir <vladimir.medvedkin@intel.com>
>> Sent: Monday, March 9, 2020 5:44 AM
>> To: dev@dpdk.org
>> Cc: Wang, Yipeng1 <yipeng1.wang@intel.com>; Gobriel, Sameh
>> <sameh.gobriel@intel.com>; Richardson, Bruce
>> <bruce.richardson@intel.com>
>> Subject: [PATCH 1/2] hash: add hash bulk lookup with hash signatures array
>>
>> Implement rte_hash_lookup_with_hash_bulk_data() - lookup function with
>> precomputed hash signatures.
>>
>> Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
>> ---
>> --- a/lib/librte_hash/rte_hash.h
>> +++ b/lib/librte_hash/rte_hash.h
>> @@ -528,6 +528,33 @@ rte_hash_lookup_bulk_data(const struct rte_hash
>> *h, const void **keys,
>> * Hash table to look in.
>> * @param keys
>> * A pointer to a list of keys to look for.
>> + * @param sig
>> + * A pointer to a list of precomputed hash values for keys.
>> + * @param num_keys
>> + * How many keys are in the keys list (less than
>> RTE_HASH_LOOKUP_BULK_MAX).
>> + * @param hit_mask
>> + * Output containing a bitmask with all successful lookups.
>> + * @param data
>> + * Output containing array of data returned from all the successful lookups.
>> + * @return
>> + * -EINVAL if there's an error, otherwise number of successful lookups.
>> + */
>> +__rte_experimental
>> +int
>> +rte_hash_lookup_with_hash_bulk_data(const struct rte_hash *h,
>> + const void **keys, hash_sig_t *prim_hash,
> [Wang, Yipeng] hash_sig_t *sig
>> + uint32_t num_keys, uint64_t *hit_mask, void *data[]);
>> +
>> +/**
>> + * Find multiple keys in the hash table.
> [Wang, Yipeng] ...with precomputed hash value array.
>> + * This operation is multi-thread safe with regarding to other lookup threads.
>> + * Read-write concurrency can be enabled by setting flag during
>> + * table creation.
>> + *
> [Wang, Yipeng]
> Hi, Vladimir, thanks for the patch!
>
> Besides the minor comments above, my major concern is the code duplication here.
> It is after all hundred's lines of code that in future if we want to extend features, we need
> to modify both function blocks.
>
> Have you tried to just do if-else and reuse the current code block, and measure the performance?
> If that causes performance difference, then we may justify adding the duplication, but we still
> Need to optimize the software pipelining accordingly.
Agree, I'll get rid of code duplication in v2.
>
--
Regards,
Vladimir
^ permalink raw reply [flat|nested] 19+ messages in thread
* [dpdk-dev] [PATCH v2 1/2] hash: add hash bulk lookup with hash signatures array
2020-03-09 12:44 [dpdk-dev] [PATCH 1/2] hash: add hash bulk lookup with hash signatures array Vladimir Medvedkin
2020-03-09 12:44 ` [dpdk-dev] [PATCH 2/2] test: update hash performance tests Vladimir Medvedkin
2020-03-17 17:27 ` [dpdk-dev] [PATCH 1/2] hash: add hash bulk lookup with hash signatures array Wang, Yipeng1
@ 2020-03-26 16:47 ` Vladimir Medvedkin
2020-03-30 20:20 ` Wang, Yipeng1
` (2 more replies)
2020-03-26 16:47 ` [dpdk-dev] [PATCH v2 " Vladimir Medvedkin
3 siblings, 3 replies; 19+ messages in thread
From: Vladimir Medvedkin @ 2020-03-26 16:47 UTC (permalink / raw)
To: dev; +Cc: yipeng1.wang, sameh.gobriel, bruce.richardson
Implement rte_hash_lookup_with_hash_bulk_data() - lookup
function with precomputed hash signatures.
Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
---
lib/librte_hash/rte_cuckoo_hash.c | 296 +++++++++++++++++++++++------------
lib/librte_hash/rte_hash.h | 27 ++++
lib/librte_hash/rte_hash_version.map | 1 +
3 files changed, 227 insertions(+), 97 deletions(-)
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 6af8ca4..24a0756 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -1711,64 +1711,20 @@ compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
}
}
-#define PREFETCH_OFFSET 4
static inline void
-__rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
- int32_t num_keys, int32_t *positions,
- uint64_t *hit_mask, void *data[])
+__bulk_lookup_l(const struct rte_hash *h, const void **keys,
+ const struct rte_hash_bucket **primary_bkt,
+ const struct rte_hash_bucket **secondary_bkt,
+ uint16_t *sig, int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
{
uint64_t hits = 0;
int32_t i;
int32_t ret;
- uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
- uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
- uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
- uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
- const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
- const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
struct rte_hash_bucket *cur_bkt, *next_bkt;
- /* Prefetch first keys */
- for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
- rte_prefetch0(keys[i]);
-
- /*
- * Prefetch rest of the keys, calculate primary and
- * secondary bucket and prefetch them
- */
- for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
- rte_prefetch0(keys[i + PREFETCH_OFFSET]);
-
- prim_hash[i] = rte_hash_hash(h, keys[i]);
-
- sig[i] = get_short_sig(prim_hash[i]);
- prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
- sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
-
- primary_bkt[i] = &h->buckets[prim_index[i]];
- secondary_bkt[i] = &h->buckets[sec_index[i]];
-
- rte_prefetch0(primary_bkt[i]);
- rte_prefetch0(secondary_bkt[i]);
- }
-
- /* Calculate and prefetch rest of the buckets */
- for (; i < num_keys; i++) {
- prim_hash[i] = rte_hash_hash(h, keys[i]);
-
- sig[i] = get_short_sig(prim_hash[i]);
- prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
- sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
-
- primary_bkt[i] = &h->buckets[prim_index[i]];
- secondary_bkt[i] = &h->buckets[sec_index[i]];
-
- rte_prefetch0(primary_bkt[i]);
- rte_prefetch0(secondary_bkt[i]);
- }
-
__hash_rw_reader_lock(h);
/* Compare signatures and prefetch key slot of first hit */
@@ -1903,63 +1859,20 @@ __rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
}
static inline void
-__rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
- int32_t num_keys, int32_t *positions,
- uint64_t *hit_mask, void *data[])
+__bulk_lookup_lf(const struct rte_hash *h, const void **keys,
+ const struct rte_hash_bucket **primary_bkt,
+ const struct rte_hash_bucket **secondary_bkt,
+ uint16_t *sig, int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
{
uint64_t hits = 0;
int32_t i;
int32_t ret;
- uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
- uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
- uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
- uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
- const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
- const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
struct rte_hash_bucket *cur_bkt, *next_bkt;
uint32_t cnt_b, cnt_a;
- /* Prefetch first keys */
- for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
- rte_prefetch0(keys[i]);
-
- /*
- * Prefetch rest of the keys, calculate primary and
- * secondary bucket and prefetch them
- */
- for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
- rte_prefetch0(keys[i + PREFETCH_OFFSET]);
-
- prim_hash[i] = rte_hash_hash(h, keys[i]);
-
- sig[i] = get_short_sig(prim_hash[i]);
- prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
- sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
-
- primary_bkt[i] = &h->buckets[prim_index[i]];
- secondary_bkt[i] = &h->buckets[sec_index[i]];
-
- rte_prefetch0(primary_bkt[i]);
- rte_prefetch0(secondary_bkt[i]);
- }
-
- /* Calculate and prefetch rest of the buckets */
- for (; i < num_keys; i++) {
- prim_hash[i] = rte_hash_hash(h, keys[i]);
-
- sig[i] = get_short_sig(prim_hash[i]);
- prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
- sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
-
- primary_bkt[i] = &h->buckets[prim_index[i]];
- secondary_bkt[i] = &h->buckets[sec_index[i]];
-
- rte_prefetch0(primary_bkt[i]);
- rte_prefetch0(secondary_bkt[i]);
- }
-
for (i = 0; i < num_keys; i++)
positions[i] = -ENOENT;
@@ -2124,6 +2037,92 @@ __rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
*hit_mask = hits;
}
+#define PREFETCH_OFFSET 4
+static inline void
+__bulk_lookup_prefetching_loop(const struct rte_hash *h,
+ const void **keys, int32_t num_keys,
+ uint16_t *sig,
+ const struct rte_hash_bucket **primary_bkt,
+ const struct rte_hash_bucket **secondary_bkt)
+{
+ int32_t i;
+ uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
+
+ /* Prefetch first keys */
+ for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
+ rte_prefetch0(keys[i]);
+
+ /*
+ * Prefetch rest of the keys, calculate primary and
+ * secondary bucket and prefetch them
+ */
+ for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
+ rte_prefetch0(keys[i + PREFETCH_OFFSET]);
+
+ prim_hash[i] = rte_hash_hash(h, keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+
+ /* Calculate and prefetch rest of the buckets */
+ for (; i < num_keys; i++) {
+ prim_hash[i] = rte_hash_hash(h, keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+}
+
+
+static inline void
+__rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+
+ __bulk_lookup_prefetching_loop(h, keys, num_keys, sig,
+ primary_bkt, secondary_bkt);
+
+ __bulk_lookup_l(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
+ positions, hit_mask, data);
+}
+
+static inline void
+__rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+
+ __bulk_lookup_prefetching_loop(h, keys, num_keys, sig,
+ primary_bkt, secondary_bkt);
+
+ __bulk_lookup_lf(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
+ positions, hit_mask, data);
+}
+
static inline void
__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
int32_t num_keys, int32_t *positions,
@@ -2165,6 +2164,109 @@ rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
return __builtin_popcountl(*hit_mask);
}
+
+static inline void
+__rte_hash_lookup_with_hash_bulk_l(const struct rte_hash *h,
+ const void **keys, hash_sig_t *prim_hash,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ int32_t i;
+ uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+
+ /*
+ * Prefetch keys, calculate primary and
+ * secondary bucket and prefetch them
+ */
+ for (i = 0; i < num_keys; i++) {
+ rte_prefetch0(keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+
+ __bulk_lookup_l(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
+ positions, hit_mask, data);
+}
+
+static inline void
+__rte_hash_lookup_with_hash_bulk_lf(const struct rte_hash *h,
+ const void **keys, hash_sig_t *prim_hash,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ int32_t i;
+ uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+
+ /*
+ * Prefetch keys, calculate primary and
+ * secondary bucket and prefetch them
+ */
+ for (i = 0; i < num_keys; i++) {
+ rte_prefetch0(keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+
+ __bulk_lookup_lf(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
+ positions, hit_mask, data);
+}
+
+static inline void
+__rte_hash_lookup_with_hash_bulk(const struct rte_hash *h, const void **keys,
+ hash_sig_t *prim_hash, int32_t num_keys,
+ int32_t *positions, uint64_t *hit_mask, void *data[])
+{
+ if (h->readwrite_concur_lf_support)
+ __rte_hash_lookup_with_hash_bulk_lf(h, keys, prim_hash,
+ num_keys, positions, hit_mask, data);
+ else
+ __rte_hash_lookup_with_hash_bulk_l(h, keys, prim_hash,
+ num_keys, positions, hit_mask, data);
+}
+
+int
+rte_hash_lookup_with_hash_bulk_data(const struct rte_hash *h,
+ const void **keys, hash_sig_t *sig,
+ uint32_t num_keys, uint64_t *hit_mask, void *data[])
+{
+ RETURN_IF_TRUE(((h == NULL) || (keys == NULL) ||
+ (sig == NULL) || (num_keys == 0) ||
+ (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
+ (hit_mask == NULL)), -EINVAL);
+
+ int32_t positions[num_keys];
+
+ __rte_hash_lookup_with_hash_bulk(h, keys, sig, num_keys,
+ positions, hit_mask, data);
+
+ /* Return number of hits */
+ return __builtin_popcountl(*hit_mask);
+}
+
int32_t
rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
{
diff --git a/lib/librte_hash/rte_hash.h b/lib/librte_hash/rte_hash.h
index ed0673b..dd8e7af 100644
--- a/lib/librte_hash/rte_hash.h
+++ b/lib/librte_hash/rte_hash.h
@@ -519,6 +519,33 @@ rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
uint32_t num_keys, uint64_t *hit_mask, void *data[]);
/**
+ * Find multiple keys in the hash table with precomputed hash value array.
+ * This operation is multi-thread safe with regarding to other lookup threads.
+ * Read-write concurrency can be enabled by setting flag during
+ * table creation.
+ *
+ * @param h
+ * Hash table to look in.
+ * @param keys
+ * A pointer to a list of keys to look for.
+ * @param sig
+ * A pointer to a list of precomputed hash values for keys.
+ * @param num_keys
+ * How many keys are in the keys list (less than RTE_HASH_LOOKUP_BULK_MAX).
+ * @param hit_mask
+ * Output containing a bitmask with all successful lookups.
+ * @param data
+ * Output containing array of data returned from all the successful lookups.
+ * @return
+ * -EINVAL if there's an error, otherwise number of successful lookups.
+ */
+__rte_experimental
+int
+rte_hash_lookup_with_hash_bulk_data(const struct rte_hash *h,
+ const void **keys, hash_sig_t *sig,
+ uint32_t num_keys, uint64_t *hit_mask, void *data[]);
+
+/**
* Find multiple keys in the hash table.
* This operation is multi-thread safe with regarding to other lookup threads.
* Read-write concurrency can be enabled by setting flag during
diff --git a/lib/librte_hash/rte_hash_version.map b/lib/librte_hash/rte_hash_version.map
index a8fbbc3..fc3cb60 100644
--- a/lib/librte_hash/rte_hash_version.map
+++ b/lib/librte_hash/rte_hash_version.map
@@ -33,6 +33,7 @@ EXPERIMENTAL {
global:
rte_hash_free_key_with_position;
+ rte_hash_lookup_with_hash_bulk_data;
rte_hash_max_key_id;
};
--
2.7.4
^ permalink raw reply [flat|nested] 19+ messages in thread
* [dpdk-dev] [PATCH v2 2/2] test: update hash performance tests
2020-03-09 12:44 [dpdk-dev] [PATCH 1/2] hash: add hash bulk lookup with hash signatures array Vladimir Medvedkin
` (2 preceding siblings ...)
2020-03-26 16:47 ` [dpdk-dev] [PATCH v2 " Vladimir Medvedkin
@ 2020-03-26 16:47 ` Vladimir Medvedkin
2020-03-30 20:33 ` Wang, Yipeng1
3 siblings, 1 reply; 19+ messages in thread
From: Vladimir Medvedkin @ 2020-03-26 16:47 UTC (permalink / raw)
To: dev; +Cc: yipeng1.wang, sameh.gobriel, bruce.richardson
Add performance test for rte_hash_lookup_with_hash_bulk_data()
Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
---
app/test/test_hash_perf.c | 45 ++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 40 insertions(+), 5 deletions(-)
diff --git a/app/test/test_hash_perf.c b/app/test/test_hash_perf.c
index a438eae..e0d1600 100644
--- a/app/test/test_hash_perf.c
+++ b/app/test/test_hash_perf.c
@@ -391,8 +391,8 @@ timed_lookups(unsigned int with_hash, unsigned int with_data,
}
static int
-timed_lookups_multi(unsigned int with_data, unsigned int table_index,
- unsigned int ext)
+timed_lookups_multi(unsigned int with_hash, unsigned int with_data,
+ unsigned int table_index, unsigned int ext)
{
unsigned i, j, k;
int32_t positions_burst[BURST_SIZE];
@@ -417,7 +417,7 @@ timed_lookups_multi(unsigned int with_data, unsigned int table_index,
for (j = 0; j < keys_to_add/BURST_SIZE; j++) {
for (k = 0; k < BURST_SIZE; k++)
keys_burst[k] = keys[j * BURST_SIZE + k];
- if (with_data) {
+ if (!with_hash && with_data) {
ret = rte_hash_lookup_bulk_data(h[table_index],
(const void **) keys_burst,
BURST_SIZE,
@@ -442,6 +442,39 @@ timed_lookups_multi(unsigned int with_data, unsigned int table_index,
return -1;
}
}
+ } else if (with_hash && with_data) {
+ ret = rte_hash_lookup_with_hash_bulk_data(
+ h[table_index],
+ (const void **)keys_burst,
+ &signatures[j * BURST_SIZE],
+ BURST_SIZE, &hit_mask, ret_data);
+ if (ret != BURST_SIZE) {
+ printf("Expect to find %u keys,"
+ " but found %d\n",
+ BURST_SIZE, ret);
+ return -1;
+ }
+ for (k = 0; k < BURST_SIZE; k++) {
+ if ((hit_mask & (1ULL << k)) == 0) {
+ printf("Key number %u"
+ " not found\n",
+ j * BURST_SIZE + k);
+ return -1;
+ }
+ expected_data[k] =
+ (void *)((uintptr_t)signatures[
+ j * BURST_SIZE + k]);
+ if (ret_data[k] != expected_data[k]) {
+ printf("Data returned for key"
+ " number %u is %p,"
+ " but should be %p\n",
+ j * BURST_SIZE + k,
+ ret_data[k],
+ expected_data[k]);
+ return -1;
+ }
+ }
+
} else {
rte_hash_lookup_bulk(h[table_index],
(const void **) keys_burst,
@@ -462,7 +495,8 @@ timed_lookups_multi(unsigned int with_data, unsigned int table_index,
const uint64_t end_tsc = rte_rdtsc();
const uint64_t time_taken = end_tsc - start_tsc;
- cycles[table_index][LOOKUP_MULTI][0][with_data] = time_taken/num_lookups;
+ cycles[table_index][LOOKUP_MULTI][with_hash][with_data] =
+ time_taken/num_lookups;
return 0;
}
@@ -543,7 +577,8 @@ run_all_tbl_perf_tests(unsigned int with_pushes, unsigned int with_locks,
if (timed_lookups(with_hash, with_data, i, ext) < 0)
return -1;
- if (timed_lookups_multi(with_data, i, ext) < 0)
+ if (timed_lookups_multi(with_hash, with_data,
+ i, ext) < 0)
return -1;
if (timed_deletes(with_hash, with_data, i, ext) < 0)
--
2.7.4
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [dpdk-dev] [PATCH v2 1/2] hash: add hash bulk lookup with hash signatures array
2020-03-26 16:47 ` [dpdk-dev] [PATCH v2 " Vladimir Medvedkin
@ 2020-03-30 20:20 ` Wang, Yipeng1
2020-04-08 18:32 ` [dpdk-dev] [PATCH v3 " Vladimir Medvedkin
2020-04-08 18:32 ` [dpdk-dev] [PATCH v3 2/2] test: update hash performance tests Vladimir Medvedkin
2 siblings, 0 replies; 19+ messages in thread
From: Wang, Yipeng1 @ 2020-03-30 20:20 UTC (permalink / raw)
To: Medvedkin, Vladimir, dev; +Cc: Gobriel, Sameh, Richardson, Bruce
> -----Original Message-----
> From: Medvedkin, Vladimir <vladimir.medvedkin@intel.com>
> Sent: Thursday, March 26, 2020 9:47 AM
> To: dev@dpdk.org
> Cc: Wang, Yipeng1 <yipeng1.wang@intel.com>; Gobriel, Sameh
> <sameh.gobriel@intel.com>; Richardson, Bruce
> <bruce.richardson@intel.com>
> Subject: [PATCH v2 1/2] hash: add hash bulk lookup with hash signatures
> array
>
> Implement rte_hash_lookup_with_hash_bulk_data() - lookup function with
> precomputed hash signatures.
>
> Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
> ---
> lib/librte_hash/rte_cuckoo_hash.c | 296 +++++++++++++++++++++++-----
> -------
> lib/librte_hash/rte_hash.h | 27 ++++
> lib/librte_hash/rte_hash_version.map | 1 +
> 3 files changed, 227 insertions(+), 97 deletions(-)
>
> diff --git a/lib/librte_hash/rte_cuckoo_hash.c
> b/lib/librte_hash/rte_cuckoo_hash.c
> index 6af8ca4..24a0756 100644
> --- a/lib/librte_hash/rte_cuckoo_hash.c
> +++ b/lib/librte_hash/rte_cuckoo_hash.c
> @@ -1711,64 +1711,20 @@ compare_signatures(uint32_t
> *prim_hash_matches, uint32_t *sec_hash_matches,
> }
> }
>
> -#define PREFETCH_OFFSET 4
> static inline void
> -__rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
> - int32_t num_keys, int32_t *positions,
> - uint64_t *hit_mask, void *data[])
> +__bulk_lookup_l(const struct rte_hash *h, const void **keys,
> + const struct rte_hash_bucket **primary_bkt,
> + const struct rte_hash_bucket **secondary_bkt,
> + uint16_t *sig, int32_t num_keys, int32_t *positions,
> + uint64_t *hit_mask, void *data[])
> {
> uint64_t hits = 0;
> int32_t i;
> int32_t ret;
> - uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
> - uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
> - uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
> - uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
> - const struct rte_hash_bucket
> *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
> - const struct rte_hash_bucket
> *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
> uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
> uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
> struct rte_hash_bucket *cur_bkt, *next_bkt;
>
> - /* Prefetch first keys */
> - for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
> - rte_prefetch0(keys[i]);
> -
> - /*
> - * Prefetch rest of the keys, calculate primary and
> - * secondary bucket and prefetch them
> - */
> - for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
> - rte_prefetch0(keys[i + PREFETCH_OFFSET]);
> -
> - prim_hash[i] = rte_hash_hash(h, keys[i]);
> -
> - sig[i] = get_short_sig(prim_hash[i]);
> - prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
> - sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
> -
> - primary_bkt[i] = &h->buckets[prim_index[i]];
> - secondary_bkt[i] = &h->buckets[sec_index[i]];
> -
> - rte_prefetch0(primary_bkt[i]);
> - rte_prefetch0(secondary_bkt[i]);
> - }
> -
> - /* Calculate and prefetch rest of the buckets */
> - for (; i < num_keys; i++) {
> - prim_hash[i] = rte_hash_hash(h, keys[i]);
> -
> - sig[i] = get_short_sig(prim_hash[i]);
> - prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
> - sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
> -
> - primary_bkt[i] = &h->buckets[prim_index[i]];
> - secondary_bkt[i] = &h->buckets[sec_index[i]];
> -
> - rte_prefetch0(primary_bkt[i]);
> - rte_prefetch0(secondary_bkt[i]);
> - }
> -
> __hash_rw_reader_lock(h);
>
> /* Compare signatures and prefetch key slot of first hit */ @@ -
> 1903,63 +1859,20 @@ __rte_hash_lookup_bulk_l(const struct rte_hash *h,
> const void **keys, }
>
> static inline void
> -__rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
> - int32_t num_keys, int32_t *positions,
> - uint64_t *hit_mask, void *data[])
> +__bulk_lookup_lf(const struct rte_hash *h, const void **keys,
> + const struct rte_hash_bucket **primary_bkt,
> + const struct rte_hash_bucket **secondary_bkt,
> + uint16_t *sig, int32_t num_keys, int32_t *positions,
> + uint64_t *hit_mask, void *data[])
> {
> uint64_t hits = 0;
> int32_t i;
> int32_t ret;
> - uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
> - uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
> - uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
> - uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
> - const struct rte_hash_bucket
> *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
> - const struct rte_hash_bucket
> *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
> uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
> uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
> struct rte_hash_bucket *cur_bkt, *next_bkt;
> uint32_t cnt_b, cnt_a;
>
> - /* Prefetch first keys */
> - for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
> - rte_prefetch0(keys[i]);
> -
> - /*
> - * Prefetch rest of the keys, calculate primary and
> - * secondary bucket and prefetch them
> - */
> - for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
> - rte_prefetch0(keys[i + PREFETCH_OFFSET]);
> -
> - prim_hash[i] = rte_hash_hash(h, keys[i]);
> -
> - sig[i] = get_short_sig(prim_hash[i]);
> - prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
> - sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
> -
> - primary_bkt[i] = &h->buckets[prim_index[i]];
> - secondary_bkt[i] = &h->buckets[sec_index[i]];
> -
> - rte_prefetch0(primary_bkt[i]);
> - rte_prefetch0(secondary_bkt[i]);
> - }
> -
> - /* Calculate and prefetch rest of the buckets */
> - for (; i < num_keys; i++) {
> - prim_hash[i] = rte_hash_hash(h, keys[i]);
> -
> - sig[i] = get_short_sig(prim_hash[i]);
> - prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
> - sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
> -
> - primary_bkt[i] = &h->buckets[prim_index[i]];
> - secondary_bkt[i] = &h->buckets[sec_index[i]];
> -
> - rte_prefetch0(primary_bkt[i]);
> - rte_prefetch0(secondary_bkt[i]);
> - }
> -
> for (i = 0; i < num_keys; i++)
> positions[i] = -ENOENT;
>
> @@ -2124,6 +2037,92 @@ __rte_hash_lookup_bulk_lf(const struct rte_hash
> *h, const void **keys,
> *hit_mask = hits;
> }
>
> +#define PREFETCH_OFFSET 4
> +static inline void
> +__bulk_lookup_prefetching_loop(const struct rte_hash *h,
> + const void **keys, int32_t num_keys,
> + uint16_t *sig,
> + const struct rte_hash_bucket **primary_bkt,
> + const struct rte_hash_bucket **secondary_bkt) {
> + int32_t i;
> + uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
> + uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
> + uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
> +
> + /* Prefetch first keys */
> + for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
> + rte_prefetch0(keys[i]);
> +
> + /*
> + * Prefetch rest of the keys, calculate primary and
> + * secondary bucket and prefetch them
> + */
> + for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
> + rte_prefetch0(keys[i + PREFETCH_OFFSET]);
> +
> + prim_hash[i] = rte_hash_hash(h, keys[i]);
> +
> + sig[i] = get_short_sig(prim_hash[i]);
> + prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
> + sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
> +
> + primary_bkt[i] = &h->buckets[prim_index[i]];
> + secondary_bkt[i] = &h->buckets[sec_index[i]];
> +
> + rte_prefetch0(primary_bkt[i]);
> + rte_prefetch0(secondary_bkt[i]);
> + }
> +
> + /* Calculate and prefetch rest of the buckets */
> + for (; i < num_keys; i++) {
> + prim_hash[i] = rte_hash_hash(h, keys[i]);
> +
> + sig[i] = get_short_sig(prim_hash[i]);
> + prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
> + sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
> +
> + primary_bkt[i] = &h->buckets[prim_index[i]];
> + secondary_bkt[i] = &h->buckets[sec_index[i]];
> +
> + rte_prefetch0(primary_bkt[i]);
> + rte_prefetch0(secondary_bkt[i]);
> + }
> +}
> +
> +
[Wang, Yipeng] Here is an unnecessary blank line.
> +static inline void
> +__rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
> + int32_t num_keys, int32_t *positions,
> + uint64_t *hit_mask, void *data[])
> +{
> + uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
> + const struct rte_hash_bucket
> *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
[Wang, Yipeng]
Thanks for revising the code to be more concise.
It looks so far so good to me.
BTW, would you like to add also the rte_hash_lookup_with_hash_bulk function in this patchset?
Just for the completeness.
Thanks
Yipeng
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [dpdk-dev] [PATCH v2 2/2] test: update hash performance tests
2020-03-26 16:47 ` [dpdk-dev] [PATCH v2 " Vladimir Medvedkin
@ 2020-03-30 20:33 ` Wang, Yipeng1
0 siblings, 0 replies; 19+ messages in thread
From: Wang, Yipeng1 @ 2020-03-30 20:33 UTC (permalink / raw)
To: Medvedkin, Vladimir, dev; +Cc: Gobriel, Sameh, Richardson, Bruce
> -----Original Message-----
> From: Medvedkin, Vladimir <vladimir.medvedkin@intel.com>
> Sent: Thursday, March 26, 2020 9:47 AM
> To: dev@dpdk.org
> Cc: Wang, Yipeng1 <yipeng1.wang@intel.com>; Gobriel, Sameh
> <sameh.gobriel@intel.com>; Richardson, Bruce
> <bruce.richardson@intel.com>
> Subject: [PATCH v2 2/2] test: update hash performance tests
>
> Add performance test for rte_hash_lookup_with_hash_bulk_data()
>
> Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
> ---
> app/test/test_hash_perf.c | 45
> ++++++++++++++++++++++++++++++++++++++++-----
> 1 file changed, 40 insertions(+), 5 deletions(-)
>
> diff --git a/app/test/test_hash_perf.c b/app/test/test_hash_perf.c index
> a438eae..e0d1600 100644
> --- a/app/test/test_hash_perf.c
> +++ b/app/test/test_hash_perf.c
> @@ -391,8 +391,8 @@ timed_lookups(unsigned int with_hash, unsigned int
> with_data, }
>
> static int
> -timed_lookups_multi(unsigned int with_data, unsigned int table_index,
> - unsigned int ext)
> +timed_lookups_multi(unsigned int with_hash, unsigned int with_data,
> + unsigned int table_index, unsigned int ext)
> {
> unsigned i, j, k;
> int32_t positions_burst[BURST_SIZE];
> @@ -417,7 +417,7 @@ timed_lookups_multi(unsigned int with_data,
> unsigned int table_index,
> for (j = 0; j < keys_to_add/BURST_SIZE; j++) {
> for (k = 0; k < BURST_SIZE; k++)
> keys_burst[k] = keys[j * BURST_SIZE + k];
> - if (with_data) {
> + if (!with_hash && with_data) {
> ret =
> rte_hash_lookup_bulk_data(h[table_index],
> (const void **) keys_burst,
> BURST_SIZE,
> @@ -442,6 +442,39 @@ timed_lookups_multi(unsigned int with_data,
> unsigned int table_index,
> return -1;
> }
> }
> + } else if (with_hash && with_data) {
> + ret =
> rte_hash_lookup_with_hash_bulk_data(
> + h[table_index],
> + (const void **)keys_burst,
> + &signatures[j * BURST_SIZE],
> + BURST_SIZE, &hit_mask, ret_data);
> + if (ret != BURST_SIZE) {
> + printf("Expect to find %u keys,"
> + " but found %d\n",
> + BURST_SIZE, ret);
> + return -1;
> + }
> + for (k = 0; k < BURST_SIZE; k++) {
> + if ((hit_mask & (1ULL << k)) == 0) {
> + printf("Key number %u"
> + " not found\n",
> + j * BURST_SIZE + k);
> + return -1;
> + }
> + expected_data[k] =
> + (void
> *)((uintptr_t)signatures[
> + j * BURST_SIZE + k]);
> + if (ret_data[k] != expected_data[k]) {
> + printf("Data returned for
> key"
> + " number %u is %p,"
> + " but should
> be %p\n",
> + j * BURST_SIZE + k,
> + ret_data[k],
> + expected_data[k]);
> + return -1;
> + }
> + }
> +
> } else {
> rte_hash_lookup_bulk(h[table_index],
> (const void **) keys_burst,
[Wang, Yipeng]
Although not implemented for (with_hash && !with_data), data would still be printed to the output.
This could be misleading.
^ permalink raw reply [flat|nested] 19+ messages in thread
* [dpdk-dev] [PATCH v3 1/2] hash: add hash bulk lookup with hash signatures array
2020-03-26 16:47 ` [dpdk-dev] [PATCH v2 " Vladimir Medvedkin
2020-03-30 20:20 ` Wang, Yipeng1
@ 2020-04-08 18:32 ` Vladimir Medvedkin
2020-04-16 9:47 ` Thomas Monjalon
2020-04-16 15:00 ` [dpdk-dev] [PATCH v4] " Vladimir Medvedkin
2020-04-08 18:32 ` [dpdk-dev] [PATCH v3 2/2] test: update hash performance tests Vladimir Medvedkin
2 siblings, 2 replies; 19+ messages in thread
From: Vladimir Medvedkin @ 2020-04-08 18:32 UTC (permalink / raw)
To: dev; +Cc: yipeng1.wang, sameh.gobriel, bruce.richardson
Implement rte_hash_lookup_with_hash_bulk_data() - lookup
function with precomputed hash signatures.
Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
---
lib/librte_hash/rte_cuckoo_hash.c | 309 ++++++++++++++++++++++++-----------
lib/librte_hash/rte_hash.h | 55 +++++++
lib/librte_hash/rte_hash_version.map | 2 +
3 files changed, 269 insertions(+), 97 deletions(-)
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 6af8ca4..a5e6677 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -1711,64 +1711,20 @@ compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
}
}
-#define PREFETCH_OFFSET 4
static inline void
-__rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
- int32_t num_keys, int32_t *positions,
- uint64_t *hit_mask, void *data[])
+__bulk_lookup_l(const struct rte_hash *h, const void **keys,
+ const struct rte_hash_bucket **primary_bkt,
+ const struct rte_hash_bucket **secondary_bkt,
+ uint16_t *sig, int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
{
uint64_t hits = 0;
int32_t i;
int32_t ret;
- uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
- uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
- uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
- uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
- const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
- const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
struct rte_hash_bucket *cur_bkt, *next_bkt;
- /* Prefetch first keys */
- for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
- rte_prefetch0(keys[i]);
-
- /*
- * Prefetch rest of the keys, calculate primary and
- * secondary bucket and prefetch them
- */
- for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
- rte_prefetch0(keys[i + PREFETCH_OFFSET]);
-
- prim_hash[i] = rte_hash_hash(h, keys[i]);
-
- sig[i] = get_short_sig(prim_hash[i]);
- prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
- sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
-
- primary_bkt[i] = &h->buckets[prim_index[i]];
- secondary_bkt[i] = &h->buckets[sec_index[i]];
-
- rte_prefetch0(primary_bkt[i]);
- rte_prefetch0(secondary_bkt[i]);
- }
-
- /* Calculate and prefetch rest of the buckets */
- for (; i < num_keys; i++) {
- prim_hash[i] = rte_hash_hash(h, keys[i]);
-
- sig[i] = get_short_sig(prim_hash[i]);
- prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
- sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
-
- primary_bkt[i] = &h->buckets[prim_index[i]];
- secondary_bkt[i] = &h->buckets[sec_index[i]];
-
- rte_prefetch0(primary_bkt[i]);
- rte_prefetch0(secondary_bkt[i]);
- }
-
__hash_rw_reader_lock(h);
/* Compare signatures and prefetch key slot of first hit */
@@ -1903,63 +1859,20 @@ __rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
}
static inline void
-__rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
- int32_t num_keys, int32_t *positions,
- uint64_t *hit_mask, void *data[])
+__bulk_lookup_lf(const struct rte_hash *h, const void **keys,
+ const struct rte_hash_bucket **primary_bkt,
+ const struct rte_hash_bucket **secondary_bkt,
+ uint16_t *sig, int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
{
uint64_t hits = 0;
int32_t i;
int32_t ret;
- uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
- uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
- uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
- uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
- const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
- const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
struct rte_hash_bucket *cur_bkt, *next_bkt;
uint32_t cnt_b, cnt_a;
- /* Prefetch first keys */
- for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
- rte_prefetch0(keys[i]);
-
- /*
- * Prefetch rest of the keys, calculate primary and
- * secondary bucket and prefetch them
- */
- for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
- rte_prefetch0(keys[i + PREFETCH_OFFSET]);
-
- prim_hash[i] = rte_hash_hash(h, keys[i]);
-
- sig[i] = get_short_sig(prim_hash[i]);
- prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
- sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
-
- primary_bkt[i] = &h->buckets[prim_index[i]];
- secondary_bkt[i] = &h->buckets[sec_index[i]];
-
- rte_prefetch0(primary_bkt[i]);
- rte_prefetch0(secondary_bkt[i]);
- }
-
- /* Calculate and prefetch rest of the buckets */
- for (; i < num_keys; i++) {
- prim_hash[i] = rte_hash_hash(h, keys[i]);
-
- sig[i] = get_short_sig(prim_hash[i]);
- prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
- sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
-
- primary_bkt[i] = &h->buckets[prim_index[i]];
- secondary_bkt[i] = &h->buckets[sec_index[i]];
-
- rte_prefetch0(primary_bkt[i]);
- rte_prefetch0(secondary_bkt[i]);
- }
-
for (i = 0; i < num_keys; i++)
positions[i] = -ENOENT;
@@ -2124,6 +2037,91 @@ __rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
*hit_mask = hits;
}
+#define PREFETCH_OFFSET 4
+static inline void
+__bulk_lookup_prefetching_loop(const struct rte_hash *h,
+ const void **keys, int32_t num_keys,
+ uint16_t *sig,
+ const struct rte_hash_bucket **primary_bkt,
+ const struct rte_hash_bucket **secondary_bkt)
+{
+ int32_t i;
+ uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
+
+ /* Prefetch first keys */
+ for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
+ rte_prefetch0(keys[i]);
+
+ /*
+ * Prefetch rest of the keys, calculate primary and
+ * secondary bucket and prefetch them
+ */
+ for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
+ rte_prefetch0(keys[i + PREFETCH_OFFSET]);
+
+ prim_hash[i] = rte_hash_hash(h, keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+
+ /* Calculate and prefetch rest of the buckets */
+ for (; i < num_keys; i++) {
+ prim_hash[i] = rte_hash_hash(h, keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+}
+
+static inline void
+__rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+
+ __bulk_lookup_prefetching_loop(h, keys, num_keys, sig,
+ primary_bkt, secondary_bkt);
+
+ __bulk_lookup_l(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
+ positions, hit_mask, data);
+}
+
+static inline void
+__rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+
+ __bulk_lookup_prefetching_loop(h, keys, num_keys, sig,
+ primary_bkt, secondary_bkt);
+
+ __bulk_lookup_lf(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
+ positions, hit_mask, data);
+}
+
static inline void
__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
int32_t num_keys, int32_t *positions,
@@ -2165,6 +2163,123 @@ rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
return __builtin_popcountl(*hit_mask);
}
+
+static inline void
+__rte_hash_lookup_with_hash_bulk_l(const struct rte_hash *h,
+ const void **keys, hash_sig_t *prim_hash,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ int32_t i;
+ uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+
+ /*
+ * Prefetch keys, calculate primary and
+ * secondary bucket and prefetch them
+ */
+ for (i = 0; i < num_keys; i++) {
+ rte_prefetch0(keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+
+ __bulk_lookup_l(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
+ positions, hit_mask, data);
+}
+
+static inline void
+__rte_hash_lookup_with_hash_bulk_lf(const struct rte_hash *h,
+ const void **keys, hash_sig_t *prim_hash,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ int32_t i;
+ uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+
+ /*
+ * Prefetch keys, calculate primary and
+ * secondary bucket and prefetch them
+ */
+ for (i = 0; i < num_keys; i++) {
+ rte_prefetch0(keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+
+ __bulk_lookup_lf(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
+ positions, hit_mask, data);
+}
+
+static inline void
+__rte_hash_lookup_with_hash_bulk(const struct rte_hash *h, const void **keys,
+ hash_sig_t *prim_hash, int32_t num_keys,
+ int32_t *positions, uint64_t *hit_mask, void *data[])
+{
+ if (h->readwrite_concur_lf_support)
+ __rte_hash_lookup_with_hash_bulk_lf(h, keys, prim_hash,
+ num_keys, positions, hit_mask, data);
+ else
+ __rte_hash_lookup_with_hash_bulk_l(h, keys, prim_hash,
+ num_keys, positions, hit_mask, data);
+}
+
+int
+rte_hash_lookup_with_hash_bulk(const struct rte_hash *h, const void **keys,
+ hash_sig_t *sig, uint32_t num_keys, int32_t *positions)
+{
+ RETURN_IF_TRUE(((h == NULL) || (keys == NULL) ||
+ (sig == NULL) || (num_keys == 0) ||
+ (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
+ (positions == NULL)), -EINVAL);
+
+ __rte_hash_lookup_with_hash_bulk(h, keys, sig, num_keys,
+ positions, NULL, NULL);
+ return 0;
+}
+
+int
+rte_hash_lookup_with_hash_bulk_data(const struct rte_hash *h,
+ const void **keys, hash_sig_t *sig,
+ uint32_t num_keys, uint64_t *hit_mask, void *data[])
+{
+ RETURN_IF_TRUE(((h == NULL) || (keys == NULL) ||
+ (sig == NULL) || (num_keys == 0) ||
+ (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
+ (hit_mask == NULL)), -EINVAL);
+
+ int32_t positions[num_keys];
+
+ __rte_hash_lookup_with_hash_bulk(h, keys, sig, num_keys,
+ positions, hit_mask, data);
+
+ /* Return number of hits */
+ return __builtin_popcountl(*hit_mask);
+}
+
int32_t
rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
{
diff --git a/lib/librte_hash/rte_hash.h b/lib/librte_hash/rte_hash.h
index ed0673b..13099d7 100644
--- a/lib/librte_hash/rte_hash.h
+++ b/lib/librte_hash/rte_hash.h
@@ -519,6 +519,61 @@ rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
uint32_t num_keys, uint64_t *hit_mask, void *data[]);
/**
+ * Find multiple keys in the hash table with precomputed hash value array.
+ * This operation is multi-thread safe with regarding to other lookup threads.
+ * Read-write concurrency can be enabled by setting flag during
+ * table creation.
+ *
+ * @param h
+ * Hash table to look in.
+ * @param keys
+ * A pointer to a list of keys to look for.
+ * @param sig
+ * A pointer to a list of precomputed hash values for keys.
+ * @param num_keys
+ * How many keys are in the keys list (less than RTE_HASH_LOOKUP_BULK_MAX).
+ * @param positions
+ * Output containing a list of values, corresponding to the list of keys that
+ * can be used by the caller as an offset into an array of user data. These
+ * values are unique for each key, and are the same values that were returned
+ * when each key was added. If a key in the list was not found, then -ENOENT
+ * will be the value.
+ * @return
+ * -EINVAL if there's an error, otherwise 0.
+ */
+__rte_experimental
+int
+rte_hash_lookup_with_hash_bulk(const struct rte_hash *h, const void **keys,
+ hash_sig_t *sig, uint32_t num_keys, int32_t *positions);
+
+/**
+ * Find multiple keys in the hash table with precomputed hash value array.
+ * This operation is multi-thread safe with regarding to other lookup threads.
+ * Read-write concurrency can be enabled by setting flag during
+ * table creation.
+ *
+ * @param h
+ * Hash table to look in.
+ * @param keys
+ * A pointer to a list of keys to look for.
+ * @param sig
+ * A pointer to a list of precomputed hash values for keys.
+ * @param num_keys
+ * How many keys are in the keys list (less than RTE_HASH_LOOKUP_BULK_MAX).
+ * @param hit_mask
+ * Output containing a bitmask with all successful lookups.
+ * @param data
+ * Output containing array of data returned from all the successful lookups.
+ * @return
+ * -EINVAL if there's an error, otherwise number of successful lookups.
+ */
+__rte_experimental
+int
+rte_hash_lookup_with_hash_bulk_data(const struct rte_hash *h,
+ const void **keys, hash_sig_t *sig,
+ uint32_t num_keys, uint64_t *hit_mask, void *data[]);
+
+/**
* Find multiple keys in the hash table.
* This operation is multi-thread safe with regarding to other lookup threads.
* Read-write concurrency can be enabled by setting flag during
diff --git a/lib/librte_hash/rte_hash_version.map b/lib/librte_hash/rte_hash_version.map
index a8fbbc3..c2a9094 100644
--- a/lib/librte_hash/rte_hash_version.map
+++ b/lib/librte_hash/rte_hash_version.map
@@ -33,6 +33,8 @@ EXPERIMENTAL {
global:
rte_hash_free_key_with_position;
+ rte_hash_lookup_with_hash_bulk;
+ rte_hash_lookup_with_hash_bulk_data;
rte_hash_max_key_id;
};
--
2.7.4
^ permalink raw reply [flat|nested] 19+ messages in thread
* [dpdk-dev] [PATCH v3 2/2] test: update hash performance tests
2020-03-26 16:47 ` [dpdk-dev] [PATCH v2 " Vladimir Medvedkin
2020-03-30 20:20 ` Wang, Yipeng1
2020-04-08 18:32 ` [dpdk-dev] [PATCH v3 " Vladimir Medvedkin
@ 2020-04-08 18:32 ` Vladimir Medvedkin
2020-04-16 9:44 ` Thomas Monjalon
2 siblings, 1 reply; 19+ messages in thread
From: Vladimir Medvedkin @ 2020-04-08 18:32 UTC (permalink / raw)
To: dev; +Cc: yipeng1.wang, sameh.gobriel, bruce.richardson
Add performance test for rte_hash_lookup_with_hash_bulk_data()
Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
---
app/test/test_hash_perf.c | 60 +++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 55 insertions(+), 5 deletions(-)
diff --git a/app/test/test_hash_perf.c b/app/test/test_hash_perf.c
index a438eae..d88bfa9 100644
--- a/app/test/test_hash_perf.c
+++ b/app/test/test_hash_perf.c
@@ -391,8 +391,8 @@ timed_lookups(unsigned int with_hash, unsigned int with_data,
}
static int
-timed_lookups_multi(unsigned int with_data, unsigned int table_index,
- unsigned int ext)
+timed_lookups_multi(unsigned int with_hash, unsigned int with_data,
+ unsigned int table_index, unsigned int ext)
{
unsigned i, j, k;
int32_t positions_burst[BURST_SIZE];
@@ -417,7 +417,7 @@ timed_lookups_multi(unsigned int with_data, unsigned int table_index,
for (j = 0; j < keys_to_add/BURST_SIZE; j++) {
for (k = 0; k < BURST_SIZE; k++)
keys_burst[k] = keys[j * BURST_SIZE + k];
- if (with_data) {
+ if (!with_hash && with_data) {
ret = rte_hash_lookup_bulk_data(h[table_index],
(const void **) keys_burst,
BURST_SIZE,
@@ -442,6 +442,54 @@ timed_lookups_multi(unsigned int with_data, unsigned int table_index,
return -1;
}
}
+ } else if (with_hash && with_data) {
+ ret = rte_hash_lookup_with_hash_bulk_data(
+ h[table_index],
+ (const void **)keys_burst,
+ &signatures[j * BURST_SIZE],
+ BURST_SIZE, &hit_mask, ret_data);
+ if (ret != BURST_SIZE) {
+ printf("Expect to find %u keys,"
+ " but found %d\n",
+ BURST_SIZE, ret);
+ return -1;
+ }
+ for (k = 0; k < BURST_SIZE; k++) {
+ if ((hit_mask & (1ULL << k)) == 0) {
+ printf("Key number %u"
+ " not found\n",
+ j * BURST_SIZE + k);
+ return -1;
+ }
+ expected_data[k] =
+ (void *)((uintptr_t)signatures[
+ j * BURST_SIZE + k]);
+ if (ret_data[k] != expected_data[k]) {
+ printf("Data returned for key"
+ " number %u is %p,"
+ " but should be %p\n",
+ j * BURST_SIZE + k,
+ ret_data[k],
+ expected_data[k]);
+ return -1;
+ }
+ }
+ } else if (with_hash && !with_data) {
+ ret = rte_hash_lookup_with_hash_bulk(
+ h[table_index],
+ (const void **)keys_burst,
+ &signatures[j * BURST_SIZE],
+ BURST_SIZE, positions_burst);
+ for (k = 0; k < BURST_SIZE; k++) {
+ if (positions_burst[k] !=
+ positions[j *
+ BURST_SIZE + k]) {
+ printf("Key looked up in %d, should be in %d\n",
+ positions_burst[k],
+ positions[j * BURST_SIZE + k]);
+ return -1;
+ }
+ }
} else {
rte_hash_lookup_bulk(h[table_index],
(const void **) keys_burst,
@@ -462,7 +510,8 @@ timed_lookups_multi(unsigned int with_data, unsigned int table_index,
const uint64_t end_tsc = rte_rdtsc();
const uint64_t time_taken = end_tsc - start_tsc;
- cycles[table_index][LOOKUP_MULTI][0][with_data] = time_taken/num_lookups;
+ cycles[table_index][LOOKUP_MULTI][with_hash][with_data] =
+ time_taken/num_lookups;
return 0;
}
@@ -543,7 +592,8 @@ run_all_tbl_perf_tests(unsigned int with_pushes, unsigned int with_locks,
if (timed_lookups(with_hash, with_data, i, ext) < 0)
return -1;
- if (timed_lookups_multi(with_data, i, ext) < 0)
+ if (timed_lookups_multi(with_hash, with_data,
+ i, ext) < 0)
return -1;
if (timed_deletes(with_hash, with_data, i, ext) < 0)
--
2.7.4
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [dpdk-dev] [PATCH v3 2/2] test: update hash performance tests
2020-04-08 18:32 ` [dpdk-dev] [PATCH v3 2/2] test: update hash performance tests Vladimir Medvedkin
@ 2020-04-16 9:44 ` Thomas Monjalon
2020-04-16 14:47 ` Medvedkin, Vladimir
0 siblings, 1 reply; 19+ messages in thread
From: Thomas Monjalon @ 2020-04-16 9:44 UTC (permalink / raw)
To: Vladimir Medvedkin; +Cc: dev, yipeng1.wang, sameh.gobriel, bruce.richardson
08/04/2020 20:32, Vladimir Medvedkin:
> Add preformance test for rte_hash_lookup_with_hash_bulk_data()
Why is it a separate patch?
To me, it is natural to add such test when adding a new hash API.
So they should be in the same patch.
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [dpdk-dev] [PATCH v3 1/2] hash: add hash bulk lookup with hash signatures array
2020-04-08 18:32 ` [dpdk-dev] [PATCH v3 " Vladimir Medvedkin
@ 2020-04-16 9:47 ` Thomas Monjalon
2020-04-16 15:00 ` [dpdk-dev] [PATCH v4] " Vladimir Medvedkin
1 sibling, 0 replies; 19+ messages in thread
From: Thomas Monjalon @ 2020-04-16 9:47 UTC (permalink / raw)
To: dev; +Cc: yipeng1.wang, sameh.gobriel, bruce.richardson, Vladimir Medvedkin
Ping for review, please.
08/04/2020 20:32, Vladimir Medvedkin:
> Implement rte_hash_lookup_with_hash_bulk_data() - lookup
> function with precomputed hash signatures.
>
> Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
> ---
> lib/librte_hash/rte_cuckoo_hash.c | 309 ++++++++++++++++++++++++-----------
> lib/librte_hash/rte_hash.h | 55 +++++++
> lib/librte_hash/rte_hash_version.map | 2 +
> 3 files changed, 269 insertions(+), 97 deletions(-)
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [dpdk-dev] [PATCH v3 2/2] test: update hash performance tests
2020-04-16 9:44 ` Thomas Monjalon
@ 2020-04-16 14:47 ` Medvedkin, Vladimir
2020-04-16 14:50 ` Thomas Monjalon
0 siblings, 1 reply; 19+ messages in thread
From: Medvedkin, Vladimir @ 2020-04-16 14:47 UTC (permalink / raw)
To: Thomas Monjalon; +Cc: dev, yipeng1.wang, sameh.gobriel, bruce.richardson
Hi Thomas,
On 16/04/2020 10:44, Thomas Monjalon wrote:
> 08/04/2020 20:32, Vladimir Medvedkin:
>> Add preformance test for rte_hash_lookup_with_hash_bulk_data()
> Why is it a separate patch?
> To me, it is natural to add such test when adding a new hash API.
> So they should be in the same patch.
>
Here I split the patch to help review it - first lib part, second - test
part. I can merge it if you think it should be.
>
--
Regards,
Vladimir
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [dpdk-dev] [PATCH v3 2/2] test: update hash performance tests
2020-04-16 14:47 ` Medvedkin, Vladimir
@ 2020-04-16 14:50 ` Thomas Monjalon
0 siblings, 0 replies; 19+ messages in thread
From: Thomas Monjalon @ 2020-04-16 14:50 UTC (permalink / raw)
To: Medvedkin, Vladimir; +Cc: dev, yipeng1.wang, sameh.gobriel, bruce.richardson
16/04/2020 16:47, Medvedkin, Vladimir:
> Hi Thomas,
>
> On 16/04/2020 10:44, Thomas Monjalon wrote:
> > 08/04/2020 20:32, Vladimir Medvedkin:
> >> Add preformance test for rte_hash_lookup_with_hash_bulk_data()
> > Why is it a separate patch?
> > To me, it is natural to add such test when adding a new hash API.
> > So they should be in the same patch.
> >
>
> Here I split the patch to help review it - first lib part, second - test
> part. I can merge it if you think it should be.
Yes please.
I don't think it helps reviewing because they are in different files anyway.
^ permalink raw reply [flat|nested] 19+ messages in thread
* [dpdk-dev] [PATCH v4] hash: add hash bulk lookup with hash signatures array
2020-04-08 18:32 ` [dpdk-dev] [PATCH v3 " Vladimir Medvedkin
2020-04-16 9:47 ` Thomas Monjalon
@ 2020-04-16 15:00 ` Vladimir Medvedkin
2020-04-16 15:07 ` [dpdk-dev] [PATCH v5] " Vladimir Medvedkin
1 sibling, 1 reply; 19+ messages in thread
From: Vladimir Medvedkin @ 2020-04-16 15:00 UTC (permalink / raw)
To: dev; +Cc: yipeng1.wang, sameh.gobriel, bruce.richardson
Implement rte_hash_lookup_with_hash_bulk_data() and
rte_hash_lookup_with_hash_bulk() - bulk lookup
functions with precomputed hash signatures.
Add these two functions into performance tests.
Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
---
app/test/test_hash_perf.c | 58 ++++++-
lib/librte_hash/rte_cuckoo_hash.c | 310 ++++++++++++++++++++++++-----------
lib/librte_hash/rte_hash.h | 55 +++++++
lib/librte_hash/rte_hash_version.map | 2 +
4 files changed, 323 insertions(+), 102 deletions(-)
diff --git a/app/test/test_hash_perf.c b/app/test/test_hash_perf.c
index a438eae..eff2f7a 100644
--- a/app/test/test_hash_perf.c
+++ b/app/test/test_hash_perf.c
@@ -391,8 +391,8 @@ timed_lookups(unsigned int with_hash, unsigned int with_data,
}
static int
-timed_lookups_multi(unsigned int with_data, unsigned int table_index,
- unsigned int ext)
+timed_lookups_multi(unsigned int with_hash, unsigned int with_data,
+ unsigned int table_index, unsigned int ext)
{
unsigned i, j, k;
int32_t positions_burst[BURST_SIZE];
@@ -417,7 +417,7 @@ timed_lookups_multi(unsigned int with_data, unsigned int table_index,
for (j = 0; j < keys_to_add/BURST_SIZE; j++) {
for (k = 0; k < BURST_SIZE; k++)
keys_burst[k] = keys[j * BURST_SIZE + k];
- if (with_data) {
+ if (!with_hash && with_data) {
ret = rte_hash_lookup_bulk_data(h[table_index],
(const void **) keys_burst,
BURST_SIZE,
@@ -442,6 +442,52 @@ timed_lookups_multi(unsigned int with_data, unsigned int table_index,
return -1;
}
}
+ } else if (with_hash && with_data) {
+ ret = rte_hash_lookup_with_hash_bulk_data(
+ h[table_index],
+ (const void **)keys_burst,
+ &signatures[j * BURST_SIZE],
+ BURST_SIZE, &hit_mask, ret_data);
+ if (ret != BURST_SIZE) {
+ printf("Expect to find %u keys,"
+ " but found %d\n",
+ BURST_SIZE, ret);
+ return -1;
+ }
+ for (k = 0; k < BURST_SIZE; k++) {
+ if ((hit_mask & (1ULL << k)) == 0) {
+ printf("Key number %u"
+ " not found\n",
+ j * BURST_SIZE + k);
+ return -1;
+ }
+ expected_data[k] =
+ (void *)((uintptr_t)signatures[
+ j * BURST_SIZE + k]);
+ if (ret_data[k] != expected_data[k]) {
+ printf("Data returned for key"
+ " number %u is %p,"
+ " but should be %p\n",
+ j * BURST_SIZE + k,
+ ret_data[k],
+ expected_data[k]);
+ return -1;
+ }
+ }
+ } else if (with_hash && !with_data) {
+ ret = rte_hash_lookup_with_hash_bulk(
+ h[table_index],
+ (const void **)keys_burst,
+ &signatures[j * BURST_SIZE],
+ BURST_SIZE, positions_burst);
+ for (k = 0; k < BURST_SIZE; k++) {
+ if (positions_burst[k] != positions[j * BURST_SIZE + k]) {
+ printf("Key looked up in %d, should be in %d\n",
+ positions_burst[k],
+ positions[j * BURST_SIZE + k]);
+ return -1;
+ }
+ }
} else {
rte_hash_lookup_bulk(h[table_index],
(const void **) keys_burst,
@@ -462,7 +508,8 @@ timed_lookups_multi(unsigned int with_data, unsigned int table_index,
const uint64_t end_tsc = rte_rdtsc();
const uint64_t time_taken = end_tsc - start_tsc;
- cycles[table_index][LOOKUP_MULTI][0][with_data] = time_taken/num_lookups;
+ cycles[table_index][LOOKUP_MULTI][with_hash][with_data] =
+ time_taken/num_lookups;
return 0;
}
@@ -543,7 +590,8 @@ run_all_tbl_perf_tests(unsigned int with_pushes, unsigned int with_locks,
if (timed_lookups(with_hash, with_data, i, ext) < 0)
return -1;
- if (timed_lookups_multi(with_data, i, ext) < 0)
+ if (timed_lookups_multi(with_hash, with_data,
+ i, ext) < 0)
return -1;
if (timed_deletes(with_hash, with_data, i, ext) < 0)
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 6af8ca4..38767a8 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -1711,64 +1711,20 @@ compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
}
}
-#define PREFETCH_OFFSET 4
static inline void
-__rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
- int32_t num_keys, int32_t *positions,
- uint64_t *hit_mask, void *data[])
+__bulk_lookup_l(const struct rte_hash *h, const void **keys,
+ const struct rte_hash_bucket **primary_bkt,
+ const struct rte_hash_bucket **secondary_bkt,
+ uint16_t *sig, int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
{
uint64_t hits = 0;
int32_t i;
int32_t ret;
- uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
- uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
- uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
- uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
- const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
- const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
struct rte_hash_bucket *cur_bkt, *next_bkt;
- /* Prefetch first keys */
- for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
- rte_prefetch0(keys[i]);
-
- /*
- * Prefetch rest of the keys, calculate primary and
- * secondary bucket and prefetch them
- */
- for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
- rte_prefetch0(keys[i + PREFETCH_OFFSET]);
-
- prim_hash[i] = rte_hash_hash(h, keys[i]);
-
- sig[i] = get_short_sig(prim_hash[i]);
- prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
- sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
-
- primary_bkt[i] = &h->buckets[prim_index[i]];
- secondary_bkt[i] = &h->buckets[sec_index[i]];
-
- rte_prefetch0(primary_bkt[i]);
- rte_prefetch0(secondary_bkt[i]);
- }
-
- /* Calculate and prefetch rest of the buckets */
- for (; i < num_keys; i++) {
- prim_hash[i] = rte_hash_hash(h, keys[i]);
-
- sig[i] = get_short_sig(prim_hash[i]);
- prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
- sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
-
- primary_bkt[i] = &h->buckets[prim_index[i]];
- secondary_bkt[i] = &h->buckets[sec_index[i]];
-
- rte_prefetch0(primary_bkt[i]);
- rte_prefetch0(secondary_bkt[i]);
- }
-
__hash_rw_reader_lock(h);
/* Compare signatures and prefetch key slot of first hit */
@@ -1903,63 +1859,20 @@ __rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
}
static inline void
-__rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
- int32_t num_keys, int32_t *positions,
- uint64_t *hit_mask, void *data[])
+__bulk_lookup_lf(const struct rte_hash *h, const void **keys,
+ const struct rte_hash_bucket **primary_bkt,
+ const struct rte_hash_bucket **secondary_bkt,
+ uint16_t *sig, int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
{
uint64_t hits = 0;
int32_t i;
int32_t ret;
- uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
- uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
- uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
- uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
- const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
- const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
struct rte_hash_bucket *cur_bkt, *next_bkt;
uint32_t cnt_b, cnt_a;
- /* Prefetch first keys */
- for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
- rte_prefetch0(keys[i]);
-
- /*
- * Prefetch rest of the keys, calculate primary and
- * secondary bucket and prefetch them
- */
- for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
- rte_prefetch0(keys[i + PREFETCH_OFFSET]);
-
- prim_hash[i] = rte_hash_hash(h, keys[i]);
-
- sig[i] = get_short_sig(prim_hash[i]);
- prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
- sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
-
- primary_bkt[i] = &h->buckets[prim_index[i]];
- secondary_bkt[i] = &h->buckets[sec_index[i]];
-
- rte_prefetch0(primary_bkt[i]);
- rte_prefetch0(secondary_bkt[i]);
- }
-
- /* Calculate and prefetch rest of the buckets */
- for (; i < num_keys; i++) {
- prim_hash[i] = rte_hash_hash(h, keys[i]);
-
- sig[i] = get_short_sig(prim_hash[i]);
- prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
- sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
-
- primary_bkt[i] = &h->buckets[prim_index[i]];
- secondary_bkt[i] = &h->buckets[sec_index[i]];
-
- rte_prefetch0(primary_bkt[i]);
- rte_prefetch0(secondary_bkt[i]);
- }
-
for (i = 0; i < num_keys; i++)
positions[i] = -ENOENT;
@@ -2124,6 +2037,92 @@ __rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
*hit_mask = hits;
}
+#define PREFETCH_OFFSET 4
+static inline void
+__bulk_lookup_prefetching_loop(const struct rte_hash *h,
+ const void **keys, int32_t num_keys,
+ uint16_t *sig,
+ const struct rte_hash_bucket **primary_bkt,
+ const struct rte_hash_bucket **secondary_bkt)
+{
+ int32_t i;
+ uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
+
+ /* Prefetch first keys */
+ for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
+ rte_prefetch0(keys[i]);
+
+ /*
+ * Prefetch rest of the keys, calculate primary and
+ * secondary bucket and prefetch them
+ */
+ for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
+ rte_prefetch0(keys[i + PREFETCH_OFFSET]);
+
+ prim_hash[i] = rte_hash_hash(h, keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+
+ /* Calculate and prefetch rest of the buckets */
+ for (; i < num_keys; i++) {
+ prim_hash[i] = rte_hash_hash(h, keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+}
+
+
+static inline void
+__rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+
+ __bulk_lookup_prefetching_loop(h, keys, num_keys, sig,
+ primary_bkt, secondary_bkt);
+
+ __bulk_lookup_l(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
+ positions, hit_mask, data);
+}
+
+static inline void
+__rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+
+ __bulk_lookup_prefetching_loop(h, keys, num_keys, sig,
+ primary_bkt, secondary_bkt);
+
+ __bulk_lookup_lf(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
+ positions, hit_mask, data);
+}
+
static inline void
__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
int32_t num_keys, int32_t *positions,
@@ -2165,6 +2164,123 @@ rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
return __builtin_popcountl(*hit_mask);
}
+
+static inline void
+__rte_hash_lookup_with_hash_bulk_l(const struct rte_hash *h,
+ const void **keys, hash_sig_t *prim_hash,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ int32_t i;
+ uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+
+ /*
+ * Prefetch keys, calculate primary and
+ * secondary bucket and prefetch them
+ */
+ for (i = 0; i < num_keys; i++) {
+ rte_prefetch0(keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+
+ __bulk_lookup_l(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
+ positions, hit_mask, data);
+}
+
+static inline void
+__rte_hash_lookup_with_hash_bulk_lf(const struct rte_hash *h,
+ const void **keys, hash_sig_t *prim_hash,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ int32_t i;
+ uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+
+ /*
+ * Prefetch keys, calculate primary and
+ * secondary bucket and prefetch them
+ */
+ for (i = 0; i < num_keys; i++) {
+ rte_prefetch0(keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+
+ __bulk_lookup_lf(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
+ positions, hit_mask, data);
+}
+
+static inline void
+__rte_hash_lookup_with_hash_bulk(const struct rte_hash *h, const void **keys,
+ hash_sig_t *prim_hash, int32_t num_keys,
+ int32_t *positions, uint64_t *hit_mask, void *data[])
+{
+ if (h->readwrite_concur_lf_support)
+ __rte_hash_lookup_with_hash_bulk_lf(h, keys, prim_hash,
+ num_keys, positions, hit_mask, data);
+ else
+ __rte_hash_lookup_with_hash_bulk_l(h, keys, prim_hash,
+ num_keys, positions, hit_mask, data);
+}
+
+int
+rte_hash_lookup_with_hash_bulk(const struct rte_hash *h, const void **keys,
+ hash_sig_t *sig, uint32_t num_keys, int32_t *positions)
+{
+ RETURN_IF_TRUE(((h == NULL) || (keys == NULL) ||
+ (sig == NULL) || (num_keys == 0) ||
+ (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
+ (positions == NULL)), -EINVAL);
+
+ __rte_hash_lookup_with_hash_bulk(h, keys, sig, num_keys,
+ positions, NULL, NULL);
+ return 0;
+}
+
+int
+rte_hash_lookup_with_hash_bulk_data(const struct rte_hash *h,
+ const void **keys, hash_sig_t *sig,
+ uint32_t num_keys, uint64_t *hit_mask, void *data[])
+{
+ RETURN_IF_TRUE(((h == NULL) || (keys == NULL) ||
+ (sig == NULL) || (num_keys == 0) ||
+ (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
+ (hit_mask == NULL)), -EINVAL);
+
+ int32_t positions[num_keys];
+
+ __rte_hash_lookup_with_hash_bulk(h, keys, sig, num_keys,
+ positions, hit_mask, data);
+
+ /* Return number of hits */
+ return __builtin_popcountl(*hit_mask);
+}
+
int32_t
rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
{
diff --git a/lib/librte_hash/rte_hash.h b/lib/librte_hash/rte_hash.h
index ed0673b..13099d7 100644
--- a/lib/librte_hash/rte_hash.h
+++ b/lib/librte_hash/rte_hash.h
@@ -519,6 +519,61 @@ rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
uint32_t num_keys, uint64_t *hit_mask, void *data[]);
/**
+ * Find multiple keys in the hash table with precomputed hash value array.
+ * This operation is multi-thread safe with regarding to other lookup threads.
+ * Read-write concurrency can be enabled by setting flag during
+ * table creation.
+ *
+ * @param h
+ * Hash table to look in.
+ * @param keys
+ * A pointer to a list of keys to look for.
+ * @param sig
+ * A pointer to a list of precomputed hash values for keys.
+ * @param num_keys
+ * How many keys are in the keys list (less than RTE_HASH_LOOKUP_BULK_MAX).
+ * @param positions
+ * Output containing a list of values, corresponding to the list of keys that
+ * can be used by the caller as an offset into an array of user data. These
+ * values are unique for each key, and are the same values that were returned
+ * when each key was added. If a key in the list was not found, then -ENOENT
+ * will be the value.
+ * @return
+ * -EINVAL if there's an error, otherwise 0.
+ */
+__rte_experimental
+int
+rte_hash_lookup_with_hash_bulk(const struct rte_hash *h, const void **keys,
+ hash_sig_t *sig, uint32_t num_keys, int32_t *positions);
+
+/**
+ * Find multiple keys in the hash table with precomputed hash value array.
+ * This operation is multi-thread safe with regarding to other lookup threads.
+ * Read-write concurrency can be enabled by setting flag during
+ * table creation.
+ *
+ * @param h
+ * Hash table to look in.
+ * @param keys
+ * A pointer to a list of keys to look for.
+ * @param sig
+ * A pointer to a list of precomputed hash values for keys.
+ * @param num_keys
+ * How many keys are in the keys list (less than RTE_HASH_LOOKUP_BULK_MAX).
+ * @param hit_mask
+ * Output containing a bitmask with all successful lookups.
+ * @param data
+ * Output containing array of data returned from all the successful lookups.
+ * @return
+ * -EINVAL if there's an error, otherwise number of successful lookups.
+ */
+__rte_experimental
+int
+rte_hash_lookup_with_hash_bulk_data(const struct rte_hash *h,
+ const void **keys, hash_sig_t *sig,
+ uint32_t num_keys, uint64_t *hit_mask, void *data[]);
+
+/**
* Find multiple keys in the hash table.
* This operation is multi-thread safe with regarding to other lookup threads.
* Read-write concurrency can be enabled by setting flag during
diff --git a/lib/librte_hash/rte_hash_version.map b/lib/librte_hash/rte_hash_version.map
index a8fbbc3..c2a9094 100644
--- a/lib/librte_hash/rte_hash_version.map
+++ b/lib/librte_hash/rte_hash_version.map
@@ -33,6 +33,8 @@ EXPERIMENTAL {
global:
rte_hash_free_key_with_position;
+ rte_hash_lookup_with_hash_bulk;
+ rte_hash_lookup_with_hash_bulk_data;
rte_hash_max_key_id;
};
--
2.7.4
^ permalink raw reply [flat|nested] 19+ messages in thread
* [dpdk-dev] [PATCH v5] hash: add hash bulk lookup with hash signatures array
2020-04-16 15:00 ` [dpdk-dev] [PATCH v4] " Vladimir Medvedkin
@ 2020-04-16 15:07 ` Vladimir Medvedkin
2020-04-16 23:46 ` Wang, Yipeng1
0 siblings, 1 reply; 19+ messages in thread
From: Vladimir Medvedkin @ 2020-04-16 15:07 UTC (permalink / raw)
To: dev; +Cc: yipeng1.wang, sameh.gobriel, bruce.richardson
Implement rte_hash_lookup_with_hash_bulk_data() and
rte_hash_lookup_with_hash_bulk() - bulk lookup
functions with precomputed hash signatures.
Add these two functions into performance tests.
Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
---
app/test/test_hash_perf.c | 61 ++++++-
lib/librte_hash/rte_cuckoo_hash.c | 310 ++++++++++++++++++++++++-----------
lib/librte_hash/rte_hash.h | 55 +++++++
lib/librte_hash/rte_hash_version.map | 2 +
4 files changed, 326 insertions(+), 102 deletions(-)
diff --git a/app/test/test_hash_perf.c b/app/test/test_hash_perf.c
index a438eae..76cdac5 100644
--- a/app/test/test_hash_perf.c
+++ b/app/test/test_hash_perf.c
@@ -391,8 +391,8 @@ timed_lookups(unsigned int with_hash, unsigned int with_data,
}
static int
-timed_lookups_multi(unsigned int with_data, unsigned int table_index,
- unsigned int ext)
+timed_lookups_multi(unsigned int with_hash, unsigned int with_data,
+ unsigned int table_index, unsigned int ext)
{
unsigned i, j, k;
int32_t positions_burst[BURST_SIZE];
@@ -417,7 +417,7 @@ timed_lookups_multi(unsigned int with_data, unsigned int table_index,
for (j = 0; j < keys_to_add/BURST_SIZE; j++) {
for (k = 0; k < BURST_SIZE; k++)
keys_burst[k] = keys[j * BURST_SIZE + k];
- if (with_data) {
+ if (!with_hash && with_data) {
ret = rte_hash_lookup_bulk_data(h[table_index],
(const void **) keys_burst,
BURST_SIZE,
@@ -442,6 +442,55 @@ timed_lookups_multi(unsigned int with_data, unsigned int table_index,
return -1;
}
}
+ } else if (with_hash && with_data) {
+ ret = rte_hash_lookup_with_hash_bulk_data(
+ h[table_index],
+ (const void **)keys_burst,
+ &signatures[j * BURST_SIZE],
+ BURST_SIZE, &hit_mask, ret_data);
+ if (ret != BURST_SIZE) {
+ printf("Expect to find %u keys,"
+ " but found %d\n",
+ BURST_SIZE, ret);
+ return -1;
+ }
+ for (k = 0; k < BURST_SIZE; k++) {
+ if ((hit_mask & (1ULL << k)) == 0) {
+ printf("Key number %u"
+ " not found\n",
+ j * BURST_SIZE + k);
+ return -1;
+ }
+ expected_data[k] =
+ (void *)((uintptr_t)signatures[
+ j * BURST_SIZE + k]);
+ if (ret_data[k] != expected_data[k]) {
+ printf("Data returned for key"
+ " number %u is %p,"
+ " but should be %p\n",
+ j * BURST_SIZE + k,
+ ret_data[k],
+ expected_data[k]);
+ return -1;
+ }
+ }
+ } else if (with_hash && !with_data) {
+ ret = rte_hash_lookup_with_hash_bulk(
+ h[table_index],
+ (const void **)keys_burst,
+ &signatures[j * BURST_SIZE],
+ BURST_SIZE, positions_burst);
+ for (k = 0; k < BURST_SIZE; k++) {
+ if (positions_burst[k] !=
+ positions[j *
+ BURST_SIZE + k]) {
+ printf("Key looked up in %d, should be in %d\n",
+ positions_burst[k],
+ positions[j *
+ BURST_SIZE + k]);
+ return -1;
+ }
+ }
} else {
rte_hash_lookup_bulk(h[table_index],
(const void **) keys_burst,
@@ -462,7 +511,8 @@ timed_lookups_multi(unsigned int with_data, unsigned int table_index,
const uint64_t end_tsc = rte_rdtsc();
const uint64_t time_taken = end_tsc - start_tsc;
- cycles[table_index][LOOKUP_MULTI][0][with_data] = time_taken/num_lookups;
+ cycles[table_index][LOOKUP_MULTI][with_hash][with_data] =
+ time_taken/num_lookups;
return 0;
}
@@ -543,7 +593,8 @@ run_all_tbl_perf_tests(unsigned int with_pushes, unsigned int with_locks,
if (timed_lookups(with_hash, with_data, i, ext) < 0)
return -1;
- if (timed_lookups_multi(with_data, i, ext) < 0)
+ if (timed_lookups_multi(with_hash, with_data,
+ i, ext) < 0)
return -1;
if (timed_deletes(with_hash, with_data, i, ext) < 0)
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 6af8ca4..38767a8 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -1711,64 +1711,20 @@ compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
}
}
-#define PREFETCH_OFFSET 4
static inline void
-__rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
- int32_t num_keys, int32_t *positions,
- uint64_t *hit_mask, void *data[])
+__bulk_lookup_l(const struct rte_hash *h, const void **keys,
+ const struct rte_hash_bucket **primary_bkt,
+ const struct rte_hash_bucket **secondary_bkt,
+ uint16_t *sig, int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
{
uint64_t hits = 0;
int32_t i;
int32_t ret;
- uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
- uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
- uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
- uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
- const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
- const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
struct rte_hash_bucket *cur_bkt, *next_bkt;
- /* Prefetch first keys */
- for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
- rte_prefetch0(keys[i]);
-
- /*
- * Prefetch rest of the keys, calculate primary and
- * secondary bucket and prefetch them
- */
- for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
- rte_prefetch0(keys[i + PREFETCH_OFFSET]);
-
- prim_hash[i] = rte_hash_hash(h, keys[i]);
-
- sig[i] = get_short_sig(prim_hash[i]);
- prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
- sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
-
- primary_bkt[i] = &h->buckets[prim_index[i]];
- secondary_bkt[i] = &h->buckets[sec_index[i]];
-
- rte_prefetch0(primary_bkt[i]);
- rte_prefetch0(secondary_bkt[i]);
- }
-
- /* Calculate and prefetch rest of the buckets */
- for (; i < num_keys; i++) {
- prim_hash[i] = rte_hash_hash(h, keys[i]);
-
- sig[i] = get_short_sig(prim_hash[i]);
- prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
- sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
-
- primary_bkt[i] = &h->buckets[prim_index[i]];
- secondary_bkt[i] = &h->buckets[sec_index[i]];
-
- rte_prefetch0(primary_bkt[i]);
- rte_prefetch0(secondary_bkt[i]);
- }
-
__hash_rw_reader_lock(h);
/* Compare signatures and prefetch key slot of first hit */
@@ -1903,63 +1859,20 @@ __rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
}
static inline void
-__rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
- int32_t num_keys, int32_t *positions,
- uint64_t *hit_mask, void *data[])
+__bulk_lookup_lf(const struct rte_hash *h, const void **keys,
+ const struct rte_hash_bucket **primary_bkt,
+ const struct rte_hash_bucket **secondary_bkt,
+ uint16_t *sig, int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
{
uint64_t hits = 0;
int32_t i;
int32_t ret;
- uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
- uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
- uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
- uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
- const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
- const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
struct rte_hash_bucket *cur_bkt, *next_bkt;
uint32_t cnt_b, cnt_a;
- /* Prefetch first keys */
- for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
- rte_prefetch0(keys[i]);
-
- /*
- * Prefetch rest of the keys, calculate primary and
- * secondary bucket and prefetch them
- */
- for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
- rte_prefetch0(keys[i + PREFETCH_OFFSET]);
-
- prim_hash[i] = rte_hash_hash(h, keys[i]);
-
- sig[i] = get_short_sig(prim_hash[i]);
- prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
- sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
-
- primary_bkt[i] = &h->buckets[prim_index[i]];
- secondary_bkt[i] = &h->buckets[sec_index[i]];
-
- rte_prefetch0(primary_bkt[i]);
- rte_prefetch0(secondary_bkt[i]);
- }
-
- /* Calculate and prefetch rest of the buckets */
- for (; i < num_keys; i++) {
- prim_hash[i] = rte_hash_hash(h, keys[i]);
-
- sig[i] = get_short_sig(prim_hash[i]);
- prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
- sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
-
- primary_bkt[i] = &h->buckets[prim_index[i]];
- secondary_bkt[i] = &h->buckets[sec_index[i]];
-
- rte_prefetch0(primary_bkt[i]);
- rte_prefetch0(secondary_bkt[i]);
- }
-
for (i = 0; i < num_keys; i++)
positions[i] = -ENOENT;
@@ -2124,6 +2037,92 @@ __rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
*hit_mask = hits;
}
+#define PREFETCH_OFFSET 4
+static inline void
+__bulk_lookup_prefetching_loop(const struct rte_hash *h,
+ const void **keys, int32_t num_keys,
+ uint16_t *sig,
+ const struct rte_hash_bucket **primary_bkt,
+ const struct rte_hash_bucket **secondary_bkt)
+{
+ int32_t i;
+ uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
+
+ /* Prefetch first keys */
+ for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
+ rte_prefetch0(keys[i]);
+
+ /*
+ * Prefetch rest of the keys, calculate primary and
+ * secondary bucket and prefetch them
+ */
+ for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
+ rte_prefetch0(keys[i + PREFETCH_OFFSET]);
+
+ prim_hash[i] = rte_hash_hash(h, keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+
+ /* Calculate and prefetch rest of the buckets */
+ for (; i < num_keys; i++) {
+ prim_hash[i] = rte_hash_hash(h, keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+}
+
+
+static inline void
+__rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+
+ __bulk_lookup_prefetching_loop(h, keys, num_keys, sig,
+ primary_bkt, secondary_bkt);
+
+ __bulk_lookup_l(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
+ positions, hit_mask, data);
+}
+
+static inline void
+__rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+
+ __bulk_lookup_prefetching_loop(h, keys, num_keys, sig,
+ primary_bkt, secondary_bkt);
+
+ __bulk_lookup_lf(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
+ positions, hit_mask, data);
+}
+
static inline void
__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
int32_t num_keys, int32_t *positions,
@@ -2165,6 +2164,123 @@ rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
return __builtin_popcountl(*hit_mask);
}
+
+static inline void
+__rte_hash_lookup_with_hash_bulk_l(const struct rte_hash *h,
+ const void **keys, hash_sig_t *prim_hash,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ int32_t i;
+ uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+
+ /*
+ * Prefetch keys, calculate primary and
+ * secondary bucket and prefetch them
+ */
+ for (i = 0; i < num_keys; i++) {
+ rte_prefetch0(keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+
+ __bulk_lookup_l(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
+ positions, hit_mask, data);
+}
+
+static inline void
+__rte_hash_lookup_with_hash_bulk_lf(const struct rte_hash *h,
+ const void **keys, hash_sig_t *prim_hash,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ int32_t i;
+ uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+
+ /*
+ * Prefetch keys, calculate primary and
+ * secondary bucket and prefetch them
+ */
+ for (i = 0; i < num_keys; i++) {
+ rte_prefetch0(keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+
+ __bulk_lookup_lf(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
+ positions, hit_mask, data);
+}
+
+static inline void
+__rte_hash_lookup_with_hash_bulk(const struct rte_hash *h, const void **keys,
+ hash_sig_t *prim_hash, int32_t num_keys,
+ int32_t *positions, uint64_t *hit_mask, void *data[])
+{
+ if (h->readwrite_concur_lf_support)
+ __rte_hash_lookup_with_hash_bulk_lf(h, keys, prim_hash,
+ num_keys, positions, hit_mask, data);
+ else
+ __rte_hash_lookup_with_hash_bulk_l(h, keys, prim_hash,
+ num_keys, positions, hit_mask, data);
+}
+
+int
+rte_hash_lookup_with_hash_bulk(const struct rte_hash *h, const void **keys,
+ hash_sig_t *sig, uint32_t num_keys, int32_t *positions)
+{
+ RETURN_IF_TRUE(((h == NULL) || (keys == NULL) ||
+ (sig == NULL) || (num_keys == 0) ||
+ (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
+ (positions == NULL)), -EINVAL);
+
+ __rte_hash_lookup_with_hash_bulk(h, keys, sig, num_keys,
+ positions, NULL, NULL);
+ return 0;
+}
+
+int
+rte_hash_lookup_with_hash_bulk_data(const struct rte_hash *h,
+ const void **keys, hash_sig_t *sig,
+ uint32_t num_keys, uint64_t *hit_mask, void *data[])
+{
+ RETURN_IF_TRUE(((h == NULL) || (keys == NULL) ||
+ (sig == NULL) || (num_keys == 0) ||
+ (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
+ (hit_mask == NULL)), -EINVAL);
+
+ int32_t positions[num_keys];
+
+ __rte_hash_lookup_with_hash_bulk(h, keys, sig, num_keys,
+ positions, hit_mask, data);
+
+ /* Return number of hits */
+ return __builtin_popcountl(*hit_mask);
+}
+
int32_t
rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
{
diff --git a/lib/librte_hash/rte_hash.h b/lib/librte_hash/rte_hash.h
index ed0673b..13099d7 100644
--- a/lib/librte_hash/rte_hash.h
+++ b/lib/librte_hash/rte_hash.h
@@ -519,6 +519,61 @@ rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
uint32_t num_keys, uint64_t *hit_mask, void *data[]);
/**
+ * Find multiple keys in the hash table with precomputed hash value array.
+ * This operation is multi-thread safe with regard to other lookup threads.
+ * Read-write concurrency can be enabled by setting a flag during
+ * table creation.
+ *
+ * @param h
+ * Hash table to look in.
+ * @param keys
+ * A pointer to a list of keys to look for.
+ * @param sig
+ * A pointer to a list of precomputed hash values for keys.
+ * @param num_keys
+ * How many keys are in the keys list (no more than RTE_HASH_LOOKUP_BULK_MAX).
+ * @param positions
+ * Output containing a list of values, corresponding to the list of keys that
+ * can be used by the caller as an offset into an array of user data. These
+ * values are unique for each key, and are the same values that were returned
+ * when each key was added. If a key in the list was not found, then -ENOENT
+ * will be the value.
+ * @return
+ * -EINVAL if there's an error, otherwise 0.
+ */
+__rte_experimental
+int
+rte_hash_lookup_with_hash_bulk(const struct rte_hash *h, const void **keys,
+ hash_sig_t *sig, uint32_t num_keys, int32_t *positions);
+
+/**
+ * Find multiple keys in the hash table with precomputed hash value array.
+ * This operation is multi-thread safe with regard to other lookup threads.
+ * Read-write concurrency can be enabled by setting a flag during
+ * table creation.
+ *
+ * @param h
+ * Hash table to look in.
+ * @param keys
+ * A pointer to a list of keys to look for.
+ * @param sig
+ * A pointer to a list of precomputed hash values for keys.
+ * @param num_keys
+ * How many keys are in the keys list (no more than RTE_HASH_LOOKUP_BULK_MAX).
+ * @param hit_mask
+ * Output containing a bitmask with all successful lookups.
+ * @param data
+ * Output containing array of data returned from all the successful lookups.
+ * @return
+ * -EINVAL if there's an error, otherwise number of successful lookups.
+ */
+__rte_experimental
+int
+rte_hash_lookup_with_hash_bulk_data(const struct rte_hash *h,
+ const void **keys, hash_sig_t *sig,
+ uint32_t num_keys, uint64_t *hit_mask, void *data[]);
+
+/**
* Find multiple keys in the hash table.
* This operation is multi-thread safe with regarding to other lookup threads.
* Read-write concurrency can be enabled by setting flag during
diff --git a/lib/librte_hash/rte_hash_version.map b/lib/librte_hash/rte_hash_version.map
index a8fbbc3..c2a9094 100644
--- a/lib/librte_hash/rte_hash_version.map
+++ b/lib/librte_hash/rte_hash_version.map
@@ -33,6 +33,8 @@ EXPERIMENTAL {
global:
rte_hash_free_key_with_position;
+ rte_hash_lookup_with_hash_bulk;
+ rte_hash_lookup_with_hash_bulk_data;
rte_hash_max_key_id;
};
--
2.7.4
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [dpdk-dev] [PATCH v5] hash: add hash bulk lookup with hash signatures array
2020-04-16 15:07 ` [dpdk-dev] [PATCH v5] " Vladimir Medvedkin
@ 2020-04-16 23:46 ` Wang, Yipeng1
2020-04-25 13:30 ` Thomas Monjalon
0 siblings, 1 reply; 19+ messages in thread
From: Wang, Yipeng1 @ 2020-04-16 23:46 UTC (permalink / raw)
To: Medvedkin, Vladimir, dev; +Cc: Gobriel, Sameh, Richardson, Bruce
> -----Original Message-----
> From: Medvedkin, Vladimir <vladimir.medvedkin@intel.com>
> Sent: Thursday, April 16, 2020 8:07 AM
> To: dev@dpdk.org
> Cc: Wang, Yipeng1 <yipeng1.wang@intel.com>; Gobriel, Sameh
> <sameh.gobriel@intel.com>; Richardson, Bruce <bruce.richardson@intel.com>
> Subject: [PATCH v5] hash: add hash bulk lookup with hash signatures array
>
> Implement rte_hash_lookup_with_hash_bulk_data() and
> rte_hash_lookup_with_hash_bulk() - bulk lookup functions with precomputed
> hash signatures.
> Add these two functions into performance tests.
>
> Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
> ---
[Wang, Yipeng] Hi, Vladimir, thanks for the changes per my comment.
It looks good now.
Acked-by: Yipeng Wang <yipeng1.wang@intel.com>
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [dpdk-dev] [PATCH v5] hash: add hash bulk lookup with hash signatures array
2020-04-16 23:46 ` Wang, Yipeng1
@ 2020-04-25 13:30 ` Thomas Monjalon
2020-04-25 13:37 ` Thomas Monjalon
0 siblings, 1 reply; 19+ messages in thread
From: Thomas Monjalon @ 2020-04-25 13:30 UTC (permalink / raw)
To: Medvedkin, Vladimir; +Cc: dev, Gobriel, Sameh, Richardson, Bruce, Wang, Yipeng1
17/04/2020 01:46, Wang, Yipeng1:
> From: Medvedkin, Vladimir <vladimir.medvedkin@intel.com>
> >
> > Implement rte_hash_lookup_with_hash_bulk_data() and
> > rte_hash_lookup_with_hash_bulk() - bulk lookup functions with precomputed
> > hash signatures.
> > Add these two functions into performance tests.
> >
> > Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
> > ---
> [Wang, Yipeng] Hi, Vladimir, thanks for the changes per my comment.
> It looks good now.
>
> Acked-by: Yipeng Wang <yipeng1.wang@intel.com>
Applied, thanks
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [dpdk-dev] [PATCH v5] hash: add hash bulk lookup with hash signatures array
2020-04-25 13:30 ` Thomas Monjalon
@ 2020-04-25 13:37 ` Thomas Monjalon
0 siblings, 0 replies; 19+ messages in thread
From: Thomas Monjalon @ 2020-04-25 13:37 UTC (permalink / raw)
To: Medvedkin, Vladimir; +Cc: dev, Gobriel, Sameh, Richardson, Bruce, Wang, Yipeng1
25/04/2020 15:30, Thomas Monjalon:
> 17/04/2020 01:46, Wang, Yipeng1:
> > From: Medvedkin, Vladimir <vladimir.medvedkin@intel.com>
> > >
> > > Implement rte_hash_lookup_with_hash_bulk_data() and
> > > rte_hash_lookup_with_hash_bulk() - bulk lookup functions with precomputed
> > > hash signatures.
> > > Add these two functions into performance tests.
> > >
> > > Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
> > > ---
> > [Wang, Yipeng] Hi, Vladimir, thanks for the changes per my comment.
> > It looks good now.
> >
> > Acked-by: Yipeng Wang <yipeng1.wang@intel.com>
>
> Applied, thanks
Note: I've added this doxygen comment in the new functions:
* @warning
* @b EXPERIMENTAL: this API may change without prior notice
^ permalink raw reply [flat|nested] 19+ messages in thread
end of thread, other threads:[~2020-04-25 13:37 UTC | newest]
Thread overview: 19+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-03-09 12:44 [dpdk-dev] [PATCH 1/2] hash: add hash bulk lookup with hash signatures array Vladimir Medvedkin
2020-03-09 12:44 ` [dpdk-dev] [PATCH 2/2] test: update hash performance tests Vladimir Medvedkin
2020-03-17 17:27 ` [dpdk-dev] [PATCH 1/2] hash: add hash bulk lookup with hash signatures array Wang, Yipeng1
2020-03-26 14:16 ` Medvedkin, Vladimir
2020-03-26 16:47 ` [dpdk-dev] [PATCH v2 " Vladimir Medvedkin
2020-03-30 20:20 ` Wang, Yipeng1
2020-04-08 18:32 ` [dpdk-dev] [PATCH v3 " Vladimir Medvedkin
2020-04-16 9:47 ` Thomas Monjalon
2020-04-16 15:00 ` [dpdk-dev] [PATCH v4] " Vladimir Medvedkin
2020-04-16 15:07 ` [dpdk-dev] [PATCH v5] " Vladimir Medvedkin
2020-04-16 23:46 ` Wang, Yipeng1
2020-04-25 13:30 ` Thomas Monjalon
2020-04-25 13:37 ` Thomas Monjalon
2020-04-08 18:32 ` [dpdk-dev] [PATCH v3 2/2] test: update hash performance tests Vladimir Medvedkin
2020-04-16 9:44 ` Thomas Monjalon
2020-04-16 14:47 ` Medvedkin, Vladimir
2020-04-16 14:50 ` Thomas Monjalon
2020-03-26 16:47 ` [dpdk-dev] [PATCH v2 " Vladimir Medvedkin
2020-03-30 20:33 ` Wang, Yipeng1
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).