* [dpdk-dev] [PATCH 1/4] hash: prepare for lock-free and rw-lock separation
From: Honnappa Nagarahalli @ 2018-11-09 16:39 UTC (permalink / raw)
To: bruce.richardson, pablo.de.lara.guarch
Cc: dev, jerin.jacob, hemant.agrawal, chaozhu, yipeng1.wang,
dharmik.thakkar, gavin.hu, honnappa.nagarahalli, nd
Copy the lookup functions and create lock-free
versions of them.
This is an intermediate commit meant to ease the
review process.
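For reviewers new to the scheme: the lock-free lookup relies on a
seqlock-style retry loop around the table change counter. The sketch
below shows the bare pattern with hypothetical names
(search_buckets_sketch stands in for the real primary/secondary bucket
search); it is an illustration, not the exact DPDK code.

#include <errno.h>
#include <stdint.h>

/* Stub standing in for the real bucket search; returns the key
 * index on a hit, -1 on a miss.
 */
static int32_t
search_buckets_sketch(const void *key)
{
	(void)key;
	return -1;
}

/* tbl_chng_cnt is incremented by writers around every key
 * relocation, so a reader can detect that the table moved
 * underneath it.
 */
static int32_t
lookup_lf_sketch(const void *key, const uint32_t *tbl_chng_cnt)
{
	uint32_t cnt_a, cnt_b;
	int32_t ret;

	do {
		/* Acquire: the bucket loads below cannot be
		 * hoisted above this counter load.
		 */
		cnt_b = __atomic_load_n(tbl_chng_cnt, __ATOMIC_ACQUIRE);

		ret = search_buckets_sketch(key);
		if (ret != -1)
			return ret; /* a hit needs no retry */

		/* Keep the bucket loads above the counter re-read */
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
		cnt_a = __atomic_load_n(tbl_chng_cnt, __ATOMIC_ACQUIRE);
	} while (cnt_b != cnt_a); /* counter moved: retry the miss */

	return -ENOENT;
}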
Fixes: e605a1d36 ("hash: add lock-free r/w concurrency")
Cc: honnappa.nagarahalli@arm.com
Suggested-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
Signed-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
Reviewed-by: Gavin Hu <gavin.hu@arm.com>
---
lib/librte_hash/rte_cuckoo_hash.c | 325 ++++++++++++++++++++++++++++++
1 file changed, 325 insertions(+)
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 5ddcccd87..874d77f1c 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -1162,6 +1162,39 @@ search_one_bucket(const struct rte_hash *h, const void *key, uint16_t sig,
return -1;
}
+/* Search one bucket to find the match key */
+static inline int32_t
+search_one_bucket_lf(const struct rte_hash *h, const void *key, uint16_t sig,
+ void **data, const struct rte_hash_bucket *bkt)
+{
+ int i;
+ uint32_t key_idx;
+ void *pdata;
+ struct rte_hash_key *k, *keys = h->key_store;
+
+ for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
+ key_idx = __atomic_load_n(&bkt->key_idx[i],
+ __ATOMIC_ACQUIRE);
+ if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) {
+ k = (struct rte_hash_key *) ((char *)keys +
+ key_idx * h->key_entry_size);
+ pdata = __atomic_load_n(&k->pdata,
+ __ATOMIC_ACQUIRE);
+
+ if (rte_hash_cmp_eq(key, k->key, h) == 0) {
+ if (data != NULL)
+ *data = pdata;
+ /*
+ * Return index where key is stored,
+ * subtracting the first dummy index
+ */
+ return key_idx - 1;
+ }
+ }
+ }
+ return -1;
+}
+
static inline int32_t
__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
hash_sig_t sig, void **data)
@@ -1227,6 +1260,71 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
return -ENOENT;
}
+static inline int32_t
+__rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key,
+ hash_sig_t sig, void **data)
+{
+ uint32_t prim_bucket_idx, sec_bucket_idx;
+ struct rte_hash_bucket *bkt, *cur_bkt;
+ uint32_t cnt_b, cnt_a;
+ int ret;
+ uint16_t short_sig;
+
+ short_sig = get_short_sig(sig);
+ prim_bucket_idx = get_prim_bucket_index(h, sig);
+ sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
+
+ __hash_rw_reader_lock(h);
+
+ do {
+ /* Load the table change counter before the lookup
+ * starts. Acquire semantics will make sure that
+ * loads in search_one_bucket are not hoisted.
+ */
+ cnt_b = __atomic_load_n(h->tbl_chng_cnt,
+ __ATOMIC_ACQUIRE);
+
+ /* Check if key is in primary location */
+ bkt = &h->buckets[prim_bucket_idx];
+ ret = search_one_bucket(h, key, short_sig, data, bkt);
+ if (ret != -1) {
+ __hash_rw_reader_unlock(h);
+ return ret;
+ }
+ /* Calculate secondary hash */
+ bkt = &h->buckets[sec_bucket_idx];
+
+ /* Check if key is in secondary location */
+ FOR_EACH_BUCKET(cur_bkt, bkt) {
+ ret = search_one_bucket(h, key, short_sig,
+ data, cur_bkt);
+ if (ret != -1) {
+ __hash_rw_reader_unlock(h);
+ return ret;
+ }
+ }
+
+ /* The loads of sig_current in search_one_bucket
+ * should not move below the load from tbl_chng_cnt.
+ */
+ __atomic_thread_fence(__ATOMIC_ACQUIRE);
+ /* Re-read the table change counter to check if the
+ * table has changed during search. If yes, re-do
+ * the search.
+ * This load should not get hoisted. The load
+ * acquires on cnt_b, key index in primary bucket
+ * and key index in secondary bucket will make sure
+ * that it does not get hoisted.
+ */
+ cnt_a = __atomic_load_n(h->tbl_chng_cnt,
+ __ATOMIC_ACQUIRE);
+ } while (cnt_b != cnt_a);
+
+ __hash_rw_reader_unlock(h);
+
+ return -ENOENT;
+}
+
int32_t
rte_hash_lookup_with_hash(const struct rte_hash *h,
const void *key, hash_sig_t sig)
@@ -1754,6 +1852,233 @@ __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
*hit_mask = hits;
}
+static inline void
+__rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ uint64_t hits = 0;
+ int32_t i;
+ int32_t ret;
+ uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
+ uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
+ struct rte_hash_bucket *cur_bkt, *next_bkt;
+ void *pdata[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t cnt_b, cnt_a;
+
+ /* Prefetch first keys */
+ for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
+ rte_prefetch0(keys[i]);
+
+ /*
+ * Prefetch rest of the keys, calculate primary and
+ * secondary bucket and prefetch them
+ */
+ for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
+ rte_prefetch0(keys[i + PREFETCH_OFFSET]);
+
+ prim_hash[i] = rte_hash_hash(h, keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+
+ /* Calculate and prefetch rest of the buckets */
+ for (; i < num_keys; i++) {
+ prim_hash[i] = rte_hash_hash(h, keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+
+ __hash_rw_reader_lock(h);
+ do {
+ /* Load the table change counter before the lookup
+ * starts. Acquire semantics will make sure that
+ * loads in compare_signatures are not hoisted.
+ */
+ cnt_b = __atomic_load_n(h->tbl_chng_cnt,
+ __ATOMIC_ACQUIRE);
+
+ /* Compare signatures and prefetch key slot of first hit */
+ for (i = 0; i < num_keys; i++) {
+ compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
+ primary_bkt[i], secondary_bkt[i],
+ sig[i], h->sig_cmp_fn);
+
+ if (prim_hitmask[i]) {
+ uint32_t first_hit =
+ __builtin_ctzl(prim_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ primary_bkt[i]->key_idx[first_hit];
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+ rte_prefetch0(key_slot);
+ continue;
+ }
+
+ if (sec_hitmask[i]) {
+ uint32_t first_hit =
+ __builtin_ctzl(sec_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ secondary_bkt[i]->key_idx[first_hit];
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+ rte_prefetch0(key_slot);
+ }
+ }
+
+ /* Compare keys, first hits in primary first */
+ for (i = 0; i < num_keys; i++) {
+ positions[i] = -ENOENT;
+ while (prim_hitmask[i]) {
+ uint32_t hit_index =
+ __builtin_ctzl(prim_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ __atomic_load_n(
+ &primary_bkt[i]->key_idx[hit_index],
+ __ATOMIC_ACQUIRE);
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+
+ if (key_idx != EMPTY_SLOT)
+ pdata[i] = __atomic_load_n(
+ &key_slot->pdata,
+ __ATOMIC_ACQUIRE);
+ /*
+ * If key index is 0, do not compare key,
+ * as it is checking the dummy slot
+ */
+ if (!!key_idx &
+ !rte_hash_cmp_eq(
+ key_slot->key, keys[i], h)) {
+ if (data != NULL)
+ data[i] = pdata[i];
+
+ hits |= 1ULL << i;
+ positions[i] = key_idx - 1;
+ goto next_key;
+ }
+ prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
+ }
+
+ while (sec_hitmask[i]) {
+ uint32_t hit_index =
+ __builtin_ctzl(sec_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ __atomic_load_n(
+ &secondary_bkt[i]->key_idx[hit_index],
+ __ATOMIC_ACQUIRE);
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+
+ if (key_idx != EMPTY_SLOT)
+ pdata[i] = __atomic_load_n(
+ &key_slot->pdata,
+ __ATOMIC_ACQUIRE);
+ /*
+ * If key index is 0, do not compare key,
+ * as it is checking the dummy slot
+ */
+
+ if (!!key_idx &
+ !rte_hash_cmp_eq(
+ key_slot->key, keys[i], h)) {
+ if (data != NULL)
+ data[i] = pdata[i];
+
+ hits |= 1ULL << i;
+ positions[i] = key_idx - 1;
+ goto next_key;
+ }
+ sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
+ }
+next_key:
+ continue;
+ }
+
+ /* The loads of sig_current in compare_signatures
+ * should not move below the load from tbl_chng_cnt.
+ */
+ __atomic_thread_fence(__ATOMIC_ACQUIRE);
+ /* Re-read the table change counter to check if the
+ * table has changed during search. If yes, re-do
+ * the search.
+ * This load should not get hoisted. The load
+ * acquires on cnt_b, primary key index and secondary
+ * key index will make sure that it does not get
+ * hoisted.
+ */
+ cnt_a = __atomic_load_n(h->tbl_chng_cnt,
+ __ATOMIC_ACQUIRE);
+ } while (cnt_b != cnt_a);
+
+ /* all found, do not need to go through ext bkt */
+ if ((hits == ((1ULL << num_keys) - 1)) || !h->ext_table_support) {
+ if (hit_mask != NULL)
+ *hit_mask = hits;
+ __hash_rw_reader_unlock(h);
+ return;
+ }
+
+ /* need to check ext buckets for match */
+ for (i = 0; i < num_keys; i++) {
+ if ((hits & (1ULL << i)) != 0)
+ continue;
+ next_bkt = secondary_bkt[i]->next;
+ FOR_EACH_BUCKET(cur_bkt, next_bkt) {
+ if (data != NULL)
+ ret = search_one_bucket(h, keys[i],
+ sig[i], &data[i], cur_bkt);
+ else
+ ret = search_one_bucket(h, keys[i],
+ sig[i], NULL, cur_bkt);
+ if (ret != -1) {
+ positions[i] = ret;
+ hits |= 1ULL << i;
+ break;
+ }
+ }
+ }
+
+ __hash_rw_reader_unlock(h);
+
+ if (hit_mask != NULL)
+ *hit_mask = hits;
+}
+
int
rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
uint32_t num_keys, int32_t *positions)
--
2.17.1
* [dpdk-dev] [PATCH 2/4] hash: remove rw-lock calls from lock-free functions
From: Honnappa Nagarahalli @ 2018-11-09 16:39 UTC (permalink / raw)
To: bruce.richardson, pablo.de.lara.guarch
Cc: dev, jerin.jacob, hemant.agrawal, chaozhu, yipeng1.wang,
dharmik.thakkar, gavin.hu, honnappa.nagarahalli, nd
Remove the rw-lock calls from the lock-free versions of the
lookup functions.
This is an intermediate commit meant to ease the
review process.
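The rw-lock is redundant on this path because the writer pairs with
the reader retry loop through the table change counter. A rough sketch
of that writer-side pairing, with hypothetical names (the real
sequence lives in the cuckoo displacement path and is not reproduced
exactly here):

#include <stdint.h>

/* A writer bumps the counter with release semantics around a key
 * relocation; a reader whose search window overlaps the move sees
 * different counter values before and after its search and retries.
 */
static void
relocate_key_sketch(uint32_t *tbl_chng_cnt)
{
	__atomic_store_n(tbl_chng_cnt, *tbl_chng_cnt + 1,
			 __ATOMIC_RELEASE);

	/* ... move the entry between primary/secondary buckets ... */
}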
Fixes: e605a1d36 ("hash: add lock-free r/w concurrency")
Cc: honnappa.nagarahalli@arm.com
Suggested-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
Signed-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
Reviewed-by: Gavin Hu <gavin.hu@arm.com>
---
lib/librte_hash/rte_cuckoo_hash.c | 15 ++++-----------
1 file changed, 4 insertions(+), 11 deletions(-)
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 874d77f1c..e6b84c6bc 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -1274,8 +1274,6 @@ __rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key,
prim_bucket_idx = get_prim_bucket_index(h, sig);
sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
- __hash_rw_reader_lock(h);
-
do {
/* Load the table change counter before the lookup
* starts. Acquire semantics will make sure that
@@ -1286,7 +1284,7 @@ __rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key,
/* Check if key is in primary location */
bkt = &h->buckets[prim_bucket_idx];
- ret = search_one_bucket(h, key, short_sig, data, bkt);
+ ret = search_one_bucket_lf(h, key, short_sig, data, bkt);
if (ret != -1) {
__hash_rw_reader_unlock(h);
return ret;
@@ -1296,7 +1294,7 @@ __rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key,
/* Check if key is in secondary location */
FOR_EACH_BUCKET(cur_bkt, bkt) {
- ret = search_one_bucket(h, key, short_sig,
+ ret = search_one_bucket_lf(h, key, short_sig,
data, cur_bkt);
if (ret != -1) {
__hash_rw_reader_unlock(h);
@@ -1320,8 +1318,6 @@ __rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key,
__ATOMIC_ACQUIRE);
} while (cnt_b != cnt_a);
- __hash_rw_reader_unlock(h);
-
return -ENOENT;
}
@@ -1911,7 +1907,6 @@ __rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
rte_prefetch0(secondary_bkt[i]);
}
- __hash_rw_reader_lock(h);
do {
/* Load the table change counter before the lookup
* starts. Acquire semantics will make sure that
@@ -2060,10 +2055,10 @@ __rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
next_bkt = secondary_bkt[i]->next;
FOR_EACH_BUCKET(cur_bkt, next_bkt) {
if (data != NULL)
- ret = search_one_bucket(h, keys[i],
+ ret = search_one_bucket_lf(h, keys[i],
sig[i], &data[i], cur_bkt);
else
- ret = search_one_bucket(h, keys[i],
+ ret = search_one_bucket_lf(h, keys[i],
sig[i], NULL, cur_bkt);
if (ret != -1) {
positions[i] = ret;
@@ -2073,8 +2068,6 @@ __rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
}
}
- __hash_rw_reader_unlock(h);
-
if (hit_mask != NULL)
*hit_mask = hits;
}
--
2.17.1
* [dpdk-dev] [PATCH 3/4] hash: remove memory orderings from rw-lock lookup fns
From: Honnappa Nagarahalli @ 2018-11-09 16:39 UTC (permalink / raw)
To: bruce.richardson, pablo.de.lara.guarch
Cc: dev, jerin.jacob, hemant.agrawal, chaozhu, yipeng1.wang,
dharmik.thakkar, gavin.hu, honnappa.nagarahalli, nd
Remove the memory orderings from the lookup functions
that use the rw-lock.
This is an intermediate commit meant to ease the
review process.
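Plain loads are sufficient here because the rw-lock already serializes
readers against writers; the acquire orderings only add cost on this
path. A minimal illustration of the two reader variants (hypothetical
struct, not the exact DPDK bucket layout):

#include <stdint.h>

#define ENTRIES 8

struct bucket_sketch {
	uint16_t sig_current[ENTRIES];
	uint32_t key_idx[ENTRIES];
};

/* Lock-free variant: the acquire load orders the index read
 * against the key/data reads that follow it.
 */
static uint32_t
read_key_idx_lf(const struct bucket_sketch *bkt, int i)
{
	return __atomic_load_n(&bkt->key_idx[i], __ATOMIC_ACQUIRE);
}

/* Locked variant: the rw-lock provides the ordering, so a plain
 * load is enough (this is what the patch restores).
 */
static uint32_t
read_key_idx_l(const struct bucket_sketch *bkt, int i)
{
	return bkt->key_idx[i];
}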
Fixes: e605a1d36 ("hash: add lock-free r/w concurrency")
Cc: honnappa.nagarahalli@arm.com
Suggested-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
Signed-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
Reviewed-by: Gavin Hu <gavin.hu@arm.com>
---
lib/librte_hash/rte_cuckoo_hash.c | 277 +++++++++++-------------------
1 file changed, 105 insertions(+), 172 deletions(-)
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index e6b84c6bc..9390dc5e4 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -1135,27 +1135,22 @@ search_one_bucket(const struct rte_hash *h, const void *key, uint16_t sig,
void **data, const struct rte_hash_bucket *bkt)
{
int i;
- uint32_t key_idx;
- void *pdata;
struct rte_hash_key *k, *keys = h->key_store;
for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
- key_idx = __atomic_load_n(&bkt->key_idx[i],
- __ATOMIC_ACQUIRE);
- if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) {
+ if (bkt->sig_current[i] == sig &&
+ bkt->key_idx[i] != EMPTY_SLOT) {
k = (struct rte_hash_key *) ((char *)keys +
- key_idx * h->key_entry_size);
- pdata = __atomic_load_n(&k->pdata,
- __ATOMIC_ACQUIRE);
+ bkt->key_idx[i] * h->key_entry_size);
if (rte_hash_cmp_eq(key, k->key, h) == 0) {
if (data != NULL)
- *data = pdata;
+ *data = k->pdata;
/*
* Return index where key is stored,
* subtracting the first dummy index
*/
- return key_idx - 1;
+ return bkt->key_idx[i] - 1;
}
}
}
@@ -1201,7 +1196,6 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
{
uint32_t prim_bucket_idx, sec_bucket_idx;
struct rte_hash_bucket *bkt, *cur_bkt;
- uint32_t cnt_b, cnt_a;
int ret;
uint16_t short_sig;
@@ -1211,49 +1205,25 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
__hash_rw_reader_lock(h);
- do {
- /* Load the table change counter before the lookup
- * starts. Acquire semantics will make sure that
- * loads in search_one_bucket are not hoisted.
- */
- cnt_b = __atomic_load_n(h->tbl_chng_cnt,
- __ATOMIC_ACQUIRE);
+ /* Check if key is in primary location */
+ bkt = &h->buckets[prim_bucket_idx];
+ ret = search_one_bucket(h, key, short_sig, data, bkt);
+ if (ret != -1) {
+ __hash_rw_reader_unlock(h);
+ return ret;
+ }
+ /* Calculate secondary hash */
+ bkt = &h->buckets[sec_bucket_idx];
- /* Check if key is in primary location */
- bkt = &h->buckets[prim_bucket_idx];
- ret = search_one_bucket(h, key, short_sig, data, bkt);
+ /* Check if key is in secondary location */
+ FOR_EACH_BUCKET(cur_bkt, bkt) {
+ ret = search_one_bucket(h, key, short_sig,
+ data, cur_bkt);
if (ret != -1) {
__hash_rw_reader_unlock(h);
return ret;
}
- /* Calculate secondary hash */
- bkt = &h->buckets[sec_bucket_idx];
-
- /* Check if key is in secondary location */
- FOR_EACH_BUCKET(cur_bkt, bkt) {
- ret = search_one_bucket(h, key, short_sig,
- data, cur_bkt);
- if (ret != -1) {
- __hash_rw_reader_unlock(h);
- return ret;
- }
- }
-
- /* The loads of sig_current in search_one_bucket
- * should not move below the load from tbl_chng_cnt.
- */
- __atomic_thread_fence(__ATOMIC_ACQUIRE);
- /* Re-read the table change counter to check if the
- * table has changed during search. If yes, re-do
- * the search.
- * This load should not get hoisted. The load
- * acquires on cnt_b, key index in primary bucket
- * and key index in secondary bucket will make sure
- * that it does not get hoisted.
- */
- cnt_a = __atomic_load_n(h->tbl_chng_cnt,
- __ATOMIC_ACQUIRE);
- } while (cnt_b != cnt_a);
+ }
__hash_rw_reader_unlock(h);
@@ -1638,8 +1608,6 @@ __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
struct rte_hash_bucket *cur_bkt, *next_bkt;
- void *pdata[RTE_HASH_LOOKUP_BULK_MAX];
- uint32_t cnt_b, cnt_a;
/* Prefetch first keys */
for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
@@ -1681,138 +1649,103 @@ __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
}
__hash_rw_reader_lock(h);
- do {
- /* Load the table change counter before the lookup
- * starts. Acquire semantics will make sure that
- * loads in compare_signatures are not hoisted.
- */
- cnt_b = __atomic_load_n(h->tbl_chng_cnt,
- __ATOMIC_ACQUIRE);
-
- /* Compare signatures and prefetch key slot of first hit */
- for (i = 0; i < num_keys; i++) {
- compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
- primary_bkt[i], secondary_bkt[i],
- sig[i], h->sig_cmp_fn);
-
- if (prim_hitmask[i]) {
- uint32_t first_hit =
- __builtin_ctzl(prim_hitmask[i])
- >> 1;
- uint32_t key_idx =
- primary_bkt[i]->key_idx[first_hit];
- const struct rte_hash_key *key_slot =
- (const struct rte_hash_key *)(
- (const char *)h->key_store +
- key_idx * h->key_entry_size);
- rte_prefetch0(key_slot);
- continue;
- }
- if (sec_hitmask[i]) {
- uint32_t first_hit =
- __builtin_ctzl(sec_hitmask[i])
- >> 1;
- uint32_t key_idx =
- secondary_bkt[i]->key_idx[first_hit];
- const struct rte_hash_key *key_slot =
- (const struct rte_hash_key *)(
- (const char *)h->key_store +
- key_idx * h->key_entry_size);
- rte_prefetch0(key_slot);
- }
+ /* Compare signatures and prefetch key slot of first hit */
+ for (i = 0; i < num_keys; i++) {
+ compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
+ primary_bkt[i], secondary_bkt[i],
+ sig[i], h->sig_cmp_fn);
+
+ if (prim_hitmask[i]) {
+ uint32_t first_hit =
+ __builtin_ctzl(prim_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ primary_bkt[i]->key_idx[first_hit];
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+ rte_prefetch0(key_slot);
+ continue;
}
- /* Compare keys, first hits in primary first */
- for (i = 0; i < num_keys; i++) {
- positions[i] = -ENOENT;
- while (prim_hitmask[i]) {
- uint32_t hit_index =
- __builtin_ctzl(prim_hitmask[i])
- >> 1;
- uint32_t key_idx =
- __atomic_load_n(
- &primary_bkt[i]->key_idx[hit_index],
- __ATOMIC_ACQUIRE);
- const struct rte_hash_key *key_slot =
- (const struct rte_hash_key *)(
- (const char *)h->key_store +
- key_idx * h->key_entry_size);
+ if (sec_hitmask[i]) {
+ uint32_t first_hit =
+ __builtin_ctzl(sec_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ secondary_bkt[i]->key_idx[first_hit];
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+ rte_prefetch0(key_slot);
+ }
+ }
- if (key_idx != EMPTY_SLOT)
- pdata[i] = __atomic_load_n(
- &key_slot->pdata,
- __ATOMIC_ACQUIRE);
- /*
- * If key index is 0, do not compare key,
- * as it is checking the dummy slot
- */
- if (!!key_idx &
- !rte_hash_cmp_eq(
- key_slot->key, keys[i], h)) {
- if (data != NULL)
- data[i] = pdata[i];
+ /* Compare keys, first hits in primary first */
+ for (i = 0; i < num_keys; i++) {
+ positions[i] = -ENOENT;
+ while (prim_hitmask[i]) {
+ uint32_t hit_index =
+ __builtin_ctzl(prim_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ primary_bkt[i]->key_idx[hit_index];
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+
+ /*
+ * If key index is 0, do not compare key,
+ * as it is checking the dummy slot
+ */
+ if (!!key_idx &
+ !rte_hash_cmp_eq(
+ key_slot->key, keys[i], h)) {
+ if (data != NULL)
+ data[i] = key_slot->pdata;
- hits |= 1ULL << i;
- positions[i] = key_idx - 1;
- goto next_key;
- }
- prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
+ hits |= 1ULL << i;
+ positions[i] = key_idx - 1;
+ goto next_key;
}
+ prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
+ }
- while (sec_hitmask[i]) {
- uint32_t hit_index =
- __builtin_ctzl(sec_hitmask[i])
- >> 1;
- uint32_t key_idx =
- __atomic_load_n(
- &secondary_bkt[i]->key_idx[hit_index],
- __ATOMIC_ACQUIRE);
- const struct rte_hash_key *key_slot =
- (const struct rte_hash_key *)(
- (const char *)h->key_store +
- key_idx * h->key_entry_size);
-
- if (key_idx != EMPTY_SLOT)
- pdata[i] = __atomic_load_n(
- &key_slot->pdata,
- __ATOMIC_ACQUIRE);
- /*
- * If key index is 0, do not compare key,
- * as it is checking the dummy slot
- */
+ while (sec_hitmask[i]) {
+ uint32_t hit_index =
+ __builtin_ctzl(sec_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ secondary_bkt[i]->key_idx[hit_index];
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+
+ /*
+ * If key index is 0, do not compare key,
+ * as it is checking the dummy slot
+ */
- if (!!key_idx &
- !rte_hash_cmp_eq(
- key_slot->key, keys[i], h)) {
- if (data != NULL)
- data[i] = pdata[i];
+ if (!!key_idx &
+ !rte_hash_cmp_eq(
+ key_slot->key, keys[i], h)) {
+ if (data != NULL)
+ data[i] = key_slot->pdata;
- hits |= 1ULL << i;
- positions[i] = key_idx - 1;
- goto next_key;
- }
- sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
+ hits |= 1ULL << i;
+ positions[i] = key_idx - 1;
+ goto next_key;
}
-next_key:
- continue;
+ sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
}
-
- /* The loads of sig_current in compare_signatures
- * should not move below the load from tbl_chng_cnt.
- */
- __atomic_thread_fence(__ATOMIC_ACQUIRE);
- /* Re-read the table change counter to check if the
- * table has changed during search. If yes, re-do
- * the search.
- * This load should not get hoisted. The load
- * acquires on cnt_b, primary key index and secondary
- * key index will make sure that it does not get
- * hoisted.
- */
- cnt_a = __atomic_load_n(h->tbl_chng_cnt,
- __ATOMIC_ACQUIRE);
- } while (cnt_b != cnt_a);
+next_key:
+ continue;
+ }
/* all found, do not need to go through ext bkt */
if ((hits == ((1ULL << num_keys) - 1)) || !h->ext_table_support) {
--
2.17.1
* Re: [dpdk-dev] [PATCH 3/4] hash: remove memory orderings from rw-lock lookup fns
From: Jerin Jacob @ 2018-11-10 8:51 UTC (permalink / raw)
To: Honnappa Nagarahalli
Cc: bruce.richardson, pablo.de.lara.guarch, dev, hemant.agrawal,
chaozhu, yipeng1.wang, dharmik.thakkar, gavin.hu, nd
> Remove the memory orderings from the lookup functions
> that use the rw-lock.
> This is an intermediate commit meant to ease the
> review process.
>
> Fixes: e605a1d36 ("hash: add lock-free r/w concurrency")
> Cc: honnappa.nagarahalli@arm.com
>
> Suggested-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
> Signed-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
> Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
> Reviewed-by: Gavin Hu <gavin.hu@arm.com>
> ---
> lib/librte_hash/rte_cuckoo_hash.c | 277 +++++++++++-------------------
> 1 file changed, 105 insertions(+), 172 deletions(-)
>
> diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
> index e6b84c6bc..9390dc5e4 100644
> --- a/lib/librte_hash/rte_cuckoo_hash.c
> +++ b/lib/librte_hash/rte_cuckoo_hash.c
> @@ -1135,27 +1135,22 @@ search_one_bucket(const struct rte_hash *h, const void *key, uint16_t sig,
> void **data, const struct rte_hash_bucket *bkt)
> {
> int i;
> - uint32_t key_idx;
> - void *pdata;
> struct rte_hash_key *k, *keys = h->key_store;
>
> for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
> - key_idx = __atomic_load_n(&bkt->key_idx[i],
> - __ATOMIC_ACQUIRE);
> - if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) {
> + if (bkt->sig_current[i] == sig &&
> + bkt->key_idx[i] != EMPTY_SLOT) {
> k = (struct rte_hash_key *) ((char *)keys +
> - key_idx * h->key_entry_size);
> - pdata = __atomic_load_n(&k->pdata,
> - __ATOMIC_ACQUIRE);
> + bkt->key_idx[i] * h->key_entry_size);
>
> if (rte_hash_cmp_eq(key, k->key, h) == 0) {
> if (data != NULL)
> - *data = pdata;
> + *data = k->pdata;
> /*
> * Return index where key is stored,
> * subtracting the first dummy index
> */
> - return key_idx - 1;
> + return bkt->key_idx[i] - 1;
> }
> }
> }
> @@ -1201,7 +1196,6 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
> {
> uint32_t prim_bucket_idx, sec_bucket_idx;
> struct rte_hash_bucket *bkt, *cur_bkt;
> - uint32_t cnt_b, cnt_a;
> int ret;
> uint16_t short_sig;
>
> @@ -1211,49 +1205,25 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
>
> __hash_rw_reader_lock(h);
>
> - do {
> - /* Load the table change counter before the lookup
> - * starts. Acquire semantics will make sure that
> - * loads in search_one_bucket are not hoisted.
> - */
> - cnt_b = __atomic_load_n(h->tbl_chng_cnt,
> - __ATOMIC_ACQUIRE);
> + /* Check if key is in primary location */
> + bkt = &h->buckets[prim_bucket_idx];
In the original version, this bkt assignment is before __hash_rw_reader_lock().
This is causing a performance issue in the lookup 'hit' case.
The following change fixes it, i.e. brings it back to the original version.
[master]83xx1.2[dpdk]# git diff
diff --git a/lib/librte_hash/rte_cuckoo_hash.c
b/lib/librte_hash/rte_cuckoo_hash.c
index 7e1a9ac96..bc8a55f0f 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -1204,10 +1204,11 @@ __rte_hash_lookup_with_hash_l(const struct
rte_hash *h, const void *key,
prim_bucket_idx = get_prim_bucket_index(h, sig);
sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx,
short_sig);
- __hash_rw_reader_lock(h);
-
/* Check if key is in primary location */
bkt = &h->buckets[prim_bucket_idx];
+
+ __hash_rw_reader_lock(h);
+
ret = search_one_bucket_l(h, key, short_sig, data, bkt);
if (ret != -1) {
__hash_rw_reader_unlock(h);
Could you send the final version that needs to be taken into the tree,
i.e. remove the intermediate commits meant only for review purposes?
I can then test with that.
* Re: [dpdk-dev] [PATCH 3/4] hash: remove memory orderings from rw-lock lookup fns
From: Honnappa Nagarahalli @ 2018-11-10 18:58 UTC (permalink / raw)
To: Jerin Jacob
Cc: bruce.richardson, pablo.de.lara.guarch, dev, hemant.agrawal,
chaozhu, yipeng1.wang, Dharmik Thakkar,
Gavin Hu (Arm Technology China),
nd, nd
> > @@ -1211,49 +1205,25 @@ __rte_hash_lookup_with_hash(const struct
> > rte_hash *h, const void *key,
> >
> > __hash_rw_reader_lock(h);
> >
> > - do {
> > - /* Load the table change counter before the lookup
> > - * starts. Acquire semantics will make sure that
> > - * loads in search_one_bucket are not hoisted.
> > - */
> > - cnt_b = __atomic_load_n(h->tbl_chng_cnt,
> > - __ATOMIC_ACQUIRE);
> > + /* Check if key is in primary location */
> > + bkt = &h->buckets[prim_bucket_idx];
>
>
> In the original version, this bkt assignment is before __hash_rw_reader_lock().
> This is causing a performance issue in the lookup 'hit' case.
>
> The following change fixes it, i.e. brings it back to the original version.
>
> [master]83xx1.2[dpdk]# git diff
> diff --git a/lib/librte_hash/rte_cuckoo_hash.c
> b/lib/librte_hash/rte_cuckoo_hash.c
> index 7e1a9ac96..bc8a55f0f 100644
> --- a/lib/librte_hash/rte_cuckoo_hash.c
> +++ b/lib/librte_hash/rte_cuckoo_hash.c
> @@ -1204,10 +1204,11 @@ __rte_hash_lookup_with_hash_l(const struct
> rte_hash *h, const void *key,
> prim_bucket_idx = get_prim_bucket_index(h, sig);
> sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
>
> - __hash_rw_reader_lock(h);
> -
> /* Check if key is in primary location */
> bkt = &h->buckets[prim_bucket_idx];
> +
> + __hash_rw_reader_lock(h);
> +
> ret = search_one_bucket_l(h, key, short_sig, data, bkt);
> if (ret != -1) {
> __hash_rw_reader_unlock(h);
>
>
> Could you send the final version that needs to be taken into the tree,
> i.e. remove the intermediate commits meant only for review purposes?
> I can then test with that.
Thanks, Jerin, for testing. I have sent out v2.
* [dpdk-dev] [PATCH 4/4] hash: separate lf and rw lock lookup code paths
From: Honnappa Nagarahalli @ 2018-11-09 16:39 UTC (permalink / raw)
To: bruce.richardson, pablo.de.lara.guarch
Cc: dev, jerin.jacob, hemant.agrawal, chaozhu, yipeng1.wang,
dharmik.thakkar, gavin.hu, honnappa.nagarahalli, nd
The lock-free algorithm has caused a significant lookup
performance regression for certain use cases. The
regression is attributed to the use of non-relaxed
memory orderings. Two versions of the lookup functions
are created: one that uses the RW lock and one that
is lock-free. This restores the performance for use
cases that used the RW lock version of the
lookup function.
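Which variant runs is decided solely by the flags the table was
created with; a usage sketch, assuming the standard rte_hash creation
API (flag names as in rte_hash.h):

#include <stdint.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>

/* The extra_flag chosen at creation time selects the lookup path:
 * RW_CONCURRENCY_LF -> lock-free variants, RW_CONCURRENCY -> rw-lock.
 */
static struct rte_hash *
create_table_sketch(int socket_id, int lock_free)
{
	struct rte_hash_parameters params = {
		.name = "example_hash",
		.entries = 1024,
		.key_len = sizeof(uint32_t),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = socket_id,
		.extra_flag = lock_free ?
			RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF :
			RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY,
	};

	return rte_hash_create(&params);
}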
Fixes: e605a1d36 ("hash: add lock-free r/w concurrency")
Cc: honnappa.nagarahalli@arm.com
Suggested-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
Signed-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
Reviewed-by: Gavin Hu <gavin.hu@arm.com>
---
lib/librte_hash/rte_cuckoo_hash.c | 44 ++++++++++++++++++++++++-------
1 file changed, 34 insertions(+), 10 deletions(-)
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 9390dc5e4..7e1a9ac96 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -1129,10 +1129,11 @@ rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
return ret;
}
-/* Search one bucket to find the match key */
+/* Search one bucket to find the match key - uses rw lock */
static inline int32_t
-search_one_bucket(const struct rte_hash *h, const void *key, uint16_t sig,
- void **data, const struct rte_hash_bucket *bkt)
+search_one_bucket_l(const struct rte_hash *h, const void *key,
+ uint16_t sig, void **data,
+ const struct rte_hash_bucket *bkt)
{
int i;
struct rte_hash_key *k, *keys = h->key_store;
@@ -1191,8 +1192,8 @@ search_one_bucket_lf(const struct rte_hash *h, const void *key, uint16_t sig,
}
static inline int32_t
-__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
- hash_sig_t sig, void **data)
+__rte_hash_lookup_with_hash_l(const struct rte_hash *h, const void *key,
+ hash_sig_t sig, void **data)
{
uint32_t prim_bucket_idx, sec_bucket_idx;
struct rte_hash_bucket *bkt, *cur_bkt;
@@ -1207,7 +1208,7 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
/* Check if key is in primary location */
bkt = &h->buckets[prim_bucket_idx];
- ret = search_one_bucket(h, key, short_sig, data, bkt);
+ ret = search_one_bucket_l(h, key, short_sig, data, bkt);
if (ret != -1) {
__hash_rw_reader_unlock(h);
return ret;
@@ -1217,7 +1218,7 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
/* Check if key is in secondary location */
FOR_EACH_BUCKET(cur_bkt, bkt) {
- ret = search_one_bucket(h, key, short_sig,
+ ret = search_one_bucket_l(h, key, short_sig,
data, cur_bkt);
if (ret != -1) {
__hash_rw_reader_unlock(h);
@@ -1291,6 +1292,16 @@ __rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key,
return -ENOENT;
}
+static inline int32_t
+__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
+ hash_sig_t sig, void **data)
+{
+ if (h->readwrite_concur_lf_support)
+ return __rte_hash_lookup_with_hash_lf(h, key, sig, data);
+ else
+ return __rte_hash_lookup_with_hash_l(h, key, sig, data);
+}
+
int32_t
rte_hash_lookup_with_hash(const struct rte_hash *h,
const void *key, hash_sig_t sig)
@@ -1592,7 +1603,7 @@ compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
#define PREFETCH_OFFSET 4
static inline void
-__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
+__rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
int32_t num_keys, int32_t *positions,
uint64_t *hit_mask, void *data[])
{
@@ -1762,10 +1773,10 @@ __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
next_bkt = secondary_bkt[i]->next;
FOR_EACH_BUCKET(cur_bkt, next_bkt) {
if (data != NULL)
- ret = search_one_bucket(h, keys[i],
+ ret = search_one_bucket_l(h, keys[i],
sig[i], &data[i], cur_bkt);
else
- ret = search_one_bucket(h, keys[i],
+ ret = search_one_bucket_l(h, keys[i],
sig[i], NULL, cur_bkt);
if (ret != -1) {
positions[i] = ret;
@@ -2005,6 +2016,19 @@ __rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
*hit_mask = hits;
}
+static inline void
+__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ if (h->readwrite_concur_lf_support)
+ return __rte_hash_lookup_bulk_lf(h, keys, num_keys,
+ positions, hit_mask, data);
+ else
+ return __rte_hash_lookup_bulk_l(h, keys, num_keys,
+ positions, hit_mask, data);
+}
+
int
rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
uint32_t num_keys, int32_t *positions)
--
2.17.1
* [dpdk-dev] [PATCH v2 0/1] hash: separate lf and rw lock lookup code paths
From: Honnappa Nagarahalli @ 2018-11-10 18:55 UTC (permalink / raw)
To: bruce.richardson, pablo.de.lara.guarch
Cc: dev, jerin.jacob, hemant.agrawal, chaozhu, yipeng1.wang,
dharmik.thakkar, gavin.hu, honnappa.nagarahalli, nd
The lock-free algorithm has caused a significant lookup
performance regression for certain use cases. The
regression is attributed to the use of non-relaxed
memory orderings.
To address the issue, two versions of the lookup functions
are created: one that uses the RW lock and one that
is lock-free. This restores the performance for use
cases that used the RW lock version of the
lookup function.
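The public lookup entry points are unchanged by the split; callers do
not need to know which internal variant runs. A minimal caller sketch
(assuming a table handle h created elsewhere with 4-byte keys):

#include <stdint.h>
#include <rte_hash.h>

/* Same public API regardless of which internal path runs; the
 * dispatch to the _l or _lf bulk variant happens inside the library.
 */
static void
bulk_lookup_sketch(const struct rte_hash *h)
{
	uint32_t k0 = 1, k1 = 2;
	const void *keys[2] = { &k0, &k1 };
	void *data[2];
	uint64_t hit_mask = 0;

	rte_hash_lookup_bulk_data(h, keys, 2, &hit_mask, data);
	/* bit i of hit_mask set => keys[i] found, data[i] valid */
}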
v2:
1) Adjusted the function call to take the lock in
__rte_hash_lookup_with_hash_l (Jerin)
2) Squash all the intermediate commits into a single one
Honnappa Nagarahalli (1):
hash: separate lf and rw lock lookup code paths
lib/librte_hash/rte_cuckoo_hash.c | 304 ++++++++++++++++++++++++++++--
1 file changed, 290 insertions(+), 14 deletions(-)
--
2.17.1
* [dpdk-dev] [PATCH v2 1/1] hash: separate lf and rw lock lookup code paths
From: Honnappa Nagarahalli @ 2018-11-10 18:55 UTC (permalink / raw)
To: bruce.richardson, pablo.de.lara.guarch
Cc: dev, jerin.jacob, hemant.agrawal, chaozhu, yipeng1.wang,
dharmik.thakkar, gavin.hu, honnappa.nagarahalli, nd
The lock-free algorithm has caused a significant lookup
performance regression for certain use cases. The
regression is attributed to the use of non-relaxed
memory orderings. Two versions of the lookup functions
are created: one that uses the RW lock and one that
is lock-free. This restores the performance for use
cases that used the RW lock version of the
lookup function.
Fixes: e605a1d36 ("hash: add lock-free r/w concurrency")
Cc: honnappa.nagarahalli@arm.com
Suggested-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
Signed-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
Reviewed-by: Gavin Hu <gavin.hu@arm.com>
---
lib/librte_hash/rte_cuckoo_hash.c | 304 ++++++++++++++++++++++++++++--
1 file changed, 290 insertions(+), 14 deletions(-)
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 5ddcccd87..e68bf336b 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -1129,9 +1129,38 @@ rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
return ret;
}
+/* Search one bucket to find the match key - uses rw lock */
+static inline int32_t
+search_one_bucket_l(const struct rte_hash *h, const void *key,
+ uint16_t sig, void **data,
+ const struct rte_hash_bucket *bkt)
+{
+ int i;
+ struct rte_hash_key *k, *keys = h->key_store;
+
+ for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
+ if (bkt->sig_current[i] == sig &&
+ bkt->key_idx[i] != EMPTY_SLOT) {
+ k = (struct rte_hash_key *) ((char *)keys +
+ bkt->key_idx[i] * h->key_entry_size);
+
+ if (rte_hash_cmp_eq(key, k->key, h) == 0) {
+ if (data != NULL)
+ *data = k->pdata;
+ /*
+ * Return index where key is stored,
+ * subtracting the first dummy index
+ */
+ return bkt->key_idx[i] - 1;
+ }
+ }
+ }
+ return -1;
+}
+
/* Search one bucket to find the match key */
static inline int32_t
-search_one_bucket(const struct rte_hash *h, const void *key, uint16_t sig,
+search_one_bucket_lf(const struct rte_hash *h, const void *key, uint16_t sig,
void **data, const struct rte_hash_bucket *bkt)
{
int i;
@@ -1163,12 +1192,11 @@ search_one_bucket(const struct rte_hash *h, const void *key, uint16_t sig,
}
static inline int32_t
-__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
- hash_sig_t sig, void **data)
+__rte_hash_lookup_with_hash_l(const struct rte_hash *h, const void *key,
+ hash_sig_t sig, void **data)
{
uint32_t prim_bucket_idx, sec_bucket_idx;
struct rte_hash_bucket *bkt, *cur_bkt;
- uint32_t cnt_b, cnt_a;
int ret;
uint16_t short_sig;
@@ -1176,8 +1204,48 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
prim_bucket_idx = get_prim_bucket_index(h, sig);
sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
+ bkt = &h->buckets[prim_bucket_idx];
+
__hash_rw_reader_lock(h);
+ /* Check if key is in primary location */
+ ret = search_one_bucket_l(h, key, short_sig, data, bkt);
+ if (ret != -1) {
+ __hash_rw_reader_unlock(h);
+ return ret;
+ }
+ /* Calculate secondary hash */
+ bkt = &h->buckets[sec_bucket_idx];
+
+ /* Check if key is in secondary location */
+ FOR_EACH_BUCKET(cur_bkt, bkt) {
+ ret = search_one_bucket_l(h, key, short_sig,
+ data, cur_bkt);
+ if (ret != -1) {
+ __hash_rw_reader_unlock(h);
+ return ret;
+ }
+ }
+
+ __hash_rw_reader_unlock(h);
+
+ return -ENOENT;
+}
+
+static inline int32_t
+__rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key,
+ hash_sig_t sig, void **data)
+{
+ uint32_t prim_bucket_idx, sec_bucket_idx;
+ struct rte_hash_bucket *bkt, *cur_bkt;
+ uint32_t cnt_b, cnt_a;
+ int ret;
+ uint16_t short_sig;
+
+ short_sig = get_short_sig(sig);
+ prim_bucket_idx = get_prim_bucket_index(h, sig);
+ sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
+
do {
/* Load the table change counter before the lookup
* starts. Acquire semantics will make sure that
@@ -1188,7 +1256,7 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
/* Check if key is in primary location */
bkt = &h->buckets[prim_bucket_idx];
- ret = search_one_bucket(h, key, short_sig, data, bkt);
+ ret = search_one_bucket_lf(h, key, short_sig, data, bkt);
if (ret != -1) {
__hash_rw_reader_unlock(h);
return ret;
@@ -1198,7 +1266,7 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
/* Check if key is in secondary location */
FOR_EACH_BUCKET(cur_bkt, bkt) {
- ret = search_one_bucket(h, key, short_sig,
+ ret = search_one_bucket_lf(h, key, short_sig,
data, cur_bkt);
if (ret != -1) {
__hash_rw_reader_unlock(h);
@@ -1222,11 +1290,19 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
__ATOMIC_ACQUIRE);
} while (cnt_b != cnt_a);
- __hash_rw_reader_unlock(h);
-
return -ENOENT;
}
+static inline int32_t
+__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
+ hash_sig_t sig, void **data)
+{
+ if (h->readwrite_concur_lf_support)
+ return __rte_hash_lookup_with_hash_lf(h, key, sig, data);
+ else
+ return __rte_hash_lookup_with_hash_l(h, key, sig, data);
+}
+
int32_t
rte_hash_lookup_with_hash(const struct rte_hash *h,
const void *key, hash_sig_t sig)
@@ -1528,7 +1604,197 @@ compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
#define PREFETCH_OFFSET 4
static inline void
-__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
+__rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ uint64_t hits = 0;
+ int32_t i;
+ int32_t ret;
+ uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
+ uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
+ struct rte_hash_bucket *cur_bkt, *next_bkt;
+
+ /* Prefetch first keys */
+ for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
+ rte_prefetch0(keys[i]);
+
+ /*
+ * Prefetch rest of the keys, calculate primary and
+ * secondary bucket and prefetch them
+ */
+ for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
+ rte_prefetch0(keys[i + PREFETCH_OFFSET]);
+
+ prim_hash[i] = rte_hash_hash(h, keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+
+ /* Calculate and prefetch rest of the buckets */
+ for (; i < num_keys; i++) {
+ prim_hash[i] = rte_hash_hash(h, keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+
+ __hash_rw_reader_lock(h);
+
+ /* Compare signatures and prefetch key slot of first hit */
+ for (i = 0; i < num_keys; i++) {
+ compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
+ primary_bkt[i], secondary_bkt[i],
+ sig[i], h->sig_cmp_fn);
+
+ if (prim_hitmask[i]) {
+ uint32_t first_hit =
+ __builtin_ctzl(prim_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ primary_bkt[i]->key_idx[first_hit];
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+ rte_prefetch0(key_slot);
+ continue;
+ }
+
+ if (sec_hitmask[i]) {
+ uint32_t first_hit =
+ __builtin_ctzl(sec_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ secondary_bkt[i]->key_idx[first_hit];
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+ rte_prefetch0(key_slot);
+ }
+ }
+
+ /* Compare keys, first hits in primary first */
+ for (i = 0; i < num_keys; i++) {
+ positions[i] = -ENOENT;
+ while (prim_hitmask[i]) {
+ uint32_t hit_index =
+ __builtin_ctzl(prim_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ primary_bkt[i]->key_idx[hit_index];
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+
+ /*
+ * If key index is 0, do not compare key,
+ * as it is checking the dummy slot
+ */
+ if (!!key_idx &
+ !rte_hash_cmp_eq(
+ key_slot->key, keys[i], h)) {
+ if (data != NULL)
+ data[i] = key_slot->pdata;
+
+ hits |= 1ULL << i;
+ positions[i] = key_idx - 1;
+ goto next_key;
+ }
+ prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
+ }
+
+ while (sec_hitmask[i]) {
+ uint32_t hit_index =
+ __builtin_ctzl(sec_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ secondary_bkt[i]->key_idx[hit_index];
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+
+ /*
+ * If key index is 0, do not compare key,
+ * as it is checking the dummy slot
+ */
+
+ if (!!key_idx &
+ !rte_hash_cmp_eq(
+ key_slot->key, keys[i], h)) {
+ if (data != NULL)
+ data[i] = key_slot->pdata;
+
+ hits |= 1ULL << i;
+ positions[i] = key_idx - 1;
+ goto next_key;
+ }
+ sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
+ }
+next_key:
+ continue;
+ }
+
+ /* all found, do not need to go through ext bkt */
+ if ((hits == ((1ULL << num_keys) - 1)) || !h->ext_table_support) {
+ if (hit_mask != NULL)
+ *hit_mask = hits;
+ __hash_rw_reader_unlock(h);
+ return;
+ }
+
+ /* need to check ext buckets for match */
+ for (i = 0; i < num_keys; i++) {
+ if ((hits & (1ULL << i)) != 0)
+ continue;
+ next_bkt = secondary_bkt[i]->next;
+ FOR_EACH_BUCKET(cur_bkt, next_bkt) {
+ if (data != NULL)
+ ret = search_one_bucket_l(h, keys[i],
+ sig[i], &data[i], cur_bkt);
+ else
+ ret = search_one_bucket_l(h, keys[i],
+ sig[i], NULL, cur_bkt);
+ if (ret != -1) {
+ positions[i] = ret;
+ hits |= 1ULL << i;
+ break;
+ }
+ }
+ }
+
+ __hash_rw_reader_unlock(h);
+
+ if (hit_mask != NULL)
+ *hit_mask = hits;
+}
+
+static inline void
+__rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
int32_t num_keys, int32_t *positions,
uint64_t *hit_mask, void *data[])
{
@@ -1586,7 +1852,6 @@ __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
rte_prefetch0(secondary_bkt[i]);
}
- __hash_rw_reader_lock(h);
do {
/* Load the table change counter before the lookup
* starts. Acquire semantics will make sure that
@@ -1735,10 +2000,10 @@ __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
next_bkt = secondary_bkt[i]->next;
FOR_EACH_BUCKET(cur_bkt, next_bkt) {
if (data != NULL)
- ret = search_one_bucket(h, keys[i],
+ ret = search_one_bucket_lf(h, keys[i],
sig[i], &data[i], cur_bkt);
else
- ret = search_one_bucket(h, keys[i],
+ ret = search_one_bucket_lf(h, keys[i],
sig[i], NULL, cur_bkt);
if (ret != -1) {
positions[i] = ret;
@@ -1748,12 +2013,23 @@ __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
}
}
- __hash_rw_reader_unlock(h);
-
if (hit_mask != NULL)
*hit_mask = hits;
}
+static inline void
+__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ if (h->readwrite_concur_lf_support)
+ return __rte_hash_lookup_bulk_lf(h, keys, num_keys,
+ positions, hit_mask, data);
+ else
+ return __rte_hash_lookup_bulk_l(h, keys, num_keys,
+ positions, hit_mask, data);
+}
+
int
rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
uint32_t num_keys, int32_t *positions)
--
2.17.1
* Re: [dpdk-dev] [PATCH v2 1/1] hash: separate lf and rw lock lookup code paths
From: Jerin Jacob @ 2018-11-11 7:48 UTC (permalink / raw)
To: Honnappa Nagarahalli
Cc: bruce.richardson, pablo.de.lara.guarch, dev, hemant.agrawal,
chaozhu, yipeng1.wang, dharmik.thakkar, gavin.hu, nd, thomas,
Kapoor, Prasun
> The lock-free algorithm has caused a significant lookup
> performance regression for certain use cases. The
> regression is attributed to the use of non-relaxed
> memory orderings. Two versions of the lookup functions
> are created: one that uses the RW lock and one that
> is lock-free. This restores the performance for use
> cases that used the RW lock version of the
> lookup function.
>
> Fixes: e605a1d36 ("hash: add lock-free r/w concurrency")
> Cc: honnappa.nagarahalli@arm.com
>
> Suggested-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
> Signed-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
> Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
> Reviewed-by: Gavin Hu <gavin.hu@arm.com>
> ---
Acked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
Tested-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
- The reported l3fwd hash regression on the ARMv8 platform is fixed
with this patch by introducing two different code paths (obviously!!)
- Verified the lock version of lookup() is the same as the e605a1d36~1 changeset
+ Thomas,
If there is no objection, please consider this patch for -RC3.
* Re: [dpdk-dev] [PATCH v2 1/1] hash: separate lf and rw lock lookup code paths
From: Thomas Monjalon @ 2018-11-13 16:37 UTC (permalink / raw)
To: Honnappa Nagarahalli
Cc: dev, Jerin Jacob, bruce.richardson, pablo.de.lara.guarch,
hemant.agrawal, chaozhu, yipeng1.wang, dharmik.thakkar, gavin.hu,
nd, Kapoor, Prasun
11/11/2018 08:48, Jerin Jacob:
> >
> > The lock-free algorithm has caused a significant lookup
> > performance regression for certain use cases. The
> > regression is attributed to the use of non-relaxed
> > memory orderings. Two versions of the lookup functions
> > are created: one that uses the RW lock and one that
> > is lock-free. This restores the performance for use
> > cases that used the RW lock version of the
> > lookup function.
> >
> > Fixes: e605a1d36 ("hash: add lock-free r/w concurrency")
> > Cc: honnappa.nagarahalli@arm.com
> >
> > Suggested-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
> > Signed-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
> > Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
> > Reviewed-by: Gavin Hu <gavin.hu@arm.com>
> > ---
>
> Acked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
> Tested-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
Applied, thanks
* Re: [dpdk-dev] [PATCH v2 1/1] hash: separate lf and rw lock lookup code paths
From: Wang, Yipeng1 @ 2018-11-11 21:43 UTC (permalink / raw)
To: Honnappa Nagarahalli, Richardson, Bruce, De Lara Guarch, Pablo
Cc: dev, jerin.jacob, hemant.agrawal, chaozhu, dharmik.thakkar, gavin.hu, nd
> The lock-free algorithm has caused a significant lookup performance
> regression for certain use cases. The regression is attributed to the use of
> non-relaxed memory orderings. Two versions of the lookup functions are
> created: one that uses the RW lock and one that is lock-free. This restores
> the performance for use cases that used the RW lock version of the lookup
> function.
>
> Fixes: e605a1d36 ("hash: add lock-free r/w concurrency")
> Cc: honnappa.nagarahalli@arm.com
>
> Suggested-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
> Signed-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
> Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
> Reviewed-by: Gavin Hu <gavin.hu@arm.com>
> ---
[Wang, Yipeng]
I tested my modified l3fwd with this new patch applied on an x86 platform and it
does not cause any performance drop anymore.
There is extra code duplication, though, but I believe that in a future version,
together with the fine-grained lock, the duplication could be fixed.
* Re: [dpdk-dev] [PATCH v2 1/1] hash: separate lf and rw lock lookup code paths
From: Honnappa Nagarahalli @ 2018-11-12 4:50 UTC (permalink / raw)
To: Wang, Yipeng1, Richardson, Bruce, De Lara Guarch, Pablo
Cc: dev, jerin.jacob, hemant.agrawal, chaozhu, Dharmik Thakkar,
Gavin Hu (Arm Technology China),
nd, nd
> > Subject: [PATCH v2 1/1] hash: separate lf and rw lock lookup code
> > paths
> >
> > The lock-free algorithm has caused a significant lookup performance
> > regression for certain use cases. The regression is attributed to the
> > use of non-relaxed memory orderings. Two versions of the lookup
> > functions are created: one that uses the RW lock and one that is
> > lock-free. This restores the performance for use cases that used the
> > RW lock version of the lookup function.
> >
> > Fixes: e605a1d36 ("hash: add lock-free r/w concurrency")
> > Cc: honnappa.nagarahalli@arm.com
> >
> > Suggested-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
> > Signed-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
> > Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
> > Reviewed-by: Gavin Hu <gavin.hu@arm.com>
> > ---
> [Wang, Yipeng]
> I tested my modified l3fwd with this new patch applied on an x86 platform and
> it does not cause any performance drop anymore.
>
> There is extra code duplication, though, but I believe that in a future
> version, together with the fine-grained lock, the duplication could be fixed.
>
Thank you, Yipeng, for testing this. Agreed, the code duplication is worrying. I will try to get the optimization patch out soon. Once we have that, we can do the testing and decide how to address the duplication.