From: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
To: bruce.richardson@intel.com, pablo.de.lara.guarch@intel.com
Cc: dev@dpdk.org, jerin.jacob@caviumnetworks.com,
hemant.agrawal@nxp.com, chaozhu@linux.vnet.ibm.com,
yipeng1.wang@intel.com, dharmik.thakkar@arm.com,
gavin.hu@arm.com, honnappa.nagarahalli@arm.com, nd@arm.com
Subject: [dpdk-dev] [PATCH 3/4] hash: remove memory orderings from rw-lock lookup fns
Date: Fri, 9 Nov 2018 10:39:16 -0600 [thread overview]
Message-ID: <20181109163917.16845-4-honnappa.nagarahalli@arm.com> (raw)
In-Reply-To: <20181109163917.16845-1-honnappa.nagarahalli@arm.com>
Remove the memory orderings from the lookup functions that use the
rw-lock.
This is an intermediate commit meant to ease the
review process.
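Under the rw-lock, plain loads are sufficient: the writer takes the
lock in exclusive mode, so a reader can never observe a half-written
entry and does not need the table change counter. Below is a minimal
sketch of the two patterns, for illustration only (bucket_lookup() is
a hypothetical helper standing in for the bucket search; the other
names follow the code in this patch):

	/* Lock-free path: acquire loads paired with the table change
	 * counter, retrying if a writer moved entries mid-search.
	 */
	do {
		cnt_b = __atomic_load_n(h->tbl_chng_cnt,
				__ATOMIC_ACQUIRE);
		ret = bucket_lookup(h, key, sig, data);
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
		cnt_a = __atomic_load_n(h->tbl_chng_cnt,
				__ATOMIC_ACQUIRE);
	} while (cnt_b != cnt_a);

	/* rw-lock path after this patch: the reader lock excludes
	 * writers, so plain loads suffice and no retry is needed.
	 */
	__hash_rw_reader_lock(h);
	ret = bucket_lookup(h, key, sig, data);
	__hash_rw_reader_unlock(h);

The retry loop and acquire semantics remain only in the lock-free
path; patch 4/4 completes the separation of the two code paths.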
Fixes: e605a1d36 ("hash: add lock-free r/w concurrency")
Cc: honnappa.nagarahalli@arm.com
Suggested-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
Signed-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
Reviewed-by: Gavin Hu <gavin.hu@arm.com>
---
lib/librte_hash/rte_cuckoo_hash.c | 277 +++++++++++-------------------
1 file changed, 105 insertions(+), 172 deletions(-)
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index e6b84c6bc..9390dc5e4 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -1135,27 +1135,22 @@ search_one_bucket(const struct rte_hash *h, const void *key, uint16_t sig,
void **data, const struct rte_hash_bucket *bkt)
{
int i;
- uint32_t key_idx;
- void *pdata;
struct rte_hash_key *k, *keys = h->key_store;
for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
- key_idx = __atomic_load_n(&bkt->key_idx[i],
- __ATOMIC_ACQUIRE);
- if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) {
+ if (bkt->sig_current[i] == sig &&
+ bkt->key_idx[i] != EMPTY_SLOT) {
k = (struct rte_hash_key *) ((char *)keys +
- key_idx * h->key_entry_size);
- pdata = __atomic_load_n(&k->pdata,
- __ATOMIC_ACQUIRE);
+ bkt->key_idx[i] * h->key_entry_size);
if (rte_hash_cmp_eq(key, k->key, h) == 0) {
if (data != NULL)
- *data = pdata;
+ *data = k->pdata;
/*
* Return index where key is stored,
* subtracting the first dummy index
*/
- return key_idx - 1;
+ return bkt->key_idx[i] - 1;
}
}
}
@@ -1201,7 +1196,6 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
{
uint32_t prim_bucket_idx, sec_bucket_idx;
struct rte_hash_bucket *bkt, *cur_bkt;
- uint32_t cnt_b, cnt_a;
int ret;
uint16_t short_sig;
@@ -1211,49 +1205,25 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
__hash_rw_reader_lock(h);
- do {
- /* Load the table change counter before the lookup
- * starts. Acquire semantics will make sure that
- * loads in search_one_bucket are not hoisted.
- */
- cnt_b = __atomic_load_n(h->tbl_chng_cnt,
- __ATOMIC_ACQUIRE);
+ /* Check if key is in primary location */
+ bkt = &h->buckets[prim_bucket_idx];
+ ret = search_one_bucket(h, key, short_sig, data, bkt);
+ if (ret != -1) {
+ __hash_rw_reader_unlock(h);
+ return ret;
+ }
+ /* Calculate secondary hash */
+ bkt = &h->buckets[sec_bucket_idx];
- /* Check if key is in primary location */
- bkt = &h->buckets[prim_bucket_idx];
- ret = search_one_bucket(h, key, short_sig, data, bkt);
+ /* Check if key is in secondary location */
+ FOR_EACH_BUCKET(cur_bkt, bkt) {
+ ret = search_one_bucket(h, key, short_sig,
+ data, cur_bkt);
if (ret != -1) {
__hash_rw_reader_unlock(h);
return ret;
}
- /* Calculate secondary hash */
- bkt = &h->buckets[sec_bucket_idx];
-
- /* Check if key is in secondary location */
- FOR_EACH_BUCKET(cur_bkt, bkt) {
- ret = search_one_bucket(h, key, short_sig,
- data, cur_bkt);
- if (ret != -1) {
- __hash_rw_reader_unlock(h);
- return ret;
- }
- }
-
- /* The loads of sig_current in search_one_bucket
- * should not move below the load from tbl_chng_cnt.
- */
- __atomic_thread_fence(__ATOMIC_ACQUIRE);
- /* Re-read the table change counter to check if the
- * table has changed during search. If yes, re-do
- * the search.
- * This load should not get hoisted. The load
- * acquires on cnt_b, key index in primary bucket
- * and key index in secondary bucket will make sure
- * that it does not get hoisted.
- */
- cnt_a = __atomic_load_n(h->tbl_chng_cnt,
- __ATOMIC_ACQUIRE);
- } while (cnt_b != cnt_a);
+ }
__hash_rw_reader_unlock(h);
@@ -1638,8 +1608,6 @@ __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
struct rte_hash_bucket *cur_bkt, *next_bkt;
- void *pdata[RTE_HASH_LOOKUP_BULK_MAX];
- uint32_t cnt_b, cnt_a;
/* Prefetch first keys */
for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
@@ -1681,138 +1649,103 @@ __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
}
__hash_rw_reader_lock(h);
- do {
- /* Load the table change counter before the lookup
- * starts. Acquire semantics will make sure that
- * loads in compare_signatures are not hoisted.
- */
- cnt_b = __atomic_load_n(h->tbl_chng_cnt,
- __ATOMIC_ACQUIRE);
-
- /* Compare signatures and prefetch key slot of first hit */
- for (i = 0; i < num_keys; i++) {
- compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
- primary_bkt[i], secondary_bkt[i],
- sig[i], h->sig_cmp_fn);
-
- if (prim_hitmask[i]) {
- uint32_t first_hit =
- __builtin_ctzl(prim_hitmask[i])
- >> 1;
- uint32_t key_idx =
- primary_bkt[i]->key_idx[first_hit];
- const struct rte_hash_key *key_slot =
- (const struct rte_hash_key *)(
- (const char *)h->key_store +
- key_idx * h->key_entry_size);
- rte_prefetch0(key_slot);
- continue;
- }
- if (sec_hitmask[i]) {
- uint32_t first_hit =
- __builtin_ctzl(sec_hitmask[i])
- >> 1;
- uint32_t key_idx =
- secondary_bkt[i]->key_idx[first_hit];
- const struct rte_hash_key *key_slot =
- (const struct rte_hash_key *)(
- (const char *)h->key_store +
- key_idx * h->key_entry_size);
- rte_prefetch0(key_slot);
- }
+ /* Compare signatures and prefetch key slot of first hit */
+ for (i = 0; i < num_keys; i++) {
+ compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
+ primary_bkt[i], secondary_bkt[i],
+ sig[i], h->sig_cmp_fn);
+
+ if (prim_hitmask[i]) {
+ uint32_t first_hit =
+ __builtin_ctzl(prim_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ primary_bkt[i]->key_idx[first_hit];
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+ rte_prefetch0(key_slot);
+ continue;
}
- /* Compare keys, first hits in primary first */
- for (i = 0; i < num_keys; i++) {
- positions[i] = -ENOENT;
- while (prim_hitmask[i]) {
- uint32_t hit_index =
- __builtin_ctzl(prim_hitmask[i])
- >> 1;
- uint32_t key_idx =
- __atomic_load_n(
- &primary_bkt[i]->key_idx[hit_index],
- __ATOMIC_ACQUIRE);
- const struct rte_hash_key *key_slot =
- (const struct rte_hash_key *)(
- (const char *)h->key_store +
- key_idx * h->key_entry_size);
+ if (sec_hitmask[i]) {
+ uint32_t first_hit =
+ __builtin_ctzl(sec_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ secondary_bkt[i]->key_idx[first_hit];
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+ rte_prefetch0(key_slot);
+ }
+ }
- if (key_idx != EMPTY_SLOT)
- pdata[i] = __atomic_load_n(
- &key_slot->pdata,
- __ATOMIC_ACQUIRE);
- /*
- * If key index is 0, do not compare key,
- * as it is checking the dummy slot
- */
- if (!!key_idx &
- !rte_hash_cmp_eq(
- key_slot->key, keys[i], h)) {
- if (data != NULL)
- data[i] = pdata[i];
+ /* Compare keys, first hits in primary first */
+ for (i = 0; i < num_keys; i++) {
+ positions[i] = -ENOENT;
+ while (prim_hitmask[i]) {
+ uint32_t hit_index =
+ __builtin_ctzl(prim_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ primary_bkt[i]->key_idx[hit_index];
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+
+ /*
+ * If key index is 0, do not compare key,
+ * as it is checking the dummy slot
+ */
+ if (!!key_idx &
+ !rte_hash_cmp_eq(
+ key_slot->key, keys[i], h)) {
+ if (data != NULL)
+ data[i] = key_slot->pdata;
- hits |= 1ULL << i;
- positions[i] = key_idx - 1;
- goto next_key;
- }
- prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
+ hits |= 1ULL << i;
+ positions[i] = key_idx - 1;
+ goto next_key;
}
+ prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
+ }
- while (sec_hitmask[i]) {
- uint32_t hit_index =
- __builtin_ctzl(sec_hitmask[i])
- >> 1;
- uint32_t key_idx =
- __atomic_load_n(
- &secondary_bkt[i]->key_idx[hit_index],
- __ATOMIC_ACQUIRE);
- const struct rte_hash_key *key_slot =
- (const struct rte_hash_key *)(
- (const char *)h->key_store +
- key_idx * h->key_entry_size);
-
- if (key_idx != EMPTY_SLOT)
- pdata[i] = __atomic_load_n(
- &key_slot->pdata,
- __ATOMIC_ACQUIRE);
- /*
- * If key index is 0, do not compare key,
- * as it is checking the dummy slot
- */
+ while (sec_hitmask[i]) {
+ uint32_t hit_index =
+ __builtin_ctzl(sec_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ secondary_bkt[i]->key_idx[hit_index];
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+
+ /*
+ * If key index is 0, do not compare key,
+ * as it is checking the dummy slot
+ */
- if (!!key_idx &
- !rte_hash_cmp_eq(
- key_slot->key, keys[i], h)) {
- if (data != NULL)
- data[i] = pdata[i];
+ if (!!key_idx &
+ !rte_hash_cmp_eq(
+ key_slot->key, keys[i], h)) {
+ if (data != NULL)
+ data[i] = key_slot->pdata;
- hits |= 1ULL << i;
- positions[i] = key_idx - 1;
- goto next_key;
- }
- sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
+ hits |= 1ULL << i;
+ positions[i] = key_idx - 1;
+ goto next_key;
}
-next_key:
- continue;
+ sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
}
-
- /* The loads of sig_current in compare_signatures
- * should not move below the load from tbl_chng_cnt.
- */
- __atomic_thread_fence(__ATOMIC_ACQUIRE);
- /* Re-read the table change counter to check if the
- * table has changed during search. If yes, re-do
- * the search.
- * This load should not get hoisted. The load
- * acquires on cnt_b, primary key index and secondary
- * key index will make sure that it does not get
- * hoisted.
- */
- cnt_a = __atomic_load_n(h->tbl_chng_cnt,
- __ATOMIC_ACQUIRE);
- } while (cnt_b != cnt_a);
+next_key:
+ continue;
+ }
/* all found, do not need to go through ext bkt */
if ((hits == ((1ULL << num_keys) - 1)) || !h->ext_table_support) {
--
2.17.1
Thread overview: 13+ messages
2018-11-09 16:39 [dpdk-dev] [PATCH 0/4] hash: separate lf and rw lock lookup code paths Honnappa Nagarahalli
2018-11-09 16:39 ` [dpdk-dev] [PATCH 1/4] hash: prepare for lock-free and rw-lock separation Honnappa Nagarahalli
2018-11-09 16:39 ` [dpdk-dev] [PATCH 2/4] hash: remove rw-lock calls from lock-free functions Honnappa Nagarahalli
2018-11-09 16:39 ` Honnappa Nagarahalli [this message]
2018-11-10 8:51 ` [dpdk-dev] [PATCH 3/4] hash: remove memory orderings from rw-lock lookup fns Jerin Jacob
2018-11-10 18:58 ` Honnappa Nagarahalli
2018-11-09 16:39 ` [dpdk-dev] [PATCH 4/4] hash: separate lf and rw lock lookup code paths Honnappa Nagarahalli
2018-11-10 18:55 ` [dpdk-dev] [PATCH v2 0/1] " Honnappa Nagarahalli
2018-11-10 18:55 ` [dpdk-dev] [PATCH v2 1/1] " Honnappa Nagarahalli
2018-11-11 7:48 ` Jerin Jacob
2018-11-13 16:37 ` Thomas Monjalon
2018-11-11 21:43 ` Wang, Yipeng1
2018-11-12 4:50 ` Honnappa Nagarahalli