From: Yoan Picchi <yoan.picchi@arm.com>
To: Thomas Monjalon <thomas@monjalon.net>,
Yipeng Wang <yipeng1.wang@intel.com>,
Sameh Gobriel <sameh.gobriel@intel.com>,
Bruce Richardson <bruce.richardson@intel.com>,
Vladimir Medvedkin <vladimir.medvedkin@intel.com>
Cc: dev@dpdk.org, nd@arm.com, Yoan Picchi <yoan.picchi@arm.com>,
Ruifeng Wang <ruifeng.wang@arm.com>,
Nathan Brown <nathan.brown@arm.com>
Subject: [PATCH v4 1/4] hash: pack the hitmask for hash in bulk lookup
Date: Mon, 26 Feb 2024 17:02:00 +0000 [thread overview]
Message-ID: <20240226170203.2881280-2-yoan.picchi@arm.com> (raw)
In-Reply-To: <20240226170203.2881280-1-yoan.picchi@arm.com>
The current hitmask includes padding due to an implementation
detail of Intel's SIMD intrinsics. This patch allows non-Intel
SIMD implementations to benefit from a dense hitmask.
Signed-off-by: Yoan Picchi <yoan.picchi@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
Reviewed-by: Nathan Brown <nathan.brown@arm.com>
---
.mailmap | 2 +
lib/hash/rte_cuckoo_hash.c | 118 ++++++++++++++++++++++++++-----------
2 files changed, 86 insertions(+), 34 deletions(-)
diff --git a/.mailmap b/.mailmap
index 12d2875641..60500bbe36 100644
--- a/.mailmap
+++ b/.mailmap
@@ -492,6 +492,7 @@ Hari Kumar Vemula <hari.kumarx.vemula@intel.com>
Harini Ramakrishnan <harini.ramakrishnan@microsoft.com>
Hariprasad Govindharajan <hariprasad.govindharajan@intel.com>
Harish Patil <harish.patil@cavium.com> <harish.patil@qlogic.com>
+Harjot Singh <harjot.singh@arm.com>
Harman Kalra <hkalra@marvell.com>
Harneet Singh <harneet.singh@intel.com>
Harold Huang <baymaxhuang@gmail.com>
@@ -1625,6 +1626,7 @@ Yixue Wang <yixue.wang@intel.com>
Yi Yang <yangyi01@inspur.com> <yi.y.yang@intel.com>
Yi Zhang <zhang.yi75@zte.com.cn>
Yoann Desmouceaux <ydesmouc@cisco.com>
+Yoan Picchi <yoan.picchi@arm.com>
Yogesh Jangra <yogesh.jangra@intel.com>
Yogev Chaimovich <yogev@cgstowernetworks.com>
Yongjie Gu <yongjiex.gu@intel.com>
diff --git a/lib/hash/rte_cuckoo_hash.c b/lib/hash/rte_cuckoo_hash.c
index 9cf94645f6..0550165584 100644
--- a/lib/hash/rte_cuckoo_hash.c
+++ b/lib/hash/rte_cuckoo_hash.c
@@ -1857,8 +1857,50 @@ rte_hash_free_key_with_position(const struct rte_hash *h,
}
+#if defined(__ARM_NEON)
+
+static inline void
+compare_signatures_dense(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
+ const struct rte_hash_bucket *prim_bkt,
+ const struct rte_hash_bucket *sec_bkt,
+ uint16_t sig,
+ enum rte_hash_sig_compare_function sig_cmp_fn)
+{
+ unsigned int i;
+
+ /* For match mask every bits indicates the match */
+ switch (sig_cmp_fn) {
+ case RTE_HASH_COMPARE_NEON: {
+ uint16x8_t vmat, vsig, x;
+ int16x8_t shift = {0, 1, 2, 3, 4, 5, 6, 7};
+
+ vsig = vld1q_dup_u16((uint16_t const *)&sig);
+ /* Compare all signatures in the primary bucket */
+ vmat = vceqq_u16(vsig,
+ vld1q_u16((uint16_t const *)prim_bkt->sig_current));
+ x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x0001)), shift);
+ *prim_hash_matches = (uint32_t)(vaddvq_u16(x));
+ /* Compare all signatures in the secondary bucket */
+ vmat = vceqq_u16(vsig,
+ vld1q_u16((uint16_t const *)sec_bkt->sig_current));
+ x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x0001)), shift);
+ *sec_hash_matches = (uint32_t)(vaddvq_u16(x));
+ }
+ break;
+ default:
+ for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
+ *prim_hash_matches |=
+ ((sig == prim_bkt->sig_current[i]) << i);
+ *sec_hash_matches |=
+ ((sig == sec_bkt->sig_current[i]) << i);
+ }
+ }
+}
+
+#else
+
static inline void
-compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
+compare_signatures_sparse(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
const struct rte_hash_bucket *prim_bkt,
const struct rte_hash_bucket *sec_bkt,
uint16_t sig,
@@ -1885,25 +1927,7 @@ compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
/* Extract the even-index bits only */
*sec_hash_matches &= 0x5555;
break;
-#elif defined(__ARM_NEON)
- case RTE_HASH_COMPARE_NEON: {
- uint16x8_t vmat, vsig, x;
- int16x8_t shift = {-15, -13, -11, -9, -7, -5, -3, -1};
-
- vsig = vld1q_dup_u16((uint16_t const *)&sig);
- /* Compare all signatures in the primary bucket */
- vmat = vceqq_u16(vsig,
- vld1q_u16((uint16_t const *)prim_bkt->sig_current));
- x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift);
- *prim_hash_matches = (uint32_t)(vaddvq_u16(x));
- /* Compare all signatures in the secondary bucket */
- vmat = vceqq_u16(vsig,
- vld1q_u16((uint16_t const *)sec_bkt->sig_current));
- x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift);
- *sec_hash_matches = (uint32_t)(vaddvq_u16(x));
- }
- break;
-#endif
+#endif /* defined(__SSE2__) */
default:
for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
*prim_hash_matches |=
@@ -1914,6 +1938,8 @@ compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
}
}
+#endif /* defined(__ARM_NEON) */
+
static inline void
__bulk_lookup_l(const struct rte_hash *h, const void **keys,
const struct rte_hash_bucket **primary_bkt,
@@ -1928,18 +1954,30 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
struct rte_hash_bucket *cur_bkt, *next_bkt;
+#if defined(__ARM_NEON)
+ const int hitmask_padding = 0;
+#else
+ const int hitmask_padding = 1;
+#endif
+
__hash_rw_reader_lock(h);
/* Compare signatures and prefetch key slot of first hit */
for (i = 0; i < num_keys; i++) {
- compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
+#if defined(__ARM_NEON)
+ compare_signatures_dense(&prim_hitmask[i], &sec_hitmask[i],
+ primary_bkt[i], secondary_bkt[i],
+ sig[i], h->sig_cmp_fn);
+#else
+ compare_signatures_sparse(&prim_hitmask[i], &sec_hitmask[i],
primary_bkt[i], secondary_bkt[i],
sig[i], h->sig_cmp_fn);
+#endif
if (prim_hitmask[i]) {
uint32_t first_hit =
rte_ctz32(prim_hitmask[i])
- >> 1;
+ >> hitmask_padding;
uint32_t key_idx =
primary_bkt[i]->key_idx[first_hit];
const struct rte_hash_key *key_slot =
@@ -1953,7 +1991,7 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
if (sec_hitmask[i]) {
uint32_t first_hit =
rte_ctz32(sec_hitmask[i])
- >> 1;
+ >> hitmask_padding;
uint32_t key_idx =
secondary_bkt[i]->key_idx[first_hit];
const struct rte_hash_key *key_slot =
@@ -1970,7 +2008,7 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
while (prim_hitmask[i]) {
uint32_t hit_index =
rte_ctz32(prim_hitmask[i])
- >> 1;
+ >> hitmask_padding;
uint32_t key_idx =
primary_bkt[i]->key_idx[hit_index];
const struct rte_hash_key *key_slot =
@@ -1992,13 +2030,13 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
positions[i] = key_idx - 1;
goto next_key;
}
- prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
+ prim_hitmask[i] &= ~(1 << (hit_index << hitmask_padding));
}
while (sec_hitmask[i]) {
uint32_t hit_index =
rte_ctz32(sec_hitmask[i])
- >> 1;
+ >> hitmask_padding;
uint32_t key_idx =
secondary_bkt[i]->key_idx[hit_index];
const struct rte_hash_key *key_slot =
@@ -2021,7 +2059,7 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
positions[i] = key_idx - 1;
goto next_key;
}
- sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
+ sec_hitmask[i] &= ~(1 << (hit_index << hitmask_padding));
}
next_key:
continue;
@@ -2076,6 +2114,12 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
struct rte_hash_bucket *cur_bkt, *next_bkt;
uint32_t cnt_b, cnt_a;
+#if defined(__ARM_NEON)
+ const int hitmask_padding = 0;
+#else
+ const int hitmask_padding = 1;
+#endif
+
for (i = 0; i < num_keys; i++)
positions[i] = -ENOENT;
@@ -2089,14 +2133,20 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
/* Compare signatures and prefetch key slot of first hit */
for (i = 0; i < num_keys; i++) {
- compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
+#if defined(__ARM_NEON)
+ compare_signatures_dense(&prim_hitmask[i], &sec_hitmask[i],
primary_bkt[i], secondary_bkt[i],
sig[i], h->sig_cmp_fn);
+#else
+ compare_signatures_sparse(&prim_hitmask[i], &sec_hitmask[i],
+ primary_bkt[i], secondary_bkt[i],
+ sig[i], h->sig_cmp_fn);
+#endif
if (prim_hitmask[i]) {
uint32_t first_hit =
rte_ctz32(prim_hitmask[i])
- >> 1;
+ >> hitmask_padding;
uint32_t key_idx =
primary_bkt[i]->key_idx[first_hit];
const struct rte_hash_key *key_slot =
@@ -2110,7 +2160,7 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
if (sec_hitmask[i]) {
uint32_t first_hit =
rte_ctz32(sec_hitmask[i])
- >> 1;
+ >> hitmask_padding;
uint32_t key_idx =
secondary_bkt[i]->key_idx[first_hit];
const struct rte_hash_key *key_slot =
@@ -2126,7 +2176,7 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
while (prim_hitmask[i]) {
uint32_t hit_index =
rte_ctz32(prim_hitmask[i])
- >> 1;
+ >> hitmask_padding;
uint32_t key_idx =
rte_atomic_load_explicit(
&primary_bkt[i]->key_idx[hit_index],
@@ -2152,13 +2202,13 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
positions[i] = key_idx - 1;
goto next_key;
}
- prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
+ prim_hitmask[i] &= ~(1 << (hit_index << hitmask_padding));
}
while (sec_hitmask[i]) {
uint32_t hit_index =
rte_ctz32(sec_hitmask[i])
- >> 1;
+ >> hitmask_padding;
uint32_t key_idx =
rte_atomic_load_explicit(
&secondary_bkt[i]->key_idx[hit_index],
@@ -2185,7 +2235,7 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
positions[i] = key_idx - 1;
goto next_key;
}
- sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
+ sec_hitmask[i] &= ~(1 << (hit_index << hitmask_padding));
}
next_key:
continue;
--
2.25.1
next prev parent reply other threads:[~2024-02-27 6:03 UTC|newest]
Thread overview: 11+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-11-07 12:18 [PATCH v3 0/4] hash: add SVE support for bulk key lookup Yoan Picchi
2023-11-07 12:18 ` [PATCH v3 1/4] hash: pack the hitmask for hash in bulk lookup Yoan Picchi
2023-11-07 12:18 ` [PATCH v3 2/4] hash: optimize compare signature for NEON Yoan Picchi
2023-11-07 12:18 ` [PATCH v3 3/4] test/hash: check bulk lookup of keys after collision Yoan Picchi
2023-11-07 12:18 ` [PATCH v3 4/4] hash: add SVE support for bulk key lookup Yoan Picchi
2024-02-26 17:01 ` [PATCH v4 0/4] " Yoan Picchi
2024-02-26 17:02 ` Yoan Picchi [this message]
2024-02-26 17:02 ` [PATCH v4 2/4] hash: optimize compare signature for NEON Yoan Picchi
2024-02-26 17:02 ` [PATCH v4 3/4] test/hash: check bulk lookup of keys after collision Yoan Picchi
2024-02-26 17:02 ` [PATCH v4 4/4] hash: add SVE support for bulk key lookup Yoan Picchi
2024-02-23 13:26 [PATCH v4 0/4] " Yoan Picchi
2024-02-23 13:26 ` [PATCH v4 1/4] hash: pack the hitmask for hash in bulk lookup Yoan Picchi
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240226170203.2881280-2-yoan.picchi@arm.com \
--to=yoan.picchi@arm.com \
--cc=bruce.richardson@intel.com \
--cc=dev@dpdk.org \
--cc=nathan.brown@arm.com \
--cc=nd@arm.com \
--cc=ruifeng.wang@arm.com \
--cc=sameh.gobriel@intel.com \
--cc=thomas@monjalon.net \
--cc=vladimir.medvedkin@intel.com \
--cc=yipeng1.wang@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).