From: Yoan Picchi <yoan.picchi@arm.com>
To: Thomas Monjalon <thomas@monjalon.net>,
Yipeng Wang <yipeng1.wang@intel.com>,
Sameh Gobriel <sameh.gobriel@intel.com>,
Bruce Richardson <bruce.richardson@intel.com>,
Vladimir Medvedkin <vladimir.medvedkin@intel.com>
Cc: Nathan Brown <nathan.brown@arm.com>,
Ruifeng Wang <ruifeng.wang@arm.com>,
dev@dpdk.org, Yoan Picchi <yoan.picchi@arm.com>,
Harjot Singh <harjot.singh@arm.com>
Subject: [PATCH v3 4/4] hash: add SVE support for bulk key lookup
Date: Tue, 7 Nov 2023 12:18:45 +0000
Message-ID: <20231107121845.2758454-5-yoan.picchi@arm.com>
In-Reply-To: <20231107121845.2758454-1-yoan.picchi@arm.com>
- Implemented SVE code for comparing signatures in bulk lookup.
- Added defines to enable the SVE code support.
- Optimized the NEON code.
- The new SVE code is ~5% slower than the optimized NEON code on an N2 processor.
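
A minimal scalar sketch of the dense hitmask layout this series produces,
assuming RTE_HASH_BUCKET_ENTRIES == 8 (the helper names below are
illustrative only, not part of the patch):

#include <stdint.h>

#define ENTRIES_PER_BUCKET 8	/* stands in for RTE_HASH_BUCKET_ENTRIES */

/* Bits 0-7 hold the primary bucket matches, bits 8-15 the secondary
 * bucket matches. This is the same layout the NEON/SVE cases compute
 * with vector compares and the default case with the scalar loop.
 */
static uint16_t
dense_hitmask(const uint16_t *prim_sigs, const uint16_t *sec_sigs,
		uint16_t sig)
{
	uint16_t mask = 0;
	int i;

	for (i = 0; i < ENTRIES_PER_BUCKET; i++) {
		mask |= (uint16_t)(prim_sigs[i] == sig) << i;
		mask |= (uint16_t)(sec_sigs[i] == sig)
				<< (i + ENTRIES_PER_BUCKET);
	}
	return mask;
}

/* With hitmask_padding == 0, the bulk-lookup loops then consume such a
 * mask one set bit at a time, as in the patched __bulk_lookup_l():
 */
static void
walk_hitmask(uint16_t mask)
{
	while (mask) {
		unsigned int hit_index = __builtin_ctz(mask);
		/* ...load key_idx[hit_index] and compare the full key... */
		mask &= mask - 1;	/* clear the lowest set bit */
	}
}

Because every bit maps 1:1 to a bucket entry in the dense layout, the
NEON/SVE paths can write one store per key and the lookup loops can drop
the extra shift needed by the sparse SSE movemask layout, where only
every second bit carries a match.
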
Signed-off-by: Yoan Picchi <yoan.picchi@arm.com>
Signed-off-by: Harjot Singh <harjot.singh@arm.com>
Reviewed-by: Nathan Brown <nathan.brown@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
---
lib/hash/rte_cuckoo_hash.c | 196 ++++++++++++++++++++++++++++---------
lib/hash/rte_cuckoo_hash.h | 1 +
2 files changed, 151 insertions(+), 46 deletions(-)
diff --git a/lib/hash/rte_cuckoo_hash.c b/lib/hash/rte_cuckoo_hash.c
index a4b907c45c..61637d02eb 100644
--- a/lib/hash/rte_cuckoo_hash.c
+++ b/lib/hash/rte_cuckoo_hash.c
@@ -435,8 +435,11 @@ rte_hash_create(const struct rte_hash_parameters *params)
h->sig_cmp_fn = RTE_HASH_COMPARE_SSE;
else
#elif defined(RTE_ARCH_ARM64)
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
h->sig_cmp_fn = RTE_HASH_COMPARE_NEON;
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SVE))
+ h->sig_cmp_fn = RTE_HASH_COMPARE_SVE;
+ }
else
#endif
h->sig_cmp_fn = RTE_HASH_COMPARE_SCALAR;
@@ -1853,37 +1856,103 @@ rte_hash_free_key_with_position(const struct rte_hash *h,
#if defined(__ARM_NEON)
static inline void
-compare_signatures_dense(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
- const struct rte_hash_bucket *prim_bkt,
- const struct rte_hash_bucket *sec_bkt,
+compare_signatures_dense(uint16_t *hitmask_buffer,
+ const uint16_t *prim_bucket_sigs,
+ const uint16_t *sec_bucket_sigs,
uint16_t sig,
enum rte_hash_sig_compare_function sig_cmp_fn)
{
unsigned int i;
+ static_assert(sizeof(*hitmask_buffer) >= 2*(RTE_HASH_BUCKET_ENTRIES/8),
+ "The hitmask buffer must be at least wide enough to hold both bucket hitmasks when dense");
+
/* For match mask, every bit indicates a match */
switch (sig_cmp_fn) {
+#if RTE_HASH_BUCKET_ENTRIES <= 8
case RTE_HASH_COMPARE_NEON: {
- uint16x8_t vmat, x;
+ uint16x8_t vmat, hit1, hit2;
const uint16x8_t mask = {0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80};
const uint16x8_t vsig = vld1q_dup_u16((uint16_t const *)&sig);
/* Compare all signatures in the primary bucket */
- vmat = vceqq_u16(vsig, vld1q_u16((uint16_t const *)prim_bkt->sig_current));
- x = vandq_u16(vmat, mask);
- *prim_hash_matches = (uint32_t)(vaddvq_u16(x));
+ vmat = vceqq_u16(vsig, vld1q_u16(prim_bucket_sigs));
+ hit1 = vandq_u16(vmat, mask);
+
/* Compare all signatures in the secondary bucket */
- vmat = vceqq_u16(vsig, vld1q_u16((uint16_t const *)sec_bkt->sig_current));
- x = vandq_u16(vmat, mask);
- *sec_hash_matches = (uint32_t)(vaddvq_u16(x));
+ vmat = vceqq_u16(vsig, vld1q_u16(sec_bucket_sigs));
+ hit2 = vandq_u16(vmat, mask);
+
+ hit2 = vshlq_n_u16(hit2, RTE_HASH_BUCKET_ENTRIES);
+ hit2 = vorrq_u16(hit1, hit2);
+ *hitmask_buffer = vaddvq_u16(hit2);
+ }
+ break;
+#endif
+#if defined(RTE_HAS_SVE_ACLE)
+ case RTE_HASH_COMPARE_SVE: {
+ svuint16_t vsign, shift, sv_matches;
+ svbool_t pred, match, bucket_wide_pred;
+ int i = 0;
+ uint64_t vl = svcnth();
+
+ vsign = svdup_u16(sig);
+ shift = svindex_u16(0, 1);
+
+ if (vl >= 2 * RTE_HASH_BUCKET_ENTRIES && RTE_HASH_BUCKET_ENTRIES <= 8) {
+ svuint16_t primary_array_vect, secondary_array_vect;
+ bucket_wide_pred = svwhilelt_b16(0, RTE_HASH_BUCKET_ENTRIES);
+ primary_array_vect = svld1_u16(bucket_wide_pred, prim_bucket_sigs);
+ secondary_array_vect = svld1_u16(bucket_wide_pred, sec_bucket_sigs);
+
+ /* We merged the two vectors so we can do both comparisons at once */
+ primary_array_vect = svsplice_u16(bucket_wide_pred,
+ primary_array_vect,
+ secondary_array_vect);
+ pred = svwhilelt_b16(0, 2*RTE_HASH_BUCKET_ENTRIES);
+
+ /* Compare all signatures in the buckets */
+ match = svcmpeq_u16(pred, vsign, primary_array_vect);
+ if (svptest_any(svptrue_b16(), match)) {
+ sv_matches = svdup_u16(1);
+ sv_matches = svlsl_u16_z(match, sv_matches, shift);
+ *hitmask_buffer = svorv_u16(svptrue_b16(), sv_matches);
+ }
+ } else {
+ do {
+ pred = svwhilelt_b16(i, RTE_HASH_BUCKET_ENTRIES);
+ uint16_t lower_half = 0;
+ uint16_t upper_half = 0;
+ /* Compare all signatures in the primary bucket */
+ match = svcmpeq_u16(pred, vsign, svld1_u16(pred,
+ &prim_bucket_sigs[i]));
+ if (svptest_any(svptrue_b16(), match)) {
+ sv_matches = svdup_u16(1);
+ sv_matches = svlsl_u16_z(match, sv_matches, shift);
+ lower_half = svorv_u16(svptrue_b16(), sv_matches);
+ }
+ /* Compare all signatures in the secondary bucket */
+ match = svcmpeq_u16(pred, vsign, svld1_u16(pred,
+ &sec_bucket_sigs[i]));
+ if (svptest_any(svptrue_b16(), match)) {
+ sv_matches = svdup_u16(1);
+ sv_matches = svlsl_u16_z(match, sv_matches, shift);
+ upper_half = svorv_u16(svptrue_b16(), sv_matches)
+ << RTE_HASH_BUCKET_ENTRIES;
+ }
+ hitmask_buffer[i/8] = upper_half | lower_half;
+ i += vl;
+ } while (i < RTE_HASH_BUCKET_ENTRIES);
+ }
}
break;
+#endif
default:
for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
- *prim_hash_matches |=
- ((sig == prim_bkt->sig_current[i]) << i);
- *sec_hash_matches |=
- ((sig == sec_bkt->sig_current[i]) << i);
+ *hitmask_buffer |=
+ ((sig == prim_bucket_sigs[i]) << i);
+ *hitmask_buffer |=
+ ((sig == sec_bucket_sigs[i]) << i) << RTE_HASH_BUCKET_ENTRIES;
}
}
}
@@ -1901,7 +1970,7 @@ compare_signatures_sparse(uint32_t *prim_hash_matches, uint32_t *sec_hash_matche
/* For match mask the first bit of every two bits indicates the match */
switch (sig_cmp_fn) {
-#if defined(__SSE2__)
+#if defined(__SSE2__) && RTE_HASH_BUCKET_ENTRIES <= 8
case RTE_HASH_COMPARE_SSE:
/* Compare all signatures in the bucket */
*prim_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
@@ -1941,14 +2010,18 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
uint64_t hits = 0;
int32_t i;
int32_t ret;
- uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
- uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
struct rte_hash_bucket *cur_bkt, *next_bkt;
#if defined(__ARM_NEON)
const int hitmask_padding = 0;
+ uint16_t hitmask_buffer[RTE_HASH_LOOKUP_BULK_MAX] = {0};
+
+ static_assert(sizeof(*hitmask_buffer)*8/2 == RTE_HASH_BUCKET_ENTRIES,
+ "The hitmask must be exactly wide enough to accept the whole hitmask when it is dense");
#else
const int hitmask_padding = 1;
+ uint32_t prim_hitmask_buffer[RTE_HASH_LOOKUP_BULK_MAX] = {0};
+ uint32_t sec_hitmask_buffer[RTE_HASH_LOOKUP_BULK_MAX] = {0};
#endif
__hash_rw_reader_lock(h);
@@ -1956,18 +2029,24 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
/* Compare signatures and prefetch key slot of first hit */
for (i = 0; i < num_keys; i++) {
#if defined(__ARM_NEON)
- compare_signatures_dense(&prim_hitmask[i], &sec_hitmask[i],
- primary_bkt[i], secondary_bkt[i],
+ uint16_t *hitmask = &hitmask_buffer[i];
+ compare_signatures_dense(hitmask,
+ primary_bkt[i]->sig_current,
+ secondary_bkt[i]->sig_current,
sig[i], h->sig_cmp_fn);
+ const unsigned int prim_hitmask = *(uint8_t *)(hitmask);
+ const unsigned int sec_hitmask = *((uint8_t *)(hitmask)+1);
#else
- compare_signatures_sparse(&prim_hitmask[i], &sec_hitmask[i],
+ compare_signatures_sparse(&prim_hitmask_buffer[i], &sec_hitmask_buffer[i],
primary_bkt[i], secondary_bkt[i],
sig[i], h->sig_cmp_fn);
+ const unsigned int prim_hitmask = prim_hitmask_buffer[i];
+ const unsigned int sec_hitmask = sec_hitmask_buffer[i];
#endif
- if (prim_hitmask[i]) {
+ if (prim_hitmask) {
uint32_t first_hit =
- __builtin_ctzl(prim_hitmask[i])
+ __builtin_ctzl(prim_hitmask)
>> hitmask_padding;
uint32_t key_idx =
primary_bkt[i]->key_idx[first_hit];
@@ -1979,9 +2058,9 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
continue;
}
- if (sec_hitmask[i]) {
+ if (sec_hitmask) {
uint32_t first_hit =
- __builtin_ctzl(sec_hitmask[i])
+ __builtin_ctzl(sec_hitmask)
>> hitmask_padding;
uint32_t key_idx =
secondary_bkt[i]->key_idx[first_hit];
@@ -1996,9 +2075,17 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
/* Compare keys, first hits in primary first */
for (i = 0; i < num_keys; i++) {
positions[i] = -ENOENT;
- while (prim_hitmask[i]) {
+#if defined(__ARM_NEON)
+ uint16_t *hitmask = &hitmask_buffer[i];
+ unsigned int prim_hitmask = *(uint8_t *)(hitmask);
+ unsigned int sec_hitmask = *((uint8_t *)(hitmask)+1);
+#else
+ unsigned int prim_hitmask = prim_hitmask_buffer[i];
+ unsigned int sec_hitmask = sec_hitmask_buffer[i];
+#endif
+ while (prim_hitmask) {
uint32_t hit_index =
- __builtin_ctzl(prim_hitmask[i])
+ __builtin_ctzl(prim_hitmask)
>> hitmask_padding;
uint32_t key_idx =
primary_bkt[i]->key_idx[hit_index];
@@ -2021,12 +2108,12 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
positions[i] = key_idx - 1;
goto next_key;
}
- prim_hitmask[i] &= ~(1 << (hit_index << hitmask_padding));
+ prim_hitmask &= ~(1 << (hit_index << hitmask_padding));
}
- while (sec_hitmask[i]) {
+ while (sec_hitmask) {
uint32_t hit_index =
- __builtin_ctzl(sec_hitmask[i])
+ __builtin_ctzl(sec_hitmask)
>> hitmask_padding;
uint32_t key_idx =
secondary_bkt[i]->key_idx[hit_index];
@@ -2050,7 +2137,7 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
positions[i] = key_idx - 1;
goto next_key;
}
- sec_hitmask[i] &= ~(1 << (hit_index << hitmask_padding));
+ sec_hitmask &= ~(1 << (hit_index << hitmask_padding));
}
next_key:
continue;
@@ -2100,15 +2187,18 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
uint64_t hits = 0;
int32_t i;
int32_t ret;
- uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
- uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
struct rte_hash_bucket *cur_bkt, *next_bkt;
uint32_t cnt_b, cnt_a;
#if defined(__ARM_NEON)
const int hitmask_padding = 0;
+ uint16_t hitmask_buffer[RTE_HASH_LOOKUP_BULK_MAX] = {0};
+ static_assert(sizeof(*hitmask_buffer)*8/2 == RTE_HASH_BUCKET_ENTRIES,
+ "The hitmask must be exactly wide enough to accept the whole hitmask chen it is dense");
#else
const int hitmask_padding = 1;
+ uint32_t prim_hitmask_buffer[RTE_HASH_LOOKUP_BULK_MAX] = {0};
+ uint32_t sec_hitmask_buffer[RTE_HASH_LOOKUP_BULK_MAX] = {0};
#endif
for (i = 0; i < num_keys; i++)
@@ -2125,18 +2215,24 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
/* Compare signatures and prefetch key slot of first hit */
for (i = 0; i < num_keys; i++) {
#if defined(__ARM_NEON)
- compare_signatures_dense(&prim_hitmask[i], &sec_hitmask[i],
- primary_bkt[i], secondary_bkt[i],
+ uint16_t *hitmask = &hitmask_buffer[i];
+ compare_signatures_dense(hitmask,
+ primary_bkt[i]->sig_current,
+ secondary_bkt[i]->sig_current,
sig[i], h->sig_cmp_fn);
+ const unsigned int prim_hitmask = *(uint8_t *)(hitmask);
+ const unsigned int sec_hitmask = *((uint8_t *)(hitmask)+1);
#else
- compare_signatures_sparse(&prim_hitmask[i], &sec_hitmask[i],
+ compare_signatures_sparse(&prim_hitmask_buffer[i], &sec_hitmask_buffer[i],
primary_bkt[i], secondary_bkt[i],
sig[i], h->sig_cmp_fn);
+ const unsigned int prim_hitmask = prim_hitmask_buffer[i];
+ const unsigned int sec_hitmask = sec_hitmask_buffer[i];
#endif
- if (prim_hitmask[i]) {
+ if (prim_hitmask) {
uint32_t first_hit =
- __builtin_ctzl(prim_hitmask[i])
+ __builtin_ctzl(prim_hitmask)
>> hitmask_padding;
uint32_t key_idx =
primary_bkt[i]->key_idx[first_hit];
@@ -2148,9 +2244,9 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
continue;
}
- if (sec_hitmask[i]) {
+ if (sec_hitmask) {
uint32_t first_hit =
- __builtin_ctzl(sec_hitmask[i])
+ __builtin_ctzl(sec_hitmask)
>> hitmask_padding;
uint32_t key_idx =
secondary_bkt[i]->key_idx[first_hit];
@@ -2164,9 +2260,17 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
/* Compare keys, first hits in primary first */
for (i = 0; i < num_keys; i++) {
- while (prim_hitmask[i]) {
+#if defined(__ARM_NEON)
+ uint16_t *hitmask = &hitmask_buffer[i];
+ unsigned int prim_hitmask = *(uint8_t *)(hitmask);
+ unsigned int sec_hitmask = *((uint8_t *)(hitmask)+1);
+#else
+ unsigned int prim_hitmask = prim_hitmask_buffer[i];
+ unsigned int sec_hitmask = sec_hitmask_buffer[i];
+#endif
+ while (prim_hitmask) {
uint32_t hit_index =
- __builtin_ctzl(prim_hitmask[i])
+ __builtin_ctzl(prim_hitmask)
>> hitmask_padding;
uint32_t key_idx =
__atomic_load_n(
@@ -2193,12 +2297,12 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
positions[i] = key_idx - 1;
goto next_key;
}
- prim_hitmask[i] &= ~(1 << (hit_index << hitmask_padding));
+ prim_hitmask &= ~(1 << (hit_index << hitmask_padding));
}
- while (sec_hitmask[i]) {
+ while (sec_hitmask) {
uint32_t hit_index =
- __builtin_ctzl(sec_hitmask[i])
+ __builtin_ctzl(sec_hitmask)
>> hitmask_padding;
uint32_t key_idx =
__atomic_load_n(
@@ -2226,7 +2330,7 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
positions[i] = key_idx - 1;
goto next_key;
}
- sec_hitmask[i] &= ~(1 << (hit_index << hitmask_padding));
+ sec_hitmask &= ~(1 << (hit_index << hitmask_padding));
}
next_key:
continue;
diff --git a/lib/hash/rte_cuckoo_hash.h b/lib/hash/rte_cuckoo_hash.h
index eb2644f74b..356ec2a69e 100644
--- a/lib/hash/rte_cuckoo_hash.h
+++ b/lib/hash/rte_cuckoo_hash.h
@@ -148,6 +148,7 @@ enum rte_hash_sig_compare_function {
RTE_HASH_COMPARE_SCALAR = 0,
RTE_HASH_COMPARE_SSE,
RTE_HASH_COMPARE_NEON,
+ RTE_HASH_COMPARE_SVE,
RTE_HASH_COMPARE_NUM
};
--
2.25.1