From: Yoan Picchi <yoan.picchi@arm.com>
To: Thomas Monjalon <thomas@monjalon.net>,
Yipeng Wang <yipeng1.wang@intel.com>,
Sameh Gobriel <sameh.gobriel@intel.com>,
Bruce Richardson <bruce.richardson@intel.com>,
Vladimir Medvedkin <vladimir.medvedkin@intel.com>
Cc: dev@dpdk.org, nd@arm.com, Yoan Picchi <yoan.picchi@arm.com>,
Ruifeng Wang <ruifeng.wang@arm.com>,
Nathan Brown <nathan.brown@arm.com>
Subject: [PATCH v10 1/4] hash: pack the hitmask for hash in bulk lookup
Date: Wed, 3 Jul 2024 17:13:12 +0000
Message-ID: <20240703171315.1470547-2-yoan.picchi@arm.com>
In-Reply-To: <20240703171315.1470547-1-yoan.picchi@arm.com>
The current hitmask includes padding due to an Intel SIMD
implementation detail. This patch allows non-Intel SIMD
implementations to benefit from a dense hitmask.
In addition, the new dense hitmask interleaves the primary
and secondary matches, which allows better cache usage and
enables future improvements for the SIMD implementations.
The default non-SIMD path now uses this dense mask.
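For illustration, here is a minimal standalone scalar sketch of the two
layouts (assuming RTE_HASH_BUCKET_ENTRIES == 8 and 16-bit signatures;
the helper names and values are illustrative only, not part of the patch):

/*
 * Illustrative sketch only, contrasting the sparse (padded) and dense
 * hitmask layouts for a bucket of 8 signature entries.
 */
#include <stdint.h>
#include <stdio.h>

#define ENTRIES 8

/* Sparse layout (x86 path): only every other bit carries a match. */
static uint32_t
sparse_hitmask(const uint16_t sigs[ENTRIES], uint16_t sig)
{
	uint32_t mask = 0;

	for (unsigned int i = 0; i < ENTRIES; i++)
		mask |= (uint32_t)(sig == sigs[i]) << (i << 1);
	return mask;
}

/* Dense layout: primary matches in the low byte, secondary in the high byte. */
static uint16_t
dense_hitmask(const uint16_t prim[ENTRIES], const uint16_t sec[ENTRIES],
	uint16_t sig)
{
	uint16_t mask = 0;

	for (unsigned int i = 0; i < ENTRIES; i++) {
		mask |= (uint16_t)(sig == prim[i]) << i;
		mask |= (uint16_t)((sig == sec[i]) << i) << ENTRIES;
	}
	return mask;
}

int
main(void)
{
	const uint16_t prim[ENTRIES] = {1, 2, 3, 4, 5, 6, 7, 8};
	const uint16_t sec[ENTRIES]  = {8, 7, 6, 5, 4, 3, 2, 1};

	/* Signature 3 matches prim[2] and sec[5]. */
	printf("sparse primary: 0x%04x\n",
		(unsigned int)sparse_hitmask(prim, 3));		/* 0x0010 */
	printf("dense combined: 0x%04x\n",
		(unsigned int)dense_hitmask(prim, sec, 3));	/* 0x2004 */
	return 0;
}

With the dense layout, the ">> 1" padding shift after rte_ctz32() is no
longer needed, and the primary and secondary hitmasks can be read back
as the low and high bytes of a single 16-bit word, as done in the bulk
lookup changes below.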
Signed-off-by: Yoan Picchi <yoan.picchi@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
Reviewed-by: Nathan Brown <nathan.brown@arm.com>
---
.mailmap | 1 +
lib/hash/compare_signatures_arm_pvt.h | 60 +++++++
lib/hash/compare_signatures_generic_pvt.h | 37 +++++
lib/hash/compare_signatures_x86_pvt.h | 49 ++++++
lib/hash/hash_sig_cmp_func_pvt.h | 20 +++
lib/hash/rte_cuckoo_hash.c | 190 +++++++++++-----------
lib/hash/rte_cuckoo_hash.h | 10 +-
7 files changed, 267 insertions(+), 100 deletions(-)
create mode 100644 lib/hash/compare_signatures_arm_pvt.h
create mode 100644 lib/hash/compare_signatures_generic_pvt.h
create mode 100644 lib/hash/compare_signatures_x86_pvt.h
create mode 100644 lib/hash/hash_sig_cmp_func_pvt.h
diff --git a/.mailmap b/.mailmap
index f76037213d..ec525981fe 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1661,6 +1661,7 @@ Yixue Wang <yixue.wang@intel.com>
Yi Yang <yangyi01@inspur.com> <yi.y.yang@intel.com>
Yi Zhang <zhang.yi75@zte.com.cn>
Yoann Desmouceaux <ydesmouc@cisco.com>
+Yoan Picchi <yoan.picchi@arm.com>
Yogesh Jangra <yogesh.jangra@intel.com>
Yogev Chaimovich <yogev@cgstowernetworks.com>
Yongjie Gu <yongjiex.gu@intel.com>
diff --git a/lib/hash/compare_signatures_arm_pvt.h b/lib/hash/compare_signatures_arm_pvt.h
new file mode 100644
index 0000000000..e83bae9912
--- /dev/null
+++ b/lib/hash/compare_signatures_arm_pvt.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ * Copyright(c) 2018-2024 Arm Limited
+ */
+
+/*
+ * Arm's version uses a densely packed hitmask buffer:
+ * Every bit is in use.
+ */
+
+#include <inttypes.h>
+#include <rte_common.h>
+#include <rte_vect.h>
+
+#include "rte_cuckoo_hash.h"
+#include "hash_sig_cmp_func_pvt.h"
+
+#define DENSE_HASH_BULK_LOOKUP 1
+
+static inline void
+compare_signatures_dense(uint16_t *hitmask_buffer,
+ const uint16_t *prim_bucket_sigs,
+ const uint16_t *sec_bucket_sigs,
+ uint16_t sig,
+ enum rte_hash_sig_compare_function sig_cmp_fn)
+{
+
+ static_assert(sizeof(*hitmask_buffer) >= 2 * (RTE_HASH_BUCKET_ENTRIES / 8),
+ "hitmask_buffer must be wide enough to fit a dense hitmask");
+
+ /* For match mask, every bit indicates a match */
+ switch (sig_cmp_fn) {
+#if RTE_HASH_BUCKET_ENTRIES <= 8
+ case RTE_HASH_COMPARE_NEON: {
+ uint16x8_t vmat, vsig, x;
+ int16x8_t shift = {0, 1, 2, 3, 4, 5, 6, 7};
+ uint16_t low, high;
+
+ vsig = vld1q_dup_u16((uint16_t const *)&sig);
+ /* Compare all signatures in the primary bucket */
+ vmat = vceqq_u16(vsig, vld1q_u16((uint16_t const *)prim_bucket_sigs));
+ x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x0001)), shift);
+ low = (uint16_t)(vaddvq_u16(x));
+ /* Compare all signatures in the secondary bucket */
+ vmat = vceqq_u16(vsig, vld1q_u16((uint16_t const *)sec_bucket_sigs));
+ x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x0001)), shift);
+ high = (uint16_t)(vaddvq_u16(x));
+ *hitmask_buffer = low | high << RTE_HASH_BUCKET_ENTRIES;
+
+ }
+ break;
+#endif
+ default:
+ for (unsigned int i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
+ *hitmask_buffer |= (sig == prim_bucket_sigs[i]) << i;
+ *hitmask_buffer |=
+ ((sig == sec_bucket_sigs[i]) << i) << RTE_HASH_BUCKET_ENTRIES;
+ }
+ }
+}
diff --git a/lib/hash/compare_signatures_generic_pvt.h b/lib/hash/compare_signatures_generic_pvt.h
new file mode 100644
index 0000000000..18c2f651c4
--- /dev/null
+++ b/lib/hash/compare_signatures_generic_pvt.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ * Copyright(c) 2018-2024 Arm Limited
+ */
+
+/*
+ * The generic version could use either a dense or sparsely packed hitmask buffer,
+ * but the dense one is slightly faster.
+ */
+
+#include <inttypes.h>
+#include <rte_common.h>
+#include <rte_vect.h>
+
+#include "rte_cuckoo_hash.h"
+#include "hash_sig_cmp_func_pvt.h"
+
+#define DENSE_HASH_BULK_LOOKUP 1
+
+static inline void
+compare_signatures_dense(uint16_t *hitmask_buffer,
+ const uint16_t *prim_bucket_sigs,
+ const uint16_t *sec_bucket_sigs,
+ uint16_t sig,
+ __rte_unused enum rte_hash_sig_compare_function sig_cmp_fn)
+{
+
+ static_assert(sizeof(*hitmask_buffer) >= 2 * (RTE_HASH_BUCKET_ENTRIES / 8),
+ "hitmask_buffer must be wide enough to fit a dense hitmask");
+
+ /* For match mask, every bit indicates a match */
+ for (unsigned int i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
+ *hitmask_buffer |= (sig == prim_bucket_sigs[i]) << i;
+ *hitmask_buffer |= ((sig == sec_bucket_sigs[i]) << i) << RTE_HASH_BUCKET_ENTRIES;
+ }
+
+}
diff --git a/lib/hash/compare_signatures_x86_pvt.h b/lib/hash/compare_signatures_x86_pvt.h
new file mode 100644
index 0000000000..932912ba19
--- /dev/null
+++ b/lib/hash/compare_signatures_x86_pvt.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ * Copyright(c) 2018-2024 Arm Limited
+ */
+
+/*
+ * x86's version uses a sparsely packed hitmask buffer:
+ * Every other bit is padding.
+ */
+
+#include <inttypes.h>
+#include <rte_common.h>
+#include <rte_vect.h>
+
+#include "rte_cuckoo_hash.h"
+#include "hash_sig_cmp_func_pvt.h"
+
+#define DENSE_HASH_BULK_LOOKUP 0
+
+static inline void
+compare_signatures_sparse(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
+ const struct rte_hash_bucket *prim_bkt,
+ const struct rte_hash_bucket *sec_bkt,
+ uint16_t sig,
+ enum rte_hash_sig_compare_function sig_cmp_fn)
+{
+ /* For match mask the first bit of every two bits indicates the match */
+ switch (sig_cmp_fn) {
+#if defined(__SSE2__) && RTE_HASH_BUCKET_ENTRIES <= 8
+ case RTE_HASH_COMPARE_SSE:
+ /* Compare all signatures in the bucket */
+ *prim_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(_mm_load_si128(
+ (__m128i const *)prim_bkt->sig_current), _mm_set1_epi16(sig)));
+ /* Extract the even-index bits only */
+ *prim_hash_matches &= 0x5555;
+ /* Compare all signatures in the bucket */
+ *sec_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(_mm_load_si128(
+ (__m128i const *)sec_bkt->sig_current), _mm_set1_epi16(sig)));
+ /* Extract the even-index bits only */
+ *sec_hash_matches &= 0x5555;
+ break;
+#endif /* defined(__SSE2__) */
+ default:
+ for (unsigned int i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
+ *prim_hash_matches |= (sig == prim_bkt->sig_current[i]) << (i << 1);
+ *sec_hash_matches |= (sig == sec_bkt->sig_current[i]) << (i << 1);
+ }
+ }
+}
diff --git a/lib/hash/hash_sig_cmp_func_pvt.h b/lib/hash/hash_sig_cmp_func_pvt.h
new file mode 100644
index 0000000000..d8d2fbffaf
--- /dev/null
+++ b/lib/hash/hash_sig_cmp_func_pvt.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 Arm Limited
+ */
+
+#ifndef _SIG_CMP_FUNC_H_
+#define _SIG_CMP_FUNC_H_
+
+/** Enum used to select the implementation of the signature comparison function to use
+ * e.g. a system supporting SVE might want to use a NEON implementation.
+ * These values may change and are for internal use only.
+ */
+enum rte_hash_sig_compare_function {
+ RTE_HASH_COMPARE_SCALAR = 0,
+ RTE_HASH_COMPARE_SSE,
+ RTE_HASH_COMPARE_NEON,
+ RTE_HASH_COMPARE_SVE,
+ RTE_HASH_COMPARE_NUM
+};
+
+#endif
diff --git a/lib/hash/rte_cuckoo_hash.c b/lib/hash/rte_cuckoo_hash.c
index d87aa52b5b..61cc12d83b 100644
--- a/lib/hash/rte_cuckoo_hash.c
+++ b/lib/hash/rte_cuckoo_hash.c
@@ -32,6 +32,15 @@ RTE_LOG_REGISTER_DEFAULT(hash_logtype, INFO);
RTE_LOG_LINE(level, HASH, "" __VA_ARGS__)
#include "rte_cuckoo_hash.h"
+#include "hash_sig_cmp_func_pvt.h"
+
+#if defined(__ARM_NEON)
+#include "compare_signatures_arm_pvt.h"
+#elif defined(__SSE2__)
+#include "compare_signatures_x86_pvt.h"
+#else
+#include "compare_signatures_generic_pvt.h"
+#endif
/* Mask of all flags supported by this version */
#define RTE_HASH_EXTRA_FLAGS_MASK (RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT | \
@@ -1880,63 +1889,6 @@ rte_hash_free_key_with_position(const struct rte_hash *h,
}
-static inline void
-compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
- const struct rte_hash_bucket *prim_bkt,
- const struct rte_hash_bucket *sec_bkt,
- uint16_t sig,
- enum rte_hash_sig_compare_function sig_cmp_fn)
-{
- unsigned int i;
-
- /* For match mask the first bit of every two bits indicates the match */
- switch (sig_cmp_fn) {
-#if defined(__SSE2__)
- case RTE_HASH_COMPARE_SSE:
- /* Compare all signatures in the bucket */
- *prim_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
- _mm_load_si128(
- (__m128i const *)prim_bkt->sig_current),
- _mm_set1_epi16(sig)));
- /* Extract the even-index bits only */
- *prim_hash_matches &= 0x5555;
- /* Compare all signatures in the bucket */
- *sec_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
- _mm_load_si128(
- (__m128i const *)sec_bkt->sig_current),
- _mm_set1_epi16(sig)));
- /* Extract the even-index bits only */
- *sec_hash_matches &= 0x5555;
- break;
-#elif defined(__ARM_NEON)
- case RTE_HASH_COMPARE_NEON: {
- uint16x8_t vmat, vsig, x;
- int16x8_t shift = {-15, -13, -11, -9, -7, -5, -3, -1};
-
- vsig = vld1q_dup_u16((uint16_t const *)&sig);
- /* Compare all signatures in the primary bucket */
- vmat = vceqq_u16(vsig,
- vld1q_u16((uint16_t const *)prim_bkt->sig_current));
- x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift);
- *prim_hash_matches = (uint32_t)(vaddvq_u16(x));
- /* Compare all signatures in the secondary bucket */
- vmat = vceqq_u16(vsig,
- vld1q_u16((uint16_t const *)sec_bkt->sig_current));
- x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift);
- *sec_hash_matches = (uint32_t)(vaddvq_u16(x));
- }
- break;
-#endif
- default:
- for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
- *prim_hash_matches |=
- ((sig == prim_bkt->sig_current[i]) << (i << 1));
- *sec_hash_matches |=
- ((sig == sec_bkt->sig_current[i]) << (i << 1));
- }
- }
-}
-
static inline void
__bulk_lookup_l(const struct rte_hash *h, const void **keys,
const struct rte_hash_bucket **primary_bkt,
@@ -1947,22 +1899,41 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
uint64_t hits = 0;
int32_t i;
int32_t ret;
- uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
- uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
struct rte_hash_bucket *cur_bkt, *next_bkt;
+#if DENSE_HASH_BULK_LOOKUP
+ const int hitmask_padding = 0;
+ uint16_t hitmask_buffer[RTE_HASH_LOOKUP_BULK_MAX] = {0};
+#else
+ const int hitmask_padding = 1;
+ uint32_t prim_hitmask_buffer[RTE_HASH_LOOKUP_BULK_MAX] = {0};
+ uint32_t sec_hitmask_buffer[RTE_HASH_LOOKUP_BULK_MAX] = {0};
+#endif
+
__hash_rw_reader_lock(h);
/* Compare signatures and prefetch key slot of first hit */
for (i = 0; i < num_keys; i++) {
- compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
+#if DENSE_HASH_BULK_LOOKUP
+ uint16_t *hitmask = &hitmask_buffer[i];
+ compare_signatures_dense(hitmask,
+ primary_bkt[i]->sig_current,
+ secondary_bkt[i]->sig_current,
+ sig[i], h->sig_cmp_fn);
+ const unsigned int prim_hitmask = *(uint8_t *)(hitmask);
+ const unsigned int sec_hitmask = *((uint8_t *)(hitmask)+1);
+#else
+ compare_signatures_sparse(&prim_hitmask_buffer[i], &sec_hitmask_buffer[i],
primary_bkt[i], secondary_bkt[i],
sig[i], h->sig_cmp_fn);
+ const unsigned int prim_hitmask = prim_hitmask_buffer[i];
+ const unsigned int sec_hitmask = sec_hitmask_buffer[i];
+#endif
- if (prim_hitmask[i]) {
+ if (prim_hitmask) {
uint32_t first_hit =
- rte_ctz32(prim_hitmask[i])
- >> 1;
+ rte_ctz32(prim_hitmask)
+ >> hitmask_padding;
uint32_t key_idx =
primary_bkt[i]->key_idx[first_hit];
const struct rte_hash_key *key_slot =
@@ -1973,10 +1944,10 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
continue;
}
- if (sec_hitmask[i]) {
+ if (sec_hitmask) {
uint32_t first_hit =
- rte_ctz32(sec_hitmask[i])
- >> 1;
+ rte_ctz32(sec_hitmask)
+ >> hitmask_padding;
uint32_t key_idx =
secondary_bkt[i]->key_idx[first_hit];
const struct rte_hash_key *key_slot =
@@ -1990,10 +1961,18 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
/* Compare keys, first hits in primary first */
for (i = 0; i < num_keys; i++) {
positions[i] = -ENOENT;
- while (prim_hitmask[i]) {
+#if DENSE_HASH_BULK_LOOKUP
+ uint16_t *hitmask = &hitmask_buffer[i];
+ unsigned int prim_hitmask = *(uint8_t *)(hitmask);
+ unsigned int sec_hitmask = *((uint8_t *)(hitmask)+1);
+#else
+ unsigned int prim_hitmask = prim_hitmask_buffer[i];
+ unsigned int sec_hitmask = sec_hitmask_buffer[i];
+#endif
+ while (prim_hitmask) {
uint32_t hit_index =
- rte_ctz32(prim_hitmask[i])
- >> 1;
+ rte_ctz32(prim_hitmask)
+ >> hitmask_padding;
uint32_t key_idx =
primary_bkt[i]->key_idx[hit_index];
const struct rte_hash_key *key_slot =
@@ -2015,13 +1994,13 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
positions[i] = key_idx - 1;
goto next_key;
}
- prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
+ prim_hitmask &= ~(1 << (hit_index << hitmask_padding));
}
- while (sec_hitmask[i]) {
+ while (sec_hitmask) {
uint32_t hit_index =
- rte_ctz32(sec_hitmask[i])
- >> 1;
+ rte_ctz32(sec_hitmask)
+ >> hitmask_padding;
uint32_t key_idx =
secondary_bkt[i]->key_idx[hit_index];
const struct rte_hash_key *key_slot =
@@ -2044,7 +2023,7 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
positions[i] = key_idx - 1;
goto next_key;
}
- sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
+ sec_hitmask &= ~(1 << (hit_index << hitmask_padding));
}
next_key:
continue;
@@ -2094,11 +2073,20 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
uint64_t hits = 0;
int32_t i;
int32_t ret;
- uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
- uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
struct rte_hash_bucket *cur_bkt, *next_bkt;
uint32_t cnt_b, cnt_a;
+#if DENSE_HASH_BULK_LOOKUP
+ const int hitmask_padding = 0;
+ uint16_t hitmask_buffer[RTE_HASH_LOOKUP_BULK_MAX] = {0};
+ static_assert(sizeof(*hitmask_buffer)*8/2 == RTE_HASH_BUCKET_ENTRIES,
+ "The hitmask buffer must be exactly wide enough to hold the whole hitmask when it is dense");
+#else
+ const int hitmask_padding = 1;
+ uint32_t prim_hitmask_buffer[RTE_HASH_LOOKUP_BULK_MAX] = {0};
+ uint32_t sec_hitmask_buffer[RTE_HASH_LOOKUP_BULK_MAX] = {0};
+#endif
+
for (i = 0; i < num_keys; i++)
positions[i] = -ENOENT;
@@ -2112,14 +2100,26 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
/* Compare signatures and prefetch key slot of first hit */
for (i = 0; i < num_keys; i++) {
- compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
+#if DENSE_HASH_BULK_LOOKUP
+ uint16_t *hitmask = &hitmask_buffer[i];
+ compare_signatures_dense(hitmask,
+ primary_bkt[i]->sig_current,
+ secondary_bkt[i]->sig_current,
+ sig[i], h->sig_cmp_fn);
+ const unsigned int prim_hitmask = *(uint8_t *)(hitmask);
+ const unsigned int sec_hitmask = *((uint8_t *)(hitmask)+1);
+#else
+ compare_signatures_sparse(&prim_hitmask_buffer[i], &sec_hitmask_buffer[i],
primary_bkt[i], secondary_bkt[i],
sig[i], h->sig_cmp_fn);
+ const unsigned int prim_hitmask = prim_hitmask_buffer[i];
+ const unsigned int sec_hitmask = sec_hitmask_buffer[i];
+#endif
- if (prim_hitmask[i]) {
+ if (prim_hitmask) {
uint32_t first_hit =
- rte_ctz32(prim_hitmask[i])
- >> 1;
+ rte_ctz32(prim_hitmask)
+ >> hitmask_padding;
uint32_t key_idx =
primary_bkt[i]->key_idx[first_hit];
const struct rte_hash_key *key_slot =
@@ -2130,10 +2130,10 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
continue;
}
- if (sec_hitmask[i]) {
+ if (sec_hitmask) {
uint32_t first_hit =
- rte_ctz32(sec_hitmask[i])
- >> 1;
+ rte_ctz32(sec_hitmask)
+ >> hitmask_padding;
uint32_t key_idx =
secondary_bkt[i]->key_idx[first_hit];
const struct rte_hash_key *key_slot =
@@ -2146,10 +2146,18 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
/* Compare keys, first hits in primary first */
for (i = 0; i < num_keys; i++) {
- while (prim_hitmask[i]) {
+#if DENSE_HASH_BULK_LOOKUP
+ uint16_t *hitmask = &hitmask_buffer[i];
+ unsigned int prim_hitmask = *(uint8_t *)(hitmask);
+ unsigned int sec_hitmask = *((uint8_t *)(hitmask)+1);
+#else
+ unsigned int prim_hitmask = prim_hitmask_buffer[i];
+ unsigned int sec_hitmask = sec_hitmask_buffer[i];
+#endif
+ while (prim_hitmask) {
uint32_t hit_index =
- rte_ctz32(prim_hitmask[i])
- >> 1;
+ rte_ctz32(prim_hitmask)
+ >> hitmask_padding;
uint32_t key_idx =
rte_atomic_load_explicit(
&primary_bkt[i]->key_idx[hit_index],
@@ -2175,13 +2183,13 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
positions[i] = key_idx - 1;
goto next_key;
}
- prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
+ prim_hitmask &= ~(1 << (hit_index << hitmask_padding));
}
- while (sec_hitmask[i]) {
+ while (sec_hitmask) {
uint32_t hit_index =
- rte_ctz32(sec_hitmask[i])
- >> 1;
+ rte_ctz32(sec_hitmask)
+ >> hitmask_padding;
uint32_t key_idx =
rte_atomic_load_explicit(
&secondary_bkt[i]->key_idx[hit_index],
@@ -2208,7 +2216,7 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
positions[i] = key_idx - 1;
goto next_key;
}
- sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
+ sec_hitmask &= ~(1 << (hit_index << hitmask_padding));
}
next_key:
continue;
diff --git a/lib/hash/rte_cuckoo_hash.h b/lib/hash/rte_cuckoo_hash.h
index a528f1d1a0..26a992419a 100644
--- a/lib/hash/rte_cuckoo_hash.h
+++ b/lib/hash/rte_cuckoo_hash.h
@@ -134,14 +134,6 @@ struct rte_hash_key {
char key[0];
};
-/* All different signature compare functions */
-enum rte_hash_sig_compare_function {
- RTE_HASH_COMPARE_SCALAR = 0,
- RTE_HASH_COMPARE_SSE,
- RTE_HASH_COMPARE_NEON,
- RTE_HASH_COMPARE_NUM
-};
-
/** Bucket structure */
struct __rte_cache_aligned rte_hash_bucket {
uint16_t sig_current[RTE_HASH_BUCKET_ENTRIES];
@@ -199,7 +191,7 @@ struct __rte_cache_aligned rte_hash {
/**< Custom function used to compare keys. */
enum cmp_jump_table_case cmp_jump_table_idx;
/**< Indicates which compare function to use. */
- enum rte_hash_sig_compare_function sig_cmp_fn;
+ unsigned int sig_cmp_fn;
/**< Indicates which signature compare function to use. */
uint32_t bucket_bitmask;
/**< Bitmask for getting bucket index from hash signature. */
--
2.25.1
Thread overview: 73+ messages
2023-10-20 16:51 [PATCH v2 0/4] hash: add SVE support for bulk key lookup Yoan Picchi
2023-10-20 16:51 ` [PATCH v2 1/4] hash: pack the hitmask for hash in bulk lookup Yoan Picchi
2023-10-20 16:51 ` [PATCH v2 2/4] hash: optimize compare signature for NEON Yoan Picchi
2023-10-20 16:51 ` [PATCH v2 3/4] test/hash: check bulk lookup of keys after collision Yoan Picchi
2023-10-20 16:51 ` [PATCH v2 4/4] hash: add SVE support for bulk key lookup Yoan Picchi
2024-02-27 17:41 ` [PATCH v5 0/4] " Yoan Picchi
2024-02-27 17:42 ` [PATCH v5 1/4] hash: pack the hitmask for hash in bulk lookup Yoan Picchi
2024-02-27 17:42 ` [PATCH v5 2/4] hash: optimize compare signature for NEON Yoan Picchi
2024-02-27 17:42 ` [PATCH v5 3/4] test/hash: check bulk lookup of keys after collision Yoan Picchi
2024-02-27 17:42 ` [PATCH v5 4/4] hash: add SVE support for bulk key lookup Yoan Picchi
2024-02-28 10:56 ` Konstantin Ananyev
2024-02-28 14:48 ` Yoan Picchi
2024-03-04 13:35 ` Konstantin Ananyev
2024-03-05 15:36 ` Yoan Picchi
2024-03-11 23:21 ` [PATCH v6 0/4] " Yoan Picchi
2024-03-11 23:21 ` [PATCH v6 1/4] hash: pack the hitmask for hash in bulk lookup Yoan Picchi
2024-03-11 23:21 ` [PATCH v6 2/4] hash: optimize compare signature for NEON Yoan Picchi
2024-03-11 23:21 ` [PATCH v6 3/4] test/hash: check bulk lookup of keys after collision Yoan Picchi
2024-03-11 23:21 ` [PATCH v6 4/4] hash: add SVE support for bulk key lookup Yoan Picchi
2024-03-12 3:57 ` fengchengwen
2024-03-12 15:08 ` Yoan Picchi
2024-03-12 15:42 ` [PATCH v7 0/4] " Yoan Picchi
2024-03-12 15:42 ` [PATCH v7 1/4] hash: pack the hitmask for hash in bulk lookup Yoan Picchi
2024-03-19 10:41 ` Konstantin Ananyev
2024-03-19 13:09 ` Yoan Picchi
2024-03-19 13:25 ` Konstantin Ananyev
2024-03-19 16:09 ` Stephen Hemminger
2024-03-12 15:42 ` [PATCH v7 2/4] hash: optimize compare signature for NEON Yoan Picchi
2024-03-20 7:37 ` [EXTERNAL] " Pavan Nikhilesh Bhagavatula
2024-04-11 13:32 ` Yoan Picchi
2024-03-12 15:42 ` [PATCH v7 3/4] test/hash: check bulk lookup of keys after collision Yoan Picchi
2024-03-12 15:42 ` [PATCH v7 4/4] hash: add SVE support for bulk key lookup Yoan Picchi
2024-04-17 16:08 ` [PATCH v8 0/4] " Yoan Picchi
2024-04-17 16:08 ` [PATCH v8 1/4] hash: pack the hitmask for hash in bulk lookup Yoan Picchi
2024-04-17 18:12 ` Stephen Hemminger
2024-04-17 16:08 ` [PATCH v8 2/4] hash: optimize compare signature for NEON Yoan Picchi
2024-04-17 16:08 ` [PATCH v8 3/4] test/hash: check bulk lookup of keys after collision Yoan Picchi
2024-04-17 16:08 ` [PATCH v8 4/4] hash: add SVE support for bulk key lookup Yoan Picchi
2024-04-30 16:27 ` [PATCH v9 0/4] " Yoan Picchi
2024-04-30 16:27 ` [PATCH v9 1/4] hash: pack the hitmask for hash in bulk lookup Yoan Picchi
2024-06-14 13:42 ` David Marchand
2024-04-30 16:27 ` [PATCH v9 2/4] hash: optimize compare signature for NEON Yoan Picchi
2024-04-30 16:27 ` [PATCH v9 3/4] test/hash: check bulk lookup of keys after collision Yoan Picchi
2024-04-30 16:27 ` [PATCH v9 4/4] hash: add SVE support for bulk key lookup Yoan Picchi
2024-06-14 13:42 ` David Marchand
2024-06-14 13:43 ` [PATCH v9 0/4] " David Marchand
2024-06-18 15:55 ` Konstantin Ananyev
2024-06-27 14:48 ` Thomas Monjalon
2024-07-03 17:13 ` [PATCH v10 " Yoan Picchi
2024-07-03 17:13 ` Yoan Picchi [this message]
2024-07-04 20:31 ` [PATCH v10 1/4] hash: pack the hitmask for hash in bulk lookup David Marchand
2024-07-05 17:43 ` Yoan Picchi
2024-07-07 12:08 ` Thomas Monjalon
2024-07-03 17:13 ` [PATCH v10 2/4] hash: optimize compare signature for NEON Yoan Picchi
2024-07-03 17:13 ` [PATCH v10 3/4] test/hash: check bulk lookup of keys after collision Yoan Picchi
2024-07-03 17:13 ` [PATCH v10 4/4] hash: add SVE support for bulk key lookup Yoan Picchi
2024-07-05 17:45 ` [PATCH v11 0/7] " Yoan Picchi
2024-07-05 17:45 ` [PATCH v11 1/7] hash: make compare signature function enum private Yoan Picchi
2024-07-05 17:45 ` [PATCH v11 2/7] hash: split compare signature into arch-specific files Yoan Picchi
2024-07-05 17:45 ` [PATCH v11 3/7] hash: add a check on hash entry max size Yoan Picchi
2024-07-05 17:45 ` [PATCH v11 4/7] hash: pack the hitmask for hash in bulk lookup Yoan Picchi
2024-07-05 17:45 ` [PATCH v11 5/7] hash: optimize compare signature for NEON Yoan Picchi
2024-07-05 17:45 ` [PATCH v11 6/7] test/hash: check bulk lookup of keys after collision Yoan Picchi
2024-07-05 17:45 ` [PATCH v11 7/7] hash: add SVE support for bulk key lookup Yoan Picchi
2024-07-08 12:14 ` [PATCH v12 0/7] " Yoan Picchi
2024-07-08 12:14 ` [PATCH v12 1/7] hash: make compare signature function enum private Yoan Picchi
2024-07-08 12:14 ` [PATCH v12 2/7] hash: split compare signature into arch-specific files Yoan Picchi
2024-07-08 12:14 ` [PATCH v12 3/7] hash: add a check on hash entry max size Yoan Picchi
2024-07-08 12:14 ` [PATCH v12 4/7] hash: pack the hitmask for hash in bulk lookup Yoan Picchi
2024-07-08 12:14 ` [PATCH v12 5/7] hash: optimize compare signature for NEON Yoan Picchi
2024-07-08 12:14 ` [PATCH v12 6/7] test/hash: check bulk lookup of keys after collision Yoan Picchi
2024-07-08 12:14 ` [PATCH v12 7/7] hash: add SVE support for bulk key lookup Yoan Picchi
2024-07-09 4:48 ` [PATCH v12 0/7] " David Marchand