From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <dev-bounces@dpdk.org>
Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124])
	by inbox.dpdk.org (Postfix) with ESMTP id 401A843C23;
	Wed, 28 Feb 2024 11:56:30 +0100 (CET)
Received: from mails.dpdk.org (localhost [127.0.0.1])
	by mails.dpdk.org (Postfix) with ESMTP id 340D2402AC;
	Wed, 28 Feb 2024 11:56:30 +0100 (CET)
Received: from frasgout.his.huawei.com (frasgout.his.huawei.com
 [185.176.79.56]) by mails.dpdk.org (Postfix) with ESMTP id 1FCAD40295
 for <dev@dpdk.org>; Wed, 28 Feb 2024 11:56:28 +0100 (CET)
Received: from mail.maildlp.com (unknown [172.18.186.231])
 by frasgout.his.huawei.com (SkyGuard) with ESMTP id 4TlB5P32jmz67G90;
 Wed, 28 Feb 2024 18:52:41 +0800 (CST)
Received: from frapeml500007.china.huawei.com (unknown [7.182.85.172])
 by mail.maildlp.com (Postfix) with ESMTPS id 6A9C0140D1D;
 Wed, 28 Feb 2024 18:56:27 +0800 (CST)
Received: from frapeml500007.china.huawei.com (7.182.85.172) by
 frapeml500007.china.huawei.com (7.182.85.172) with Microsoft SMTP Server
 (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id
 15.1.2507.35; Wed, 28 Feb 2024 11:56:27 +0100
Received: from frapeml500007.china.huawei.com ([7.182.85.172]) by
 frapeml500007.china.huawei.com ([7.182.85.172]) with mapi id 15.01.2507.035;
 Wed, 28 Feb 2024 11:56:27 +0100
From: Konstantin Ananyev <konstantin.ananyev@huawei.com>
To: Yoan Picchi <yoan.picchi@arm.com>, Yipeng Wang <yipeng1.wang@intel.com>,
 Sameh Gobriel <sameh.gobriel@intel.com>, Bruce Richardson
 <bruce.richardson@intel.com>, Vladimir Medvedkin
 <vladimir.medvedkin@intel.com>
CC: "dev@dpdk.org" <dev@dpdk.org>, "nd@arm.com" <nd@arm.com>, Harjot Singh
 <harjot.singh@arm.com>, Nathan Brown <nathan.brown@arm.com>, Ruifeng Wang
 <ruifeng.wang@arm.com>
Subject: RE: [PATCH v5 4/4] hash: add SVE support for bulk key lookup
Thread-Topic: [PATCH v5 4/4] hash: add SVE support for bulk key lookup
Thread-Index: AQHaaaRwFZ8tWpxDX02xRU9Vz1/oT7EflQEg
Date: Wed, 28 Feb 2024 10:56:26 +0000
Message-ID: <a6c022b63ad9493a9245f76bb398a0a5@huawei.com>
References: <20231020165159.1649282-1-yoan.picchi@arm.com>
 <20240227174203.2889333-1-yoan.picchi@arm.com>
 <20240227174203.2889333-5-yoan.picchi@arm.com>
In-Reply-To: <20240227174203.2889333-5-yoan.picchi@arm.com>
Accept-Language: en-US
Content-Language: en-US
X-MS-Has-Attach: 
X-MS-TNEF-Correlator: 
x-originating-ip: [10.206.138.42]
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: quoted-printable
MIME-Version: 1.0
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://mails.dpdk.org/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://mails.dpdk.org/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://mails.dpdk.org/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
Errors-To: dev-bounces@dpdk.org


>
> - Implemented SVE code for comparing signatures in bulk lookup.
> - Added Defines in code for SVE code support.
> - Optimise NEON code
> - New SVE code is ~5% slower than optimized NEON for N2 processor.
>
> Signed-off-by: Yoan Picchi <yoan.picchi@arm.com>
> Signed-off-by: Harjot Singh <harjot.singh@arm.com>
> Reviewed-by: Nathan Brown <nathan.brown@arm.com>
> Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
> ---
>  lib/hash/rte_cuckoo_hash.c | 196 ++++++++++++++++++++++++++++---------
>  lib/hash/rte_cuckoo_hash.h |   1 +
>  2 files changed, 151 insertions(+), 46 deletions(-)
>
> diff --git a/lib/hash/rte_cuckoo_hash.c b/lib/hash/rte_cuckoo_hash.c
> index a07dd3a28d..231d6d6ded 100644
> --- a/lib/hash/rte_cuckoo_hash.c
> +++ b/lib/hash/rte_cuckoo_hash.c
> @@ -442,8 +442,11 @@ rte_hash_create(const struct rte_hash_parameters *params)
>  		h->sig_cmp_fn = RTE_HASH_COMPARE_SSE;
>  	else
>  #elif defined(RTE_ARCH_ARM64)
> -	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
> +	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
>  		h->sig_cmp_fn = RTE_HASH_COMPARE_NEON;
> +		if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SVE))
> +			h->sig_cmp_fn = RTE_HASH_COMPARE_SVE;
> +	}
>  	else
>  #endif
>  		h->sig_cmp_fn = RTE_HASH_COMPARE_SCALAR;
> @@ -1860,37 +1863,103 @@ rte_hash_free_key_with_position(const struct rte_hash *h,
>  #if defined(__ARM_NEON)
>
>  static inline void
> -compare_signatures_dense(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
> -			const struct rte_hash_bucket *prim_bkt,
> -			const struct rte_hash_bucket *sec_bkt,
> +compare_signatures_dense(uint16_t *hitmask_buffer,
> +			const uint16_t *prim_bucket_sigs,
> +			const uint16_t *sec_bucket_sigs,
>  			uint16_t sig,
>  			enum rte_hash_sig_compare_function sig_cmp_fn)
>  {
>  	unsigned int i;
>
> +	static_assert(sizeof(*hitmask_buffer) >= 2*(RTE_HASH_BUCKET_ENTRIES/8),
> +	"The hitmask must be exactly wide enough to accept the whole hitmask if it is dense");
> +
>  	/* For match mask every bits indicates the match */
>  	switch (sig_cmp_fn) {

Can I ask to move the arch-specific comparison code into some arch-specific headers or so?
It is getting really hard to read and understand the generic code with all these #ifdefs and arch-specific instructions...
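
To illustrate what I mean - a rough sketch only; the header name, helper name and
exact prototype below are just my guess, nothing like that exists in the tree today:

/* lib/hash/rte_cuckoo_hash_neon.h - hypothetical new header */
/* Assumes RTE_HASH_BUCKET_ENTRIES <= 8, same as the #if guard in this patch. */
#include <stdint.h>
#include <arm_neon.h>

/*
 * NEON body of compare_signatures_dense() moved out of rte_cuckoo_hash.c.
 * Returns the dense hitmask: bit i set for a match in primary entry i,
 * bit (i + RTE_HASH_BUCKET_ENTRIES) for a match in secondary entry i.
 */
static inline uint16_t
compare_signatures_dense_neon(const uint16_t *prim_bucket_sigs,
		const uint16_t *sec_bucket_sigs, uint16_t sig)
{
	const uint16x8_t mask = {0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80};
	const uint16x8_t vsig = vld1q_dup_u16(&sig);
	uint16x8_t hit1, hit2;

	/* Compare all signatures in the primary bucket */
	hit1 = vandq_u16(vceqq_u16(vsig, vld1q_u16(prim_bucket_sigs)), mask);
	/* Compare all signatures in the secondary bucket */
	hit2 = vandq_u16(vceqq_u16(vsig, vld1q_u16(sec_bucket_sigs)), mask);

	/* Pack both results into one dense 16-bit hitmask */
	hit2 = vshlq_n_u16(hit2, RTE_HASH_BUCKET_ENTRIES);
	return vaddvq_u16(vorrq_u16(hit1, hit2));
}

With something like that, the switch() in the generic code shrinks to:

	case RTE_HASH_COMPARE_NEON:
		*hitmask_buffer = compare_signatures_dense_neon(prim_bucket_sigs,
				sec_bucket_sigs, sig);
		break;

and the SVE block could get the same treatment in its own header, so
rte_cuckoo_hash.c would keep only the dispatch logic.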

> +#if RTE_HASH_BUCKET_ENTRIES <= 8
>  	case RTE_HASH_COMPARE_NEON: {
> -		uint16x8_t vmat, x;
> +		uint16x8_t vmat, hit1, hit2;
>  		const uint16x8_t mask = {0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80};
>  		const uint16x8_t vsig = vld1q_dup_u16((uint16_t const *)&sig);
>
>  		/* Compare all signatures in the primary bucket */
> -		vmat = vceqq_u16(vsig, vld1q_u16((uint16_t const *)prim_bkt->sig_current));
> -		x = vandq_u16(vmat, mask);
> -		*prim_hash_matches = (uint32_t)(vaddvq_u16(x));
> +		vmat = vceqq_u16(vsig, vld1q_u16(prim_bucket_sigs));
> +		hit1 = vandq_u16(vmat, mask);
> +
>  		/* Compare all signatures in the secondary bucket */
> -		vmat = vceqq_u16(vsig, vld1q_u16((uint16_t const *)sec_bkt->sig_current));
> -		x = vandq_u16(vmat, mask);
> -		*sec_hash_matches = (uint32_t)(vaddvq_u16(x));
> +		vmat = vceqq_u16(vsig, vld1q_u16(sec_bucket_sigs));
> +		hit2 = vandq_u16(vmat, mask);
> +
> +		hit2 = vshlq_n_u16(hit2, RTE_HASH_BUCKET_ENTRIES);
> +		hit2 = vorrq_u16(hit1, hit2);
> +		*hitmask_buffer = vaddvq_u16(hit2);
> +		}
> +		break;
> +#endif
> +#if defined(RTE_HAS_SVE_ACLE)
> +	case RTE_HASH_COMPARE_SVE: {
> +		svuint16_t vsign, shift, sv_matches;
> +		svbool_t pred, match, bucket_wide_pred;
> +		int i = 0;
> +		uint64_t vl = svcnth();
> +
> +		vsign = svdup_u16(sig);
> +		shift = svindex_u16(0, 1);
> +
> +		if (vl >= 2 * RTE_HASH_BUCKET_ENTRIES && RTE_HASH_BUCKET_ENTRIES <= 8) {
> +			svuint16_t primary_array_vect, secondary_array_vect;
> +			bucket_wide_pred = svwhilelt_b16(0, RTE_HASH_BUCKET_ENTRIES);
> +			primary_array_vect = svld1_u16(bucket_wide_pred, prim_bucket_sigs);
> +			secondary_array_vect = svld1_u16(bucket_wide_pred, sec_bucket_sigs);
> +
> +			/* We merged the two vectors so we can do both comparison at once */
> +			primary_array_vect = svsplice_u16(bucket_wide_pred,
> +				primary_array_vect,
> +				secondary_array_vect);
> +			pred = svwhilelt_b16(0, 2*RTE_HASH_BUCKET_ENTRIES);
> +
> +			/* Compare all signatures in the buckets */
> +			match = svcmpeq_u16(pred, vsign, primary_array_vect);
> +			if (svptest_any(svptrue_b16(), match)) {
> +				sv_matches = svdup_u16(1);
> +				sv_matches = svlsl_u16_z(match, sv_matches, shift);
> +				*hitmask_buffer = svorv_u16(svptrue_b16(), sv_matches);
> +			}
> +		} else {
> +			do {
> +				pred = svwhilelt_b16(i, RTE_HASH_BUCKET_ENTRIES);
> +				uint16_t lower_half = 0;
> +				uint16_t upper_half = 0;
> +				/* Compare all signatures in the primary bucket */
> +				match = svcmpeq_u16(pred, vsign, svld1_u16(pred,
> +							&prim_bucket_sigs[i]));
> +				if (svptest_any(svptrue_b16(), match)) {
> +					sv_matches = svdup_u16(1);
> +					sv_matches = svlsl_u16_z(match, sv_matches, shift);
> +					lower_half = svorv_u16(svptrue_b16(), sv_matches);
> +				}
> +				/* Compare all signatures in the secondary bucket */
> +				match = svcmpeq_u16(pred, vsign, svld1_u16(pred,
> +							&sec_bucket_sigs[i]));
> +				if (svptest_any(svptrue_b16(), match)) {
> +					sv_matches = svdup_u16(1);
> +					sv_matches = svlsl_u16_z(match, sv_matches, shift);
> +					upper_half = svorv_u16(svptrue_b16(), sv_matches)
> +						<< RTE_HASH_BUCKET_ENTRIES;
> +				}
> +				hitmask_buffer[i/8] = upper_half | lower_half;
> +				i += vl;
> +			} while (i < RTE_HASH_BUCKET_ENTRIES);
> +		}
>  		}
>  		break;
> +#endif
>  	default:
>  		for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
> -			*prim_hash_matches |=
> -				((sig == prim_bkt->sig_current[i]) << i);
> -			*sec_hash_matches |=
> -				((sig == sec_bkt->sig_current[i]) << i);
> +			*hitmask_buffer |=
> +				((sig == prim_bucket_sigs[i]) << i);
> +			*hitmask_buffer |=
> +				((sig == sec_bucket_sigs[i]) << i) << RTE_HASH_BUCKET_ENTRIES;
>  		}
>  	}
>  }
> @@ -1908,7 +1977,7 @@ compare_signatures_sparse(uint32_t *prim_hash_matches, uint32_t *sec_hash_matche
>
>  	/* For match mask the first bit of every two bits indicates the match */
>  	switch (sig_cmp_fn) {
> -#if defined(__SSE2__)
> +#if defined(__SSE2__) && RTE_HASH_BUCKET_ENTRIES <= 8
>  	case RTE_HASH_COMPARE_SSE:
>  		/* Compare all signatures in the bucket */
>  		*prim_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
> @@ -1948,14 +2017,18 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
>  	uint64_t hits = 0;
>  	int32_t i;
>  	int32_t ret;
> -	uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
> -	uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
>  	struct rte_hash_bucket *cur_bkt, *next_bkt;
>
>  #if defined(__ARM_NEON)
>  	const int hitmask_padding = 0;
> +	uint16_t hitmask_buffer[RTE_HASH_LOOKUP_BULK_MAX] = {0};
> +
> +	static_assert(sizeof(*hitmask_buffer)*8/2 == RTE_HASH_BUCKET_ENTRIES,
> +	"The hitmask must be exactly wide enough to accept the whole hitmask when it is dense");
>  #else
>  	const int hitmask_padding = 1;
> +	uint32_t prim_hitmask_buffer[RTE_HASH_LOOKUP_BULK_MAX] = {0};
> +	uint32_t sec_hitmask_buffer[RTE_HASH_LOOKUP_BULK_MAX] = {0};
>  #endif
>
>  	__hash_rw_reader_lock(h);
> @@ -1963,18 +2036,24 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
>  	/* Compare signatures and prefetch key slot of first hit */
>  	for (i = 0; i < num_keys; i++) {
>  #if defined(__ARM_NEON)
> -		compare_signatures_dense(&prim_hitmask[i], &sec_hitmask[i],
> -			primary_bkt[i], secondary_bkt[i],
> +		uint16_t *hitmask = &hitmask_buffer[i];
> +		compare_signatures_dense(hitmask,
> +			primary_bkt[i]->sig_current,
> +			secondary_bkt[i]->sig_current,
>  			sig[i], h->sig_cmp_fn);
> +		const unsigned int prim_hitmask = *(uint8_t *)(hitmask);
> +		const unsigned int sec_hitmask = *((uint8_t *)(hitmask)+1);
>  #else
> -		compare_signatures_sparse(&prim_hitmask[i], &sec_hitmask[i],
> +		compare_signatures_sparse(&prim_hitmask_buffer[i], &sec_hitmask_buffer[i],
>  			primary_bkt[i], secondary_bkt[i],
>  			sig[i], h->sig_cmp_fn);
> +		const unsigned int prim_hitmask = prim_hitmask_buffer[i];
> +		const unsigned int sec_hitmask = sec_hitmask_buffer[i];
>  #endif
>
> -		if (prim_hitmask[i]) {
> +		if (prim_hitmask) {
>  			uint32_t first_hit =
> -					rte_ctz32(prim_hitmask[i])
> +					rte_ctz32(prim_hitmask)
>  					>> hitmask_padding;
>  			uint32_t key_idx =
>  				primary_bkt[i]->key_idx[first_hit];
> @@ -1986,9 +2065,9 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
>  			continue;
>  		}
>
> -		if (sec_hitmask[i]) {
> +		if (sec_hitmask) {
>  			uint32_t first_hit =
> -					rte_ctz32(sec_hitmask[i])
> +					rte_ctz32(sec_hitmask)
>  					>> hitmask_padding;
>  			uint32_t key_idx =
>  				secondary_bkt[i]->key_idx[first_hit];
> @@ -2003,9 +2082,17 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
>  	/* Compare keys, first hits in primary first */
>  	for (i = 0; i < num_keys; i++) {
>  		positions[i] = -ENOENT;
> -		while (prim_hitmask[i]) {
> +#if defined(__ARM_NEON)
> +		uint16_t *hitmask = &hitmask_buffer[i];
> +		unsigned int prim_hitmask = *(uint8_t *)(hitmask);
> +		unsigned int sec_hitmask = *((uint8_t *)(hitmask)+1);
> +#else
> +		unsigned int prim_hitmask = prim_hitmask_buffer[i];
> +		unsigned int sec_hitmask = sec_hitmask_buffer[i];
> +#endif
> +		while (prim_hitmask) {
>  			uint32_t hit_index =
> -					rte_ctz32(prim_hitmask[i])
> +					rte_ctz32(prim_hitmask)
>  					>> hitmask_padding;
>  			uint32_t key_idx =
>  				primary_bkt[i]->key_idx[hit_index];
> @@ -2028,12 +2115,12 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
>  				positions[i] = key_idx - 1;
>  				goto next_key;
>  			}
> -			prim_hitmask[i] &= ~(1 << (hit_index << hitmask_padding));
> +			prim_hitmask &= ~(1 << (hit_index << hitmask_padding));
>  		}
>
> -		while (sec_hitmask[i]) {
> +		while (sec_hitmask) {
>  			uint32_t hit_index =
> -					rte_ctz32(sec_hitmask[i])
> +					rte_ctz32(sec_hitmask)
>  					>> hitmask_padding;
>  			uint32_t key_idx =
>  				secondary_bkt[i]->key_idx[hit_index];
> @@ -2057,7 +2144,7 @@ __bulk_lookup_l(const struct rte_hash *h, const void **keys,
>  				positions[i] = key_idx - 1;
>  				goto next_key;
>  			}
> -			sec_hitmask[i] &= ~(1 << (hit_index << hitmask_padding));
> +			sec_hitmask &= ~(1 << (hit_index << hitmask_padding));
>  		}
>  next_key:
>  		continue;
> @@ -2107,15 +2194,18 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
>  	uint64_t hits = 0;
>  	int32_t i;
>  	int32_t ret;
> -	uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
> -	uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
>  	struct rte_hash_bucket *cur_bkt, *next_bkt;
>  	uint32_t cnt_b, cnt_a;
>
>  #if defined(__ARM_NEON)
>  	const int hitmask_padding = 0;
> +	uint16_t hitmask_buffer[RTE_HASH_LOOKUP_BULK_MAX] = {0};
> +	static_assert(sizeof(*hitmask_buffer)*8/2 == RTE_HASH_BUCKET_ENTRIES,
> +	"The hitmask must be exactly wide enough to accept the whole hitmask when it is dense");
>  #else
>  	const int hitmask_padding = 1;
> +	uint32_t prim_hitmask_buffer[RTE_HASH_LOOKUP_BULK_MAX] = {0};
> +	uint32_t sec_hitmask_buffer[RTE_HASH_LOOKUP_BULK_MAX] = {0};
>  #endif
>
>  	for (i = 0; i < num_keys; i++)
> @@ -2132,18 +2222,24 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
>  		/* Compare signatures and prefetch key slot of first hit */
>  		for (i = 0; i < num_keys; i++) {
>  #if defined(__ARM_NEON)
> -			compare_signatures_dense(&prim_hitmask[i], &sec_hitmask[i],
> -				primary_bkt[i], secondary_bkt[i],
> +			uint16_t *hitmask = &hitmask_buffer[i];
> +			compare_signatures_dense(hitmask,
> +				primary_bkt[i]->sig_current,
> +				secondary_bkt[i]->sig_current,
>  				sig[i], h->sig_cmp_fn);
> +			const unsigned int prim_hitmask = *(uint8_t *)(hitmask);
> +			const unsigned int sec_hitmask = *((uint8_t *)(hitmask)+1);
>  #else
> -			compare_signatures_sparse(&prim_hitmask[i], &sec_hitmask[i],
> +			compare_signatures_sparse(&prim_hitmask_buffer[i], &sec_hitmask_buffer[i],
>  				primary_bkt[i], secondary_bkt[i],
>  				sig[i], h->sig_cmp_fn);
> +			const unsigned int prim_hitmask = prim_hitmask_buffer[i];
> +			const unsigned int sec_hitmask = sec_hitmask_buffer[i];
>  #endif
>
> -			if (prim_hitmask[i]) {
> +			if (prim_hitmask) {
>  				uint32_t first_hit =
> -						rte_ctz32(prim_hitmask[i])
> +						rte_ctz32(prim_hitmask)
>  						>> hitmask_padding;
>  				uint32_t key_idx =
>  					primary_bkt[i]->key_idx[first_hit];
> @@ -2155,9 +2251,9 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
>  				continue;
>  			}
>
> -			if (sec_hitmask[i]) {
> +			if (sec_hitmask) {
>  				uint32_t first_hit =
> -						rte_ctz32(sec_hitmask[i])
> +						rte_ctz32(sec_hitmask)
>  						>> hitmask_padding;
>  				uint32_t key_idx =
>  					secondary_bkt[i]->key_idx[first_hit];
> @@ -2171,9 +2267,17 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
>
>  		/* Compare keys, first hits in primary first */
>  		for (i = 0; i < num_keys; i++) {
> -			while (prim_hitmask[i]) {
> +#if defined(__ARM_NEON)
> +			uint16_t *hitmask = &hitmask_buffer[i];
> +			unsigned int prim_hitmask = *(uint8_t *)(hitmask);
> +			unsigned int sec_hitmask = *((uint8_t *)(hitmask)+1);
> +#else
> +			unsigned int prim_hitmask = prim_hitmask_buffer[i];
> +			unsigned int sec_hitmask = sec_hitmask_buffer[i];
> +#endif
> +			while (prim_hitmask) {
>  				uint32_t hit_index =
> -						rte_ctz32(prim_hitmask[i])
> +						rte_ctz32(prim_hitmask)
>  						>> hitmask_padding;
>  				uint32_t key_idx =
>  				rte_atomic_load_explicit(
> @@ -2200,12 +2304,12 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
>  					positions[i] = key_idx - 1;
>  					goto next_key;
>  				}
> -				prim_hitmask[i] &= ~(1 << (hit_index << hitmask_padding));
> +				prim_hitmask &= ~(1 << (hit_index << hitmask_padding));
>  			}
>
> -			while (sec_hitmask[i]) {
> +			while (sec_hitmask) {
>  				uint32_t hit_index =
> -						rte_ctz32(sec_hitmask[i])
> +						rte_ctz32(sec_hitmask)
>  						>> hitmask_padding;
>  				uint32_t key_idx =
>  				rte_atomic_load_explicit(
> @@ -2233,7 +2337,7 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
>  					positions[i] = key_idx - 1;
>  					goto next_key;
>  				}
> -				sec_hitmask[i] &= ~(1 << (hit_index << hitmask_padding));
> +				sec_hitmask &= ~(1 << (hit_index << hitmask_padding));
>  			}
>  next_key:
>  			continue;
> diff --git a/lib/hash/rte_cuckoo_hash.h b/lib/hash/rte_cuckoo_hash.h
> index 8ea793c66e..ed18e1f41e 100644
> --- a/lib/hash/rte_cuckoo_hash.h
> +++ b/lib/hash/rte_cuckoo_hash.h
> @@ -137,6 +137,7 @@ enum rte_hash_sig_compare_function {
>  	RTE_HASH_COMPARE_SCALAR = 0,
>  	RTE_HASH_COMPARE_SSE,
>  	RTE_HASH_COMPARE_NEON,
> +	RTE_HASH_COMPARE_SVE,
>  	RTE_HASH_COMPARE_NUM
>  };
>
> --
> 2.34.1