On Tue, 1 Jul 2025, 20:20 , wrote: > From: Sun Yuechi > > Implement LPM lookupx4 function for RISC-V architecture using RISC-V > Vector Extension instruction set > > Signed-off-by: Sun Yuechi > Reviewed-by: Stanisław Kardach > --- > MAINTAINERS | 2 ++ > lib/lpm/meson.build | 1 + > lib/lpm/rte_lpm.h | 2 ++ > lib/lpm/rte_lpm_rvv.h | 59 +++++++++++++++++++++++++++++++++++++++++++ > 4 files changed, 64 insertions(+) > create mode 100644 lib/lpm/rte_lpm_rvv.h > > diff --git a/MAINTAINERS b/MAINTAINERS > index 0e9357f3a3..9bd97879b6 100644 > --- a/MAINTAINERS > +++ b/MAINTAINERS > @@ -341,6 +341,8 @@ M: Stanislaw Kardach > F: config/riscv/ > F: doc/guides/linux_gsg/cross_build_dpdk_for_riscv.rst > F: lib/eal/riscv/ > +M: sunyuechi > +F: lib/**/*rvv* > > Intel x86 > M: Bruce Richardson > diff --git a/lib/lpm/meson.build b/lib/lpm/meson.build > index cff8fed473..c4522eaf0c 100644 > --- a/lib/lpm/meson.build > +++ b/lib/lpm/meson.build > @@ -11,6 +11,7 @@ indirect_headers += files( > 'rte_lpm_scalar.h', > 'rte_lpm_sse.h', > 'rte_lpm_sve.h', > + 'rte_lpm_rvv.h', > ) > deps += ['hash'] > deps += ['rcu'] > diff --git a/lib/lpm/rte_lpm.h b/lib/lpm/rte_lpm.h > index 6bf8d9d883..edfe77b458 100644 > --- a/lib/lpm/rte_lpm.h > +++ b/lib/lpm/rte_lpm.h > @@ -420,6 +420,8 @@ rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, > uint32_t hop[4], > #include "rte_lpm_altivec.h" > #elif defined(RTE_ARCH_X86) > #include "rte_lpm_sse.h" > +#elif defined(RTE_ARCH_RISCV) && defined(RTE_RISCV_FEATURE_V) > +#include "rte_lpm_rvv.h" > #else > #include "rte_lpm_scalar.h" > #endif > diff --git a/lib/lpm/rte_lpm_rvv.h b/lib/lpm/rte_lpm_rvv.h > new file mode 100644 > index 0000000000..0d3dc91055 > --- /dev/null > +++ b/lib/lpm/rte_lpm_rvv.h > @@ -0,0 +1,59 @@ > +/* SPDX-License-Identifier: BSD-3-Clause > + * Copyright (c) 2025 Institute of Software Chinese Academy of Sciences > (ISCAS). 
> + */ > + > +#ifndef _RTE_LPM_RVV_H_ > +#define _RTE_LPM_RVV_H_ > + > +#include <riscv_vector.h> > + > +#ifdef __cplusplus > +extern "C" { > +#endif > + > +#define RTE_LPM_LOOKUP_SUCCESS 0x01000000 > +#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x03000000 > + > +static inline void rte_lpm_lookupx4( > + const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4], uint32_t > defv) > +{ > + size_t vl = 4; > + > + const uint32_t *tbl24_p = (const uint32_t *)lpm->tbl24; > + uint32_t tbl_entries[4] = { > + tbl24_p[((uint32_t)ip[0]) >> 8], > + tbl24_p[((uint32_t)ip[1]) >> 8], > + tbl24_p[((uint32_t)ip[2]) >> 8], > + tbl24_p[((uint32_t)ip[3]) >> 8], > + }; > + vuint32m1_t vtbl_entry = __riscv_vle32_v_u32m1(tbl_entries, vl); > + > + vbool32_t mask = __riscv_vmseq_vx_u32m1_b32( > + __riscv_vand_vx_u32m1(vtbl_entry, > RTE_LPM_VALID_EXT_ENTRY_BITMASK, vl), > + RTE_LPM_VALID_EXT_ENTRY_BITMASK, vl); > + > + vuint32m1_t vtbl8_index = __riscv_vsll_vx_u32m1( > + __riscv_vadd_vv_u32m1( > + __riscv_vsll_vx_u32m1(__riscv_vand_vx_u32m1(vtbl_entry, > 0x00FFFFFF, vl), 8, vl), > + __riscv_vand_vx_u32m1( > + __riscv_vle32_v_u32m1((const uint32_t *)&ip, vl), > 0x000000FF, vl), > + vl), > + 2, vl); > + > + vtbl_entry = __riscv_vluxei32_v_u32m1_mu( > + mask, vtbl_entry, (const uint32_t *)(lpm->tbl8), vtbl8_index, > vl); > + > + vuint32m1_t vnext_hop = __riscv_vand_vx_u32m1(vtbl_entry, > 0x00FFFFFF, vl); > + mask = __riscv_vmseq_vx_u32m1_b32( > + __riscv_vand_vx_u32m1(vtbl_entry, RTE_LPM_LOOKUP_SUCCESS, vl), > 0, vl); > + > + vnext_hop = __riscv_vmerge_vxm_u32m1(vnext_hop, defv, mask, vl); > + > + __riscv_vse32_v_u32m1(hop, vnext_hop, vl); > +} > + > +#ifdef __cplusplus > +} > +#endif > + > +#endif /* _RTE_LPM_RVV_H_ */ > -- > 2.50.0 > >