rte_lpm_lookupx4 could return a wrong next hop when more than 256 tbl8
groups are created. This is caused by incorrect type casting of the
tbl8 group index that is stored in the tbl24 entry. The cast truncated
the group index, and hence the wrong tbl8 group was searched.

Fix the issue by applying a proper mask to the tbl24 entry to get the
tbl8 group index.

Fixes: cbc2f1dccfba ("lpm/arm: support NEON")
Cc: jerinj@marvell.com
Cc: stable@dpdk.org

Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com>
---
 lib/librte_lpm/rte_lpm_neon.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/lib/librte_lpm/rte_lpm_neon.h b/lib/librte_lpm/rte_lpm_neon.h
index 6c131d312..4642a866f 100644
--- a/lib/librte_lpm/rte_lpm_neon.h
+++ b/lib/librte_lpm/rte_lpm_neon.h
@@ -81,28 +81,28 @@ rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
 	if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 		i8.u32[0] = i8.u32[0] +
-			(uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			(tbl[0] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[0]];
 		tbl[0] = *ptbl;
 	}
 	if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 		i8.u32[1] = i8.u32[1] +
-			(uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			(tbl[1] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[1]];
 		tbl[1] = *ptbl;
 	}
 	if (unlikely((pt2 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 		i8.u32[2] = i8.u32[2] +
-			(uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			(tbl[2] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[2]];
 		tbl[2] = *ptbl;
 	}
 	if (unlikely((pt2 >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 		i8.u32[3] = i8.u32[3] +
-			(uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			(tbl[3] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[3]];
 		tbl[3] = *ptbl;
 	}
--
2.25.1
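A minimal standalone C sketch can show the truncation in isolation (this
is illustration only, not part of the patch; the entry value is made up,
and the locally re-declared constants are assumptions based on the
24-bit group index field in the tbl24 entry):

#include <stdint.h>
#include <stdio.h>

#define RTE_LPM_TBL8_GROUP_NUM_ENTRIES 256
#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x03000000

int main(void)
{
	/* Hypothetical tbl24 entry: valid + valid_group flag bits set,
	 * tbl8 group index 256 (0x100) in the lower 24 bits. */
	uint32_t entry = RTE_LPM_VALID_EXT_ENTRY_BITMASK | 0x100;

	/* Old code: the (uint8_t) cast keeps only the low 8 bits of the
	 * entry, so group 256 wraps around to group 0. */
	uint32_t bad = (uint8_t)entry * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

	/* Fixed code: mask off the flag bits, keep all 24 index bits. */
	uint32_t good = (entry & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

	printf("truncated offset: %u, correct offset: %u\n", bad, good);
	return 0;
}

This prints a truncated offset of 0 against a correct offset of 65536:
any group index of 256 or above wraps modulo 256 under the cast, which
matches the reported symptom of lookups landing in the wrong tbl8 group
once more than 256 groups exist.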
rte_lpm_lookupx4 could return a wrong next hop when more than 256 tbl8
groups are created. This is caused by incorrect type casting of the
tbl8 group index that is stored in the tbl24 entry. The cast truncated
the group index, and hence the wrong tbl8 group was searched.

Fix the issue by applying a proper mask to the tbl24 entry to get the
tbl8 group index.

Fixes: dc81ebbacaeb ("lpm: extend IPv4 next hop field")
Cc: michalx.kobylinski@intel.com
Cc: stable@dpdk.org

Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com>
---
 lib/librte_lpm/rte_lpm_sse.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/lib/librte_lpm/rte_lpm_sse.h b/lib/librte_lpm/rte_lpm_sse.h
index 44770b6ff..eaa863c52 100644
--- a/lib/librte_lpm/rte_lpm_sse.h
+++ b/lib/librte_lpm/rte_lpm_sse.h
@@ -82,28 +82,28 @@ rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
 	if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 		i8.u32[0] = i8.u32[0] +
-			(uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			(tbl[0] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[0]];
 		tbl[0] = *ptbl;
 	}
 	if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 		i8.u32[1] = i8.u32[1] +
-			(uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			(tbl[1] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[1]];
 		tbl[1] = *ptbl;
 	}
 	if (unlikely((pt2 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 		i8.u32[2] = i8.u32[2] +
-			(uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			(tbl[2] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[2]];
 		tbl[2] = *ptbl;
 	}
 	if (unlikely((pt2 >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 		i8.u32[3] = i8.u32[3] +
-			(uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			(tbl[3] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[3]];
 		tbl[3] = *ptbl;
 	}
--
2.25.1
rte_lpm_lookupx4 could return a wrong next hop when more than 256 tbl8
groups are created. This is caused by incorrect type casting of the
tbl8 group index that is stored in the tbl24 entry. The cast truncated
the group index, and hence the wrong tbl8 group was searched.

Fix the issue by applying a proper mask to the tbl24 entry to get the
tbl8 group index.

Fixes: d2cc7959342b ("lpm: add AltiVec for ppc64")
Cc: gowrishankar.m@linux.vnet.ibm.com
Cc: stable@dpdk.org

Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com>
---
 lib/librte_lpm/rte_lpm_altivec.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/lib/librte_lpm/rte_lpm_altivec.h b/lib/librte_lpm/rte_lpm_altivec.h
index 228c41b38..4fbc1b595 100644
--- a/lib/librte_lpm/rte_lpm_altivec.h
+++ b/lib/librte_lpm/rte_lpm_altivec.h
@@ -88,28 +88,28 @@ rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
 	if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 		i8.u32[0] = i8.u32[0] +
-			(uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			(tbl[0] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[0]];
 		tbl[0] = *ptbl;
 	}
 	if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 		i8.u32[1] = i8.u32[1] +
-			(uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			(tbl[1] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[1]];
 		tbl[1] = *ptbl;
 	}
 	if (unlikely((pt2 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 		i8.u32[2] = i8.u32[2] +
-			(uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			(tbl[2] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[2]];
 		tbl[2] = *ptbl;
 	}
 	if (unlikely((pt2 >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 		i8.u32[3] = i8.u32[3] +
-			(uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			(tbl[3] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[3]];
 		tbl[3] = *ptbl;
 	}
--
2.25.1
On 1/8/21 12:21 AM, Ruifeng Wang wrote:
> rte_lpm_lookupx4 could return a wrong next hop when more than 256 tbl8
> groups are created. This is caused by incorrect type casting of the
> tbl8 group index that is stored in the tbl24 entry. The cast truncated
> the group index, and hence the wrong tbl8 group was searched.
>
> Fix the issue by applying a proper mask to the tbl24 entry to get the
> tbl8 group index.
>
> Fixes: d2cc7959342b ("lpm: add AltiVec for ppc64")
> Cc: gowrishankar.m@linux.vnet.ibm.com
> Cc: stable@dpdk.org
>
> Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com>
> ---
> lib/librte_lpm/rte_lpm_altivec.h | 8 ++++----
> 1 file changed, 4 insertions(+), 4 deletions(-)
>
> diff --git a/lib/librte_lpm/rte_lpm_altivec.h b/lib/librte_lpm/rte_lpm_altivec.h
> index 228c41b38..4fbc1b595 100644
> --- a/lib/librte_lpm/rte_lpm_altivec.h
> +++ b/lib/librte_lpm/rte_lpm_altivec.h
> @@ -88,28 +88,28 @@ rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
>  	if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
>  			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
>  		i8.u32[0] = i8.u32[0] +
> -			(uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
> +			(tbl[0] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
>  		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[0]];
>  		tbl[0] = *ptbl;
>  	}
>  	if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
>  			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
>  		i8.u32[1] = i8.u32[1] +
> -			(uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
> +			(tbl[1] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
>  		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[1]];
>  		tbl[1] = *ptbl;
>  	}
>  	if (unlikely((pt2 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
>  			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
>  		i8.u32[2] = i8.u32[2] +
> -			(uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
> +			(tbl[2] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
>  		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[2]];
>  		tbl[2] = *ptbl;
>  	}
>  	if (unlikely((pt2 >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
>  			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
>  		i8.u32[3] = i8.u32[3] +
> -			(uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
> +			(tbl[3] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
>  		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[3]];
>  		tbl[3] = *ptbl;
>  	}
>
Tested-by: David Christensen <drc@linux.vnet.ibm.com>
On 08/01/2021 08:21, Ruifeng Wang wrote:
> rte_lpm_lookupx4 could return a wrong next hop when more than 256 tbl8
> groups are created. This is caused by incorrect type casting of the
> tbl8 group index that is stored in the tbl24 entry. The cast truncated
> the group index, and hence the wrong tbl8 group was searched.
>
> Fix the issue by applying a proper mask to the tbl24 entry to get the
> tbl8 group index.
>
> Fixes: dc81ebbacaeb ("lpm: extend IPv4 next hop field")
> Cc: michalx.kobylinski@intel.com
> Cc: stable@dpdk.org
>
> Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com>
> ---
> lib/librte_lpm/rte_lpm_sse.h | 8 ++++----
> 1 file changed, 4 insertions(+), 4 deletions(-)
>
> diff --git a/lib/librte_lpm/rte_lpm_sse.h b/lib/librte_lpm/rte_lpm_sse.h
> index 44770b6ff..eaa863c52 100644
> --- a/lib/librte_lpm/rte_lpm_sse.h
> +++ b/lib/librte_lpm/rte_lpm_sse.h
> @@ -82,28 +82,28 @@ rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
>  	if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
>  			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
>  		i8.u32[0] = i8.u32[0] +
> -			(uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
> +			(tbl[0] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
>  		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[0]];
>  		tbl[0] = *ptbl;
>  	}
>  	if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
>  			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
>  		i8.u32[1] = i8.u32[1] +
> -			(uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
> +			(tbl[1] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
>  		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[1]];
>  		tbl[1] = *ptbl;
>  	}
>  	if (unlikely((pt2 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
>  			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
>  		i8.u32[2] = i8.u32[2] +
> -			(uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
> +			(tbl[2] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
>  		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[2]];
>  		tbl[2] = *ptbl;
>  	}
>  	if (unlikely((pt2 >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
>  			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
>  		i8.u32[3] = i8.u32[3] +
> -			(uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
> +			(tbl[3] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
>  		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[3]];
>  		tbl[3] = *ptbl;
>  	}
>
Acked-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
--
Regards,
Vladimir
rte_lpm_lookupx4 could return a wrong next hop when more than 256 tbl8
groups are created. This is caused by incorrect type casting of the
tbl8 group index that is stored in the tbl24 entry. The cast truncated
the group index, and hence the wrong tbl8 group was searched.

Fix the issue by applying a proper mask to the tbl24 entry to get the
tbl8 group index.

Fixes: cbc2f1dccfba ("lpm/arm: support NEON")
Cc: jerinj@marvell.com
Cc: stable@dpdk.org

Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com>
Acked-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
---
 lib/librte_lpm/rte_lpm_neon.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/lib/librte_lpm/rte_lpm_neon.h b/lib/librte_lpm/rte_lpm_neon.h
index 6c131d312..4642a866f 100644
--- a/lib/librte_lpm/rte_lpm_neon.h
+++ b/lib/librte_lpm/rte_lpm_neon.h
@@ -81,28 +81,28 @@ rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
 	if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 		i8.u32[0] = i8.u32[0] +
-			(uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			(tbl[0] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[0]];
 		tbl[0] = *ptbl;
 	}
 	if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 		i8.u32[1] = i8.u32[1] +
-			(uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			(tbl[1] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[1]];
 		tbl[1] = *ptbl;
 	}
 	if (unlikely((pt2 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 		i8.u32[2] = i8.u32[2] +
-			(uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			(tbl[2] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[2]];
 		tbl[2] = *ptbl;
 	}
 	if (unlikely((pt2 >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 		i8.u32[3] = i8.u32[3] +
-			(uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			(tbl[3] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[3]];
 		tbl[3] = *ptbl;
 	}
--
2.25.1
rte_lpm_lookupx4 could return a wrong next hop when more than 256 tbl8
groups are created. This is caused by incorrect type casting of the
tbl8 group index that is stored in the tbl24 entry. The cast truncated
the group index, and hence the wrong tbl8 group was searched.

Fix the issue by applying a proper mask to the tbl24 entry to get the
tbl8 group index.

Fixes: dc81ebbacaeb ("lpm: extend IPv4 next hop field")
Cc: michalx.kobylinski@intel.com
Cc: stable@dpdk.org

Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com>
Acked-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
---
 lib/librte_lpm/rte_lpm_sse.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/lib/librte_lpm/rte_lpm_sse.h b/lib/librte_lpm/rte_lpm_sse.h
index 44770b6ff..eaa863c52 100644
--- a/lib/librte_lpm/rte_lpm_sse.h
+++ b/lib/librte_lpm/rte_lpm_sse.h
@@ -82,28 +82,28 @@ rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
 	if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 		i8.u32[0] = i8.u32[0] +
-			(uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			(tbl[0] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[0]];
 		tbl[0] = *ptbl;
 	}
 	if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 		i8.u32[1] = i8.u32[1] +
-			(uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			(tbl[1] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[1]];
 		tbl[1] = *ptbl;
 	}
 	if (unlikely((pt2 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 		i8.u32[2] = i8.u32[2] +
-			(uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			(tbl[2] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[2]];
 		tbl[2] = *ptbl;
 	}
 	if (unlikely((pt2 >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 		i8.u32[3] = i8.u32[3] +
-			(uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			(tbl[3] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[3]];
 		tbl[3] = *ptbl;
 	}
--
2.25.1
rte_lpm_lookupx4 could return a wrong next hop when more than 256 tbl8
groups are created. This is caused by incorrect type casting of the
tbl8 group index that is stored in the tbl24 entry. The cast truncated
the group index, and hence the wrong tbl8 group was searched.

Fix the issue by applying a proper mask to the tbl24 entry to get the
tbl8 group index.

Fixes: d2cc7959342b ("lpm: add AltiVec for ppc64")
Cc: gowrishankar.m@linux.vnet.ibm.com
Cc: stable@dpdk.org

Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com>
Tested-by: David Christensen <drc@linux.vnet.ibm.com>
Acked-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
---
 lib/librte_lpm/rte_lpm_altivec.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/lib/librte_lpm/rte_lpm_altivec.h b/lib/librte_lpm/rte_lpm_altivec.h
index 228c41b38..4fbc1b595 100644
--- a/lib/librte_lpm/rte_lpm_altivec.h
+++ b/lib/librte_lpm/rte_lpm_altivec.h
@@ -88,28 +88,28 @@ rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
 	if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 		i8.u32[0] = i8.u32[0] +
-			(uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			(tbl[0] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[0]];
 		tbl[0] = *ptbl;
 	}
 	if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 		i8.u32[1] = i8.u32[1] +
-			(uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			(tbl[1] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[1]];
 		tbl[1] = *ptbl;
 	}
 	if (unlikely((pt2 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 		i8.u32[2] = i8.u32[2] +
-			(uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			(tbl[2] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[2]];
 		tbl[2] = *ptbl;
 	}
 	if (unlikely((pt2 >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
 			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
 		i8.u32[3] = i8.u32[3] +
-			(uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			(tbl[3] & 0x00FFFFFF) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[3]];
 		tbl[3] = *ptbl;
 	}
--
2.25.1