From: Konstantin Ananyev <konstantin.ananyev@intel.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH 1/2] lpm: Introduce rte_lpm_lookupx4
Date: Thu, 22 May 2014 17:55:41 +0100
Message-Id: <1400777742-498-2-git-send-email-konstantin.ananyev@intel.com>
In-Reply-To: <1400777742-498-1-git-send-email-konstantin.ananyev@intel.com>
References: <1400777742-498-1-git-send-email-konstantin.ananyev@intel.com>
X-Mailer: git-send-email 1.7.0.7

Introduce rte_lpm_lookupx4():
- Allows lookup of four IP addresses in an LPM table.
- Uses SSE intrinsics.

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
 app/test/test_lpm.c                             |   70 ++++++++++++++
 lib/librte_eal/common/Makefile                  |    1 +
 lib/librte_eal/common/include/rte_common_vect.h |   93 ++++++++++++++++++
 lib/librte_lpm/rte_lpm.h                        |  117 +++++++++++++++++++++++
 4 files changed, 281 insertions(+), 0 deletions(-)
 create mode 100644 lib/librte_eal/common/include/rte_common_vect.h

diff --git a/app/test/test_lpm.c b/app/test/test_lpm.c
index ffed766..1b2f32a 100644
--- a/app/test/test_lpm.c
+++ b/app/test/test_lpm.c
@@ -310,6 +310,8 @@ test6(void)
 int32_t
 test7(void)
 {
+	__m128i ipx4;
+	uint16_t hop[4];
 	struct rte_lpm *lpm = NULL;
 	uint32_t ip = IPv4(0, 0, 0, 0);
 	uint8_t depth = 32, next_hop_add = 100, next_hop_return = 0;
@@ -324,6 +326,13 @@ test7(void)
 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
 
+	ipx4 = _mm_set_epi32(ip, ip + 0x100, ip - 0x100, ip);
+	rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
+	TEST_LPM_ASSERT(hop[0] == next_hop_add);
+	TEST_LPM_ASSERT(hop[1] == UINT16_MAX);
+	TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
+	TEST_LPM_ASSERT(hop[3] == next_hop_add);
+
 	status = rte_lpm_delete(lpm, ip, depth);
 	TEST_LPM_ASSERT(status == 0);
@@ -347,6 +356,8 @@ test7(void)
 int32_t
 test8(void)
 {
+	__m128i ipx4;
+	uint16_t hop[4];
 	struct rte_lpm *lpm = NULL;
 	uint32_t ip1 = IPv4(127, 255, 255, 255), ip2 = IPv4(128, 0, 0, 0);
 	uint8_t depth, next_hop_add, next_hop_return;
@@ -370,6 +381,13 @@ test8(void)
 		status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
 		TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+		ipx4 = _mm_set_epi32(ip2, ip1, ip2, ip1);
+		rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
+		TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
+		TEST_LPM_ASSERT(hop[1] == next_hop_add);
+		TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
+		TEST_LPM_ASSERT(hop[3] == next_hop_add);
 	}
 
 	/* Loop with rte_lpm_delete. */
@@ -391,6 +409,18 @@ test8(void)
 		status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
 		TEST_LPM_ASSERT(status == -ENOENT);
+
+		ipx4 = _mm_set_epi32(ip1, ip1, ip2, ip2);
+		rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
+		if (depth != 1) {
+			TEST_LPM_ASSERT(hop[0] == next_hop_add);
+			TEST_LPM_ASSERT(hop[1] == next_hop_add);
+		} else {
+			TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
+			TEST_LPM_ASSERT(hop[1] == UINT16_MAX);
+		}
+		TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
+		TEST_LPM_ASSERT(hop[3] == UINT16_MAX);
 	}
 
 	rte_lpm_free(lpm);
@@ -822,6 +852,8 @@ test11(void)
 int32_t
 test12(void)
 {
+	__m128i ipx4;
+	uint16_t hop[4];
 	struct rte_lpm *lpm = NULL;
 	uint32_t ip, i;
 	uint8_t depth, next_hop_add, next_hop_return;
@@ -842,6 +874,13 @@ test12(void)
 		TEST_LPM_ASSERT((status == 0) &&
 			(next_hop_return == next_hop_add));
 
+		ipx4 = _mm_set_epi32(ip, ip + 1, ip, ip - 1);
+		rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
+		TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
+		TEST_LPM_ASSERT(hop[1] == next_hop_add);
+		TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
+		TEST_LPM_ASSERT(hop[3] == next_hop_add);
+
 		status = rte_lpm_delete(lpm, ip, depth);
 		TEST_LPM_ASSERT(status == 0);
@@ -1237,6 +1276,37 @@ perf_test(void)
 		(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
 		(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
 
+	/* Measure LookupX4 */
+	total_time = 0;
+	count = 0;
+	for (i = 0; i < ITERATIONS; i++) {
+		static uint32_t ip_batch[BATCH_SIZE];
+		uint16_t next_hops[4];
+
+		/* Create array of random IP addresses */
+		for (j = 0; j < BATCH_SIZE; j++)
+			ip_batch[j] = rte_rand();
+
+		/* Lookup per batch */
+		begin = rte_rdtsc();
+		for (j = 0; j < BATCH_SIZE; j += RTE_DIM(next_hops)) {
+			unsigned k;
+			__m128i ipx4;
+
+			ipx4 = _mm_loadu_si128((__m128i *)(ip_batch + j));
+			ipx4 = *(__m128i *)(ip_batch + j);
+			rte_lpm_lookupx4(lpm, ipx4, next_hops, UINT16_MAX);
+			for (k = 0; k < RTE_DIM(next_hops); k++)
+				if (unlikely(next_hops[k] == UINT16_MAX))
+					count++;
+		}
+
+		total_time += rte_rdtsc() - begin;
+	}
+	printf("LPM LookupX4: %.1f cycles (fails = %.1f%%)\n",
+		(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
+		(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
+
 	/* Delete */
 	status = 0;
 	begin = rte_rdtsc();
diff --git a/lib/librte_eal/common/Makefile b/lib/librte_eal/common/Makefile
index 0016fc5..3103019 100644
--- a/lib/librte_eal/common/Makefile
+++ b/lib/librte_eal/common/Makefile
@@ -39,6 +39,7 @@ INC += rte_rwlock.h rte_spinlock.h rte_tailq.h rte_interrupts.h rte_alarm.h
 INC += rte_string_fns.h rte_cpuflags.h rte_version.h rte_tailq_elem.h
 INC += rte_eal_memconfig.h rte_malloc_heap.h
 INC += rte_hexdump.h rte_devargs.h rte_dev.h
+INC += rte_common_vect.h
 
 ifeq ($(CONFIG_RTE_INSECURE_FUNCTION_WARNING),y)
 INC += rte_warnings.h
diff --git a/lib/librte_eal/common/include/rte_common_vect.h b/lib/librte_eal/common/include/rte_common_vect.h
new file mode 100644
index 0000000..065fc52
--- /dev/null
+++ b/lib/librte_eal/common/include/rte_common_vect.h
@@ -0,0 +1,93 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_COMMON_VECT_H_
+#define _RTE_COMMON_VECT_H_
+
+/**
+ * @file
+ *
+ * RTE SSE/AVX related header.
+ */
+
+#if (defined(__ICC) || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
+
+#ifdef __SSE__
+#include <xmmintrin.h>
+#endif
+
+#ifdef __SSE2__
+#include <emmintrin.h>
+#endif
+
+#if defined (__SSE4_2__) || defined (__SSE4_1__)
+#include <smmintrin.h>
+#endif
+
+#else
+
+#include <x86intrin.h>
+
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef __m128i xmm_t;
+
+#define XMM_SIZE (sizeof (xmm_t))
+#define XMM_MASK (XMM_SIZE - 1)
+
+typedef union rte_xmm {
+	xmm_t    m;
+	uint8_t  u8[XMM_SIZE / sizeof (uint8_t)];
+	uint16_t u16[XMM_SIZE / sizeof (uint16_t)];
+	uint32_t u32[XMM_SIZE / sizeof (uint32_t)];
+	uint64_t u64[XMM_SIZE / sizeof (uint64_t)];
+	double   pd[XMM_SIZE / sizeof (double)];
+} rte_xmm_t;
+
+#ifdef RTE_ARCH_I686
+#define _mm_cvtsi128_si64(a) ({ \
+	rte_xmm_t m;            \
+	m.m = (a);              \
+	(m.u64[0]);             \
+})
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_COMMON_VECT_H_ */
diff --git a/lib/librte_lpm/rte_lpm.h b/lib/librte_lpm/rte_lpm.h
index 033f542..be8aa1d 100644
--- a/lib/librte_lpm/rte_lpm.h
+++ b/lib/librte_lpm/rte_lpm.h
@@ -45,6 +45,8 @@
 #include
 #include
 #include
+#include
+#include <rte_common_vect.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -331,6 +333,121 @@ rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t * ips,
 	return 0;
 }
 
+/* Mask four results. */
+#define RTE_LPM_MASKX4_RES	UINT64_C(0x00ff00ff00ff00ff)
+
+/**
+ * Lookup four IP addresses in an LPM table.
+ *
+ * @param lpm
+ *   LPM object handle
+ * @param ip
+ *   Four IPs to be looked up in the LPM table
+ * @param hop
+ *   Next hop of the most specific rule found for IP (valid on lookup hit only).
+ *   This is a 4-element array of two-byte values.
+ *   If the lookup was successful for the given IP, then the least significant
+ *   byte of the corresponding element is the actual next hop and the most
+ *   significant byte is zero.
+ *   If the lookup for the given IP failed, then the corresponding element
+ *   contains the default value, see the description of the next parameter.
+ * @param defv
+ *   Default value to populate into the corresponding element of the hop[]
+ *   array, if the lookup fails.
+ */
+static inline void
+rte_lpm_lookupx4(const struct rte_lpm *lpm, __m128i ip, uint16_t hop[4],
+	uint16_t defv)
+{
+	__m128i i24;
+	rte_xmm_t i8;
+	uint16_t tbl[4];
+	uint64_t idx, pt;
+
+	const __m128i mask8 =
+		_mm_set_epi32(UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX);
+
+	/*
+	 * RTE_LPM_VALID_EXT_ENTRY_BITMASK for 4 LPM entries
+	 * as one 64-bit value (0x0300030003000300).
+	 */
+	const uint64_t mask_xv =
+		((uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK |
+		(uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 16 |
+		(uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 32 |
+		(uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 48);
+
+	/*
+	 * RTE_LPM_LOOKUP_SUCCESS for 4 LPM entries
+	 * as one 64-bit value (0x0100010001000100).
+	 */
+	const uint64_t mask_v =
+		((uint64_t)RTE_LPM_LOOKUP_SUCCESS |
+		(uint64_t)RTE_LPM_LOOKUP_SUCCESS << 16 |
+		(uint64_t)RTE_LPM_LOOKUP_SUCCESS << 32 |
+		(uint64_t)RTE_LPM_LOOKUP_SUCCESS << 48);
+
+	/* get 4 indexes for tbl24[]. */
+	i24 = _mm_srli_epi32(ip, CHAR_BIT);
+
+	/* extract values from tbl24[] */
+	idx = _mm_cvtsi128_si64(i24);
+	i24 = _mm_srli_si128(i24, sizeof (uint64_t));
+
+	tbl[0] = *(const uint16_t *)&lpm->tbl24[(uint32_t)idx];
+	tbl[1] = *(const uint16_t *)&lpm->tbl24[idx >> 32];
+
+	idx = _mm_cvtsi128_si64(i24);
+
+	tbl[2] = *(const uint16_t *)&lpm->tbl24[(uint32_t)idx];
+	tbl[3] = *(const uint16_t *)&lpm->tbl24[idx >> 32];
+
+	/* get 4 indexes for tbl8[]. */
+	i8.m = _mm_and_si128(ip, mask8);
+
+	pt = (uint64_t)tbl[0] |
+		(uint64_t)tbl[1] << 16 |
+		(uint64_t)tbl[2] << 32 |
+		(uint64_t)tbl[3] << 48;
+
+	/* search successfully finished for all 4 IP addresses. */
+	if (likely((pt & mask_xv) == mask_v)) {
+		uintptr_t ph = (uintptr_t)hop;
+		*(uint64_t *)ph = pt & RTE_LPM_MASKX4_RES;
+		return;
+	}
+
+	if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
+			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
+		i8.u32[0] = i8.u32[0] +
+			(uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+		tbl[0] = *(const uint16_t *)&lpm->tbl8[i8.u32[0]];
+	}
+	if (unlikely((pt >> 16 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
+			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
+		i8.u32[1] = i8.u32[1] +
+			(uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+		tbl[1] = *(const uint16_t *)&lpm->tbl8[i8.u32[1]];
+	}
+	if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
+			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
+		i8.u32[2] = i8.u32[2] +
+			(uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+		tbl[2] = *(const uint16_t *)&lpm->tbl8[i8.u32[2]];
+	}
+	if (unlikely((pt >> 48 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
+			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
+		i8.u32[3] = i8.u32[3] +
+			(uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+		tbl[3] = *(const uint16_t *)&lpm->tbl8[i8.u32[3]];
+	}
+
+	hop[0] = (tbl[0] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[0] : defv;
+	hop[1] = (tbl[1] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[1] : defv;
+	hop[2] = (tbl[2] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[2] : defv;
+	hop[3] = (tbl[3] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[3] : defv;
+}
+
 #ifdef __cplusplus
 }
 #endif
-- 
1.7.7.6
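A minimal caller-side sketch of how rte_lpm_lookupx4() is intended to be used,
mirroring the unit tests above (not part of the patch; it assumes an
initialized EAL, the IPv4() helper macro from rte_ip.h, and illustrative
table name and parameters; error handling is omitted):

	#include <rte_lpm.h>
	#include <rte_ip.h>

	static void
	lookupx4_demo(void)
	{
		uint16_t hop[4];
		__m128i ipx4;
		/* Illustrative table parameters: 256 rules, no flags. */
		struct rte_lpm *lpm = rte_lpm_create("lookupx4_demo",
				SOCKET_ID_ANY, 256, 0);

		/* 10.0.0.0/8 -> next hop 5 (next hops are single bytes). */
		rte_lpm_add(lpm, IPv4(10, 0, 0, 0), 8, 5);

		/*
		 * Pack four addresses into one xmm register; the last argument
		 * of _mm_set_epi32() lands in lane 0 and therefore in hop[0].
		 */
		ipx4 = _mm_set_epi32(IPv4(10, 3, 3, 3), IPv4(192, 168, 0, 1),
				IPv4(10, 2, 2, 2), IPv4(10, 1, 1, 1));
		rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);

		/* hop[0] == hop[1] == hop[3] == 5; hop[2] == UINT16_MAX (miss). */

		rte_lpm_free(lpm);
	}

One design note from the patch itself: on the common path where all four
tbl24[] entries are terminal, the four 16-bit results are written to hop[]
with a single 64-bit store, which is why hop[] is declared as uint16_t[4]
rather than uint8_t[4].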