From: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
To: dev@dpdk.org
Date: Tue, 15 Dec 2020 18:25:19 +0000
Message-Id: <1608056719-400147-1-git-send-email-vladimir.medvedkin@intel.com>
X-Mailer: git-send-email 2.7.4
Subject: [dpdk-dev] [PATCH] fib6: improve lookup performance

Improve AVX512 FIB6 lookup performance by doubling the number of flows
processed at a time: each vector routine now handles two sets of IPv6
addresses in parallel.

Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
---
 lib/librte_fib/trie_avx512.c | 230 ++++++++++++++++++++++++++++---------------
 1 file changed, 153 insertions(+), 77 deletions(-)

diff --git a/lib/librte_fib/trie_avx512.c b/lib/librte_fib/trie_avx512.c
index b1c9e4e..d4d70d8 100644
--- a/lib/librte_fib/trie_avx512.c
+++ b/lib/librte_fib/trie_avx512.c
@@ -67,16 +67,22 @@ transpose_x8(uint8_t ips[8][RTE_FIB6_IPV6_ADDR_SIZE],
 }
 
 static __rte_always_inline void
-trie_vec_lookup_x16(void *p, uint8_t ips[16][RTE_FIB6_IPV6_ADDR_SIZE],
+trie_vec_lookup_x16x2(void *p, uint8_t ips[32][RTE_FIB6_IPV6_ADDR_SIZE],
 	uint64_t *next_hops, int size)
 {
 	struct rte_trie_tbl *dp = (struct rte_trie_tbl *)p;
 	const __m512i zero = _mm512_set1_epi32(0);
 	const __m512i lsb = _mm512_set1_epi32(1);
 	const __m512i two_lsb = _mm512_set1_epi32(3);
-	__m512i first, second, third, fourth; /*< IPv6 four byte chunks */
-	__m512i idxes, res, shuf_idxes;
-	__m512i tmp, tmp2, bytes, byte_chunk, base_idxes;
+	/* IPv6 four byte chunks */
+	__m512i first_1, second_1, third_1, fourth_1;
+	__m512i first_2, second_2, third_2, fourth_2;
+	__m512i idxes_1, res_1;
+	__m512i idxes_2, res_2;
+	__m512i shuf_idxes;
+	__m512i tmp_1, tmp2_1, bytes_1, byte_chunk_1;
+	__m512i tmp_2, tmp2_2, bytes_2, byte_chunk_2;
+	__m512i base_idxes;
 	/* used to mask gather values if size is 2 (16 bit next hops) */
 	const __m512i res_msk = _mm512_set1_epi32(UINT16_MAX);
 	const __rte_x86_zmm_t bswap = {
@@ -92,29 +98,41 @@ trie_vec_lookup_x16(void *p, uint8_t ips[16][RTE_FIB6_IPV6_ADDR_SIZE],
 	};
 	const __mmask64 k = 0x1111111111111111;
 	int i = 3;
-	__mmask16 msk_ext, new_msk;
+	__mmask16 msk_ext_1, new_msk_1;
+	__mmask16 msk_ext_2, new_msk_2;
 	__mmask16 exp_msk = 0x5555;
 
-	transpose_x16(ips, &first, &second, &third, &fourth);
+	transpose_x16(ips,
+		&first_1, &second_1, &third_1, &fourth_1);
+	transpose_x16(ips + 16, &first_2, &second_2, &third_2, &fourth_2);
 
 	/* get_tbl24_idx() for every 4 byte chunk */
-	idxes = _mm512_shuffle_epi8(first, bswap.z);
+	idxes_1 = _mm512_shuffle_epi8(first_1, bswap.z);
+	idxes_2 = _mm512_shuffle_epi8(first_2, bswap.z);
 
 	/**
 	 * lookup in tbl24
 	 * Put it inside a branch to make the compiler happy with -O0
 	 */
 	if (size == sizeof(uint16_t)) {
-		res = _mm512_i32gather_epi32(idxes, (const int *)dp->tbl24, 2);
-		res = _mm512_and_epi32(res, res_msk);
-	} else
-		res = _mm512_i32gather_epi32(idxes, (const int *)dp->tbl24, 4);
-
+		res_1 = _mm512_i32gather_epi32(idxes_1,
+			(const int *)dp->tbl24, 2);
+		res_2 = _mm512_i32gather_epi32(idxes_2,
+			(const int *)dp->tbl24, 2);
+		res_1 = _mm512_and_epi32(res_1, res_msk);
+		res_2 = _mm512_and_epi32(res_2, res_msk);
+	} else {
+		res_1 = _mm512_i32gather_epi32(idxes_1,
+			(const int *)dp->tbl24, 4);
+		res_2 = _mm512_i32gather_epi32(idxes_2,
+			(const int *)dp->tbl24, 4);
+	}
 	/* get extended entries indexes */
-	msk_ext = _mm512_test_epi32_mask(res, lsb);
+	msk_ext_1 = _mm512_test_epi32_mask(res_1, lsb);
+	msk_ext_2 = _mm512_test_epi32_mask(res_2, lsb);
 
-	tmp = _mm512_srli_epi32(res, 1);
+	tmp_1 = _mm512_srli_epi32(res_1, 1);
+	tmp_2 = _mm512_srli_epi32(res_2, 1);
 
 	/* idxes to retrieve bytes */
 	shuf_idxes = _mm512_setr_epi32(3, 7, 11, 15,
@@ -128,24 +146,44 @@ trie_vec_lookup_x16(void *p, uint8_t ips[16][RTE_FIB6_IPV6_ADDR_SIZE],
 			48, 52, 56, 60);
 
 	/* traverse down the trie */
-	while (msk_ext) {
-		idxes = _mm512_maskz_slli_epi32(msk_ext, tmp, 8);
-		byte_chunk = (i < 8) ?
-			((i >= 4) ? second : first) :
-			((i >= 12) ? fourth : third);
-		bytes = _mm512_maskz_shuffle_epi8(k, byte_chunk, shuf_idxes);
-		idxes = _mm512_maskz_add_epi32(msk_ext, idxes, bytes);
+	while (msk_ext_1 || msk_ext_2) {
+		idxes_1 = _mm512_maskz_slli_epi32(msk_ext_1, tmp_1, 8);
+		idxes_2 = _mm512_maskz_slli_epi32(msk_ext_2, tmp_2, 8);
+		byte_chunk_1 = (i < 8) ?
+			((i >= 4) ? second_1 : first_1) :
+			((i >= 12) ? fourth_1 : third_1);
+		byte_chunk_2 = (i < 8) ?
+			((i >= 4) ? second_2 : first_2) :
+			((i >= 12) ?
+				fourth_2 : third_2);
+		bytes_1 = _mm512_maskz_shuffle_epi8(k, byte_chunk_1,
+			shuf_idxes);
+		bytes_2 = _mm512_maskz_shuffle_epi8(k, byte_chunk_2,
+			shuf_idxes);
+		idxes_1 = _mm512_maskz_add_epi32(msk_ext_1, idxes_1, bytes_1);
+		idxes_2 = _mm512_maskz_add_epi32(msk_ext_2, idxes_2, bytes_2);
 		if (size == sizeof(uint16_t)) {
-			tmp = _mm512_mask_i32gather_epi32(zero, msk_ext,
-				idxes, (const int *)dp->tbl8, 2);
-			tmp = _mm512_and_epi32(tmp, res_msk);
-		} else
-			tmp = _mm512_mask_i32gather_epi32(zero, msk_ext,
-				idxes, (const int *)dp->tbl8, 4);
-		new_msk = _mm512_test_epi32_mask(tmp, lsb);
-		res = _mm512_mask_blend_epi32(msk_ext ^ new_msk, res, tmp);
-		tmp = _mm512_srli_epi32(tmp, 1);
-		msk_ext = new_msk;
+			tmp_1 = _mm512_mask_i32gather_epi32(zero, msk_ext_1,
+				idxes_1, (const int *)dp->tbl8, 2);
+			tmp_2 = _mm512_mask_i32gather_epi32(zero, msk_ext_2,
+				idxes_2, (const int *)dp->tbl8, 2);
+			tmp_1 = _mm512_and_epi32(tmp_1, res_msk);
+			tmp_2 = _mm512_and_epi32(tmp_2, res_msk);
+		} else {
+			tmp_1 = _mm512_mask_i32gather_epi32(zero, msk_ext_1,
+				idxes_1, (const int *)dp->tbl8, 4);
+			tmp_2 = _mm512_mask_i32gather_epi32(zero, msk_ext_2,
+				idxes_2, (const int *)dp->tbl8, 4);
+		}
+		new_msk_1 = _mm512_test_epi32_mask(tmp_1, lsb);
+		new_msk_2 = _mm512_test_epi32_mask(tmp_2, lsb);
+		res_1 = _mm512_mask_blend_epi32(msk_ext_1 ^ new_msk_1, res_1,
+			tmp_1);
+		res_2 = _mm512_mask_blend_epi32(msk_ext_2 ^ new_msk_2, res_2,
+			tmp_2);
+		tmp_1 = _mm512_srli_epi32(tmp_1, 1);
+		tmp_2 = _mm512_srli_epi32(tmp_2, 1);
+		msk_ext_1 = new_msk_1;
+		msk_ext_2 = new_msk_2;
 
 		shuf_idxes = _mm512_maskz_add_epi8(k, shuf_idxes, lsb);
 		shuf_idxes = _mm512_and_epi32(shuf_idxes, two_lsb);
@@ -153,27 +191,43 @@ trie_vec_lookup_x16(void *p, uint8_t ips[16][RTE_FIB6_IPV6_ADDR_SIZE],
 		i++;
 	}
 
-	res = _mm512_srli_epi32(res, 1);
-	tmp = _mm512_maskz_expand_epi32(exp_msk, res);
-	__m256i tmp256;
-	tmp256 = _mm512_extracti32x8_epi32(res, 1);
-	tmp2 = _mm512_maskz_expand_epi32(exp_msk,
-		_mm512_castsi256_si512(tmp256));
-	_mm512_storeu_si512(next_hops, tmp);
-	_mm512_storeu_si512(next_hops + 8, tmp2);
+	/* get rid of 1 LSB, now we have the NH in every epi32 */
+	res_1 = _mm512_srli_epi32(res_1, 1);
+	res_2 = _mm512_srli_epi32(res_2, 1);
+	/* extract the first half of the NHs, each in an epi64 chunk */
+	tmp_1 = _mm512_maskz_expand_epi32(exp_msk, res_1);
+	tmp_2 = _mm512_maskz_expand_epi32(exp_msk, res_2);
+	/* extract the second half of the NHs */
+	__m256i tmp256_1, tmp256_2;
+	tmp256_1 = _mm512_extracti32x8_epi32(res_1, 1);
+	tmp256_2 = _mm512_extracti32x8_epi32(res_2, 1);
+	tmp2_1 = _mm512_maskz_expand_epi32(exp_msk,
+		_mm512_castsi256_si512(tmp256_1));
+	tmp2_2 = _mm512_maskz_expand_epi32(exp_msk,
+		_mm512_castsi256_si512(tmp256_2));
+	/* store the NHs from the two sets of registers */
+	_mm512_storeu_si512(next_hops, tmp_1);
+	_mm512_storeu_si512(next_hops + 8, tmp2_1);
+	_mm512_storeu_si512(next_hops + 16, tmp_2);
+	_mm512_storeu_si512(next_hops + 24, tmp2_2);
 }
 
 static void
-trie_vec_lookup_x8_8b(void *p, uint8_t ips[8][RTE_FIB6_IPV6_ADDR_SIZE],
+trie_vec_lookup_x8x2_8b(void *p, uint8_t ips[16][RTE_FIB6_IPV6_ADDR_SIZE],
 	uint64_t *next_hops)
 {
 	struct rte_trie_tbl *dp = (struct rte_trie_tbl *)p;
 	const __m512i zero = _mm512_set1_epi32(0);
 	const __m512i lsb = _mm512_set1_epi32(1);
 	const __m512i three_lsb = _mm512_set1_epi32(7);
-	__m512i first, second; /*< IPv6 eight byte chunks */
-	__m512i idxes, res, shuf_idxes;
-	__m512i tmp, bytes, byte_chunk, base_idxes;
+	/* IPv6 eight byte chunks */
+	__m512i first_1, second_1;
+	__m512i first_2, second_2;
+	__m512i idxes_1, res_1;
+	__m512i idxes_2, res_2;
+	__m512i shuf_idxes, base_idxes;
+	__m512i tmp_1, bytes_1, byte_chunk_1;
+	__m512i tmp_2, bytes_2, byte_chunk_2;
 	const __rte_x86_zmm_t bswap = {
 		.u8 = { 2, 1, 0, 255, 255, 255, 255, 255,
 			10, 9, 8, 255, 255, 255, 255, 255,
@@ -187,19 +241,25 @@ trie_vec_lookup_x8_8b(void *p, uint8_t ips[8][RTE_FIB6_IPV6_ADDR_SIZE],
 	};
 	const __mmask64 k = 0x101010101010101;
 	int i = 3;
-	__mmask8 msk_ext, new_msk;
+	__mmask8 msk_ext_1, new_msk_1;
+	__mmask8 msk_ext_2, new_msk_2;
 
-	transpose_x8(ips, &first, &second);
+	transpose_x8(ips, &first_1, &second_1);
+	transpose_x8(ips + 8, &first_2, &second_2);
 
 	/* get_tbl24_idx() for every 4 byte chunk */
-	idxes = _mm512_shuffle_epi8(first, bswap.z);
+	idxes_1 = _mm512_shuffle_epi8(first_1, bswap.z);
+	idxes_2 = _mm512_shuffle_epi8(first_2, bswap.z);
 
 	/* lookup in tbl24 */
-	res = _mm512_i64gather_epi64(idxes, (const void *)dp->tbl24, 8);
+	res_1 = _mm512_i64gather_epi64(idxes_1, (const void *)dp->tbl24, 8);
+	res_2 = _mm512_i64gather_epi64(idxes_2, (const void *)dp->tbl24, 8);
 
 	/* get extended entries indexes */
-	msk_ext = _mm512_test_epi64_mask(res, lsb);
+	msk_ext_1 = _mm512_test_epi64_mask(res_1, lsb);
+	msk_ext_2 = _mm512_test_epi64_mask(res_2, lsb);
 
-	tmp = _mm512_srli_epi64(res, 1);
+	tmp_1 = _mm512_srli_epi64(res_1, 1);
+	tmp_2 = _mm512_srli_epi64(res_2, 1);
 
 	/* idxes to retrieve bytes */
 	shuf_idxes = _mm512_setr_epi64(3, 11, 19, 27, 35, 43, 51, 59);
@@ -207,17 +267,31 @@ trie_vec_lookup_x8_8b(void *p, uint8_t ips[8][RTE_FIB6_IPV6_ADDR_SIZE],
 	base_idxes = _mm512_setr_epi64(0, 8, 16, 24, 32, 40, 48, 56);
 
 	/* traverse down the trie */
-	while (msk_ext) {
-		idxes = _mm512_maskz_slli_epi64(msk_ext, tmp, 8);
-		byte_chunk = (i < 8) ? first : second;
-		bytes = _mm512_maskz_shuffle_epi8(k, byte_chunk, shuf_idxes);
-		idxes = _mm512_maskz_add_epi64(msk_ext, idxes, bytes);
-		tmp = _mm512_mask_i64gather_epi64(zero, msk_ext,
-			idxes, (const void *)dp->tbl8, 8);
-		new_msk = _mm512_test_epi64_mask(tmp, lsb);
-		res = _mm512_mask_blend_epi64(msk_ext ^ new_msk, res, tmp);
-		tmp = _mm512_srli_epi64(tmp, 1);
-		msk_ext = new_msk;
+	while (msk_ext_1 || msk_ext_2) {
+		idxes_1 = _mm512_maskz_slli_epi64(msk_ext_1, tmp_1, 8);
+		idxes_2 = _mm512_maskz_slli_epi64(msk_ext_2, tmp_2, 8);
+		byte_chunk_1 = (i < 8) ? first_1 : second_1;
+		byte_chunk_2 = (i < 8) ?
+			first_2 : second_2;
+		bytes_1 = _mm512_maskz_shuffle_epi8(k, byte_chunk_1,
+			shuf_idxes);
+		bytes_2 = _mm512_maskz_shuffle_epi8(k, byte_chunk_2,
+			shuf_idxes);
+		idxes_1 = _mm512_maskz_add_epi64(msk_ext_1, idxes_1, bytes_1);
+		idxes_2 = _mm512_maskz_add_epi64(msk_ext_2, idxes_2, bytes_2);
+		tmp_1 = _mm512_mask_i64gather_epi64(zero, msk_ext_1,
+			idxes_1, (const void *)dp->tbl8, 8);
+		tmp_2 = _mm512_mask_i64gather_epi64(zero, msk_ext_2,
+			idxes_2, (const void *)dp->tbl8, 8);
+		new_msk_1 = _mm512_test_epi64_mask(tmp_1, lsb);
+		new_msk_2 = _mm512_test_epi64_mask(tmp_2, lsb);
+		res_1 = _mm512_mask_blend_epi64(msk_ext_1 ^ new_msk_1, res_1,
+			tmp_1);
+		res_2 = _mm512_mask_blend_epi64(msk_ext_2 ^ new_msk_2, res_2,
+			tmp_2);
+		tmp_1 = _mm512_srli_epi64(tmp_1, 1);
+		tmp_2 = _mm512_srli_epi64(tmp_2, 1);
+		msk_ext_1 = new_msk_1;
+		msk_ext_2 = new_msk_2;
 
 		shuf_idxes = _mm512_maskz_add_epi8(k, shuf_idxes, lsb);
 		shuf_idxes = _mm512_and_epi64(shuf_idxes, three_lsb);
@@ -225,8 +299,10 @@ trie_vec_lookup_x8_8b(void *p, uint8_t ips[8][RTE_FIB6_IPV6_ADDR_SIZE],
 		i++;
 	}
 
-	res = _mm512_srli_epi64(res, 1);
-	_mm512_storeu_si512(next_hops, res);
+	res_1 = _mm512_srli_epi64(res_1, 1);
+	res_2 = _mm512_srli_epi64(res_2, 1);
+	_mm512_storeu_si512(next_hops, res_1);
+	_mm512_storeu_si512(next_hops + 8, res_2);
 }
 
 void
@@ -234,12 +310,12 @@ rte_trie_vec_lookup_bulk_2b(void *p, uint8_t ips[][RTE_FIB6_IPV6_ADDR_SIZE],
 	uint64_t *next_hops, const unsigned int n)
 {
 	uint32_t i;
-	for (i = 0; i < (n / 16); i++) {
-		trie_vec_lookup_x16(p, (uint8_t (*)[16])&ips[i * 16][0],
-			next_hops + i * 16, sizeof(uint16_t));
+	for (i = 0; i < (n / 32); i++) {
+		trie_vec_lookup_x16x2(p, (uint8_t (*)[16])&ips[i * 32][0],
+			next_hops + i * 32, sizeof(uint16_t));
 	}
-	rte_trie_lookup_bulk_2b(p, (uint8_t (*)[16])&ips[i * 16][0],
-		next_hops + i * 16, n - i * 16);
+	rte_trie_lookup_bulk_2b(p, (uint8_t (*)[16])&ips[i * 32][0],
+		next_hops + i * 32, n - i * 32);
 }
 
 void
@@ -247,12 +323,12 @@ rte_trie_vec_lookup_bulk_4b(void *p, uint8_t ips[][RTE_FIB6_IPV6_ADDR_SIZE],
 	uint64_t *next_hops, const unsigned int n)
 {
 	uint32_t i;
-	for (i = 0; i < (n / 16); i++) {
-		trie_vec_lookup_x16(p, (uint8_t (*)[16])&ips[i * 16][0],
-			next_hops + i * 16, sizeof(uint32_t));
+	for (i = 0; i < (n / 32); i++) {
+		trie_vec_lookup_x16x2(p, (uint8_t (*)[16])&ips[i * 32][0],
+			next_hops + i * 32, sizeof(uint32_t));
 	}
-	rte_trie_lookup_bulk_4b(p, (uint8_t (*)[16])&ips[i * 16][0],
-		next_hops + i * 16, n - i * 16);
+	rte_trie_lookup_bulk_4b(p, (uint8_t (*)[16])&ips[i * 32][0],
+		next_hops + i * 32, n - i * 32);
 }
 
 void
@@ -260,10 +336,10 @@ rte_trie_vec_lookup_bulk_8b(void *p, uint8_t ips[][RTE_FIB6_IPV6_ADDR_SIZE],
 	uint64_t *next_hops, const unsigned int n)
 {
 	uint32_t i;
-	for (i = 0; i < (n / 8); i++) {
-		trie_vec_lookup_x8_8b(p, (uint8_t (*)[16])&ips[i * 8][0],
-			next_hops + i * 8);
+	for (i = 0; i < (n / 16); i++) {
+		trie_vec_lookup_x8x2_8b(p, (uint8_t (*)[16])&ips[i * 16][0],
+			next_hops + i * 16);
 	}
-	rte_trie_lookup_bulk_8b(p, (uint8_t (*)[16])&ips[i * 8][0],
-		next_hops + i * 8, n - i * 8);
+	rte_trie_lookup_bulk_8b(p, (uint8_t (*)[16])&ips[i * 16][0],
+		next_hops + i * 16, n - i * 16);
 }
-- 
2.7.4
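
For context, the widened x16x2/x8x2 routines above are reached through the
public rte_fib6 bulk lookup API. Below is a minimal usage sketch, not part of
this patch: the helper lookup_burst and the BURST constant are illustrative
names only. BURST is chosen as a multiple of 32 so that, with 2- or 4-byte
next hops, every address takes the vectorized path and the scalar tail loop
in the rte_trie_vec_lookup_bulk_*() wrappers does no work.

#include <rte_fib6.h>

#define BURST 32

/* Resolve one next hop per IPv6 address in a burst. This assumes `fib`
 * was created with type RTE_FIB6_TRIE and that the AVX512 lookup
 * variant was selected at runtime; otherwise the same call falls back
 * to the scalar lookup. Returns 0 on success, negative on bad
 * arguments, per the rte_fib6.h contract.
 */
static int
lookup_burst(struct rte_fib6 *fib,
	uint8_t ips[BURST][RTE_FIB6_IPV6_ADDR_SIZE])
{
	uint64_t next_hops[BURST];

	return rte_fib6_lookup_bulk(fib, ips, next_hops, BURST);
}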