From: Morten Brørup
To: bruce.richardson@intel.com, konstantin.v.ananyev@yandex.ru, stephen@networkplumber.org
Cc: mattias.ronnblom@ericsson.com, dev@dpdk.org, Morten Brørup
Subject: [PATCH v2] eal/x86: improve rte_memcpy const size 16 performance
Date: Sun, 3 Mar 2024 10:46:21 +0100
Message-Id: <20240303094621.16404-1-mb@smartsharesystems.com>
In-Reply-To: <20240302234812.9137-1-mb@smartsharesystems.com>
References: <20240302234812.9137-1-mb@smartsharesystems.com>

When the rte_memcpy() size is 16, the same 16 bytes are copied twice.
In the case where the size is known to be 16 at build time, omit the
duplicate copy.

Reduce the amount of effectively copy-pasted code by using #ifdef
inside functions instead of outside functions.

Suggested-by: Stephen Hemminger
Signed-off-by: Morten Brørup
---
v2:
* For GCC, version 11 or later is required for proper AVX handling;
  with older GCC versions, AVX is treated as SSE.
  Clang does not have this issue.
  Note: Original code always treated AVX as SSE, regardless of compiler.
* Do not add copyright. (Stephen Hemminger)
---
 lib/eal/x86/include/rte_memcpy.h | 231 ++++++++-----------------------
 1 file changed, 56 insertions(+), 175 deletions(-)

diff --git a/lib/eal/x86/include/rte_memcpy.h b/lib/eal/x86/include/rte_memcpy.h
index 72a92290e0..d1df841f5e 100644
--- a/lib/eal/x86/include/rte_memcpy.h
+++ b/lib/eal/x86/include/rte_memcpy.h
@@ -91,14 +91,6 @@ rte_mov15_or_less(void *dst, const void *src, size_t n)
 	return ret;
 }
 
-#if defined __AVX512F__ && defined RTE_MEMCPY_AVX512
-
-#define ALIGNMENT_MASK 0x3F
-
-/**
- * AVX512 implementation below
- */
-
 /**
  * Copy 16 bytes from one location to another,
  * locations should not overlap.
@@ -119,10 +111,16 @@ rte_mov16(uint8_t *dst, const uint8_t *src)
 static __rte_always_inline void
 rte_mov32(uint8_t *dst, const uint8_t *src)
 {
+#if (defined __AVX512F__ && defined RTE_MEMCPY_AVX512) || defined __AVX2__ || \
+		(defined __AVX__ && !(defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 110000)))
 	__m256i ymm0;
 
 	ymm0 = _mm256_loadu_si256((const __m256i *)src);
 	_mm256_storeu_si256((__m256i *)dst, ymm0);
+#else /* SSE implementation */
+	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
+	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
+#endif
 }
 
 /**
@@ -132,10 +130,15 @@ rte_mov32(uint8_t *dst, const uint8_t *src)
 static __rte_always_inline void
 rte_mov64(uint8_t *dst, const uint8_t *src)
 {
+#if defined __AVX512F__ && defined RTE_MEMCPY_AVX512
 	__m512i zmm0;
 
 	zmm0 = _mm512_loadu_si512((const void *)src);
 	_mm512_storeu_si512((void *)dst, zmm0);
+#else /* AVX2, AVX & SSE implementation */
+	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
+	rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
+#endif
 }
 
 /**
@@ -156,12 +159,18 @@ rte_mov128(uint8_t *dst, const uint8_t *src)
 static __rte_always_inline void
 rte_mov256(uint8_t *dst, const uint8_t *src)
 {
-	rte_mov64(dst + 0 * 64, src + 0 * 64);
-	rte_mov64(dst + 1 * 64, src + 1 * 64);
-	rte_mov64(dst + 2 * 64, src + 2 * 64);
-	rte_mov64(dst + 3 * 64, src + 3 * 64);
+	rte_mov128(dst + 0 * 128, src + 0 * 128);
+	rte_mov128(dst + 1 * 128, src + 1 * 128);
 }
 
+#if defined __AVX512F__ && defined RTE_MEMCPY_AVX512
+
+/**
+ * AVX512 implementation below
+ */
+
+#define ALIGNMENT_MASK 0x3F
+
 /**
  * Copy 128-byte blocks from one location to another,
  * locations should not overlap.
@@ -231,12 +240,22 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
 	/**
 	 * Fast way when copy size doesn't exceed 512 bytes
 	 */
+	if (__builtin_constant_p(n) && n == 32) {
+		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+		return ret;
+	}
 	if (n <= 32) {
 		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+		if (__builtin_constant_p(n) && n == 16)
+			return ret; /* avoid (harmless) duplicate copy */
 		rte_mov16((uint8_t *)dst - 16 + n,
 				(const uint8_t *)src - 16 + n);
 		return ret;
 	}
+	if (__builtin_constant_p(n) && n == 64) {
+		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+		return ret;
+	}
 	if (n <= 64) {
 		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
 		rte_mov32((uint8_t *)dst - 32 + n,
@@ -313,80 +332,14 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
 	goto COPY_BLOCK_128_BACK63;
 }
 
-#elif defined __AVX2__
-
-#define ALIGNMENT_MASK 0x1F
+#elif defined __AVX2__ || \
+		(defined __AVX__ && !(defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 110000)))
 
 /**
- * AVX2 implementation below
+ * AVX2 (and AVX, unless too old GCC version) implementation below
  */
 
-/**
- * Copy 16 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov16(uint8_t *dst, const uint8_t *src)
-{
-	__m128i xmm0;
-
-	xmm0 = _mm_loadu_si128((const __m128i *)(const void *)src);
-	_mm_storeu_si128((__m128i *)(void *)dst, xmm0);
-}
-
-/**
- * Copy 32 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov32(uint8_t *dst, const uint8_t *src)
-{
-	__m256i ymm0;
-
-	ymm0 = _mm256_loadu_si256((const __m256i *)(const void *)src);
-	_mm256_storeu_si256((__m256i *)(void *)dst, ymm0);
-}
-
-/**
- * Copy 64 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov64(uint8_t *dst, const uint8_t *src)
-{
-	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
-	rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
-}
-
-/**
- * Copy 128 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov128(uint8_t *dst, const uint8_t *src)
-{
-	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
-	rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
-	rte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
-	rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
-}
-
-/**
- * Copy 256 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov256(uint8_t *dst, const uint8_t *src)
-{
-	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
-	rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
-	rte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
-	rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
-	rte_mov32((uint8_t *)dst + 4 * 32, (const uint8_t *)src + 4 * 32);
-	rte_mov32((uint8_t *)dst + 5 * 32, (const uint8_t *)src + 5 * 32);
-	rte_mov32((uint8_t *)dst + 6 * 32, (const uint8_t *)src + 6 * 32);
-	rte_mov32((uint8_t *)dst + 7 * 32, (const uint8_t *)src + 7 * 32);
-}
+#define ALIGNMENT_MASK 0x1F
 
 /**
  * Copy 128-byte blocks from one location to another,
@@ -437,15 +390,14 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
 	/**
 	 * Fast way when copy size doesn't exceed 256 bytes
 	 */
-	if (n <= 32) {
-		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
-		rte_mov16((uint8_t *)dst - 16 + n,
-				(const uint8_t *)src - 16 + n);
+	if (__builtin_constant_p(n) && n == 32) {
+		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
 		return ret;
 	}
-	if (n <= 48) {
+	if (n <= 32) {
 		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
-		rte_mov16((uint8_t *)dst + 16, (const uint8_t *)src + 16);
+		if (__builtin_constant_p(n) && n == 16)
+			return ret; /* avoid (harmless) duplicate copy */
 		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
 		return ret;
 	}
@@ -513,90 +465,11 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
 
 #else /* __AVX512F__ */
 
-#define ALIGNMENT_MASK 0x0F
-
-/**
- * SSE & AVX implementation below
- */
-
-/**
- * Copy 16 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov16(uint8_t *dst, const uint8_t *src)
-{
-	__m128i xmm0;
-
-	xmm0 = _mm_loadu_si128((const __m128i *)(const void *)src);
-	_mm_storeu_si128((__m128i *)(void *)dst, xmm0);
-}
-
-/**
- * Copy 32 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov32(uint8_t *dst, const uint8_t *src)
-{
-	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
-	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
-}
-
 /**
- * Copy 64 bytes from one location to another,
- * locations should not overlap.
- */
-static __rte_always_inline void
-rte_mov64(uint8_t *dst, const uint8_t *src)
-{
-	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
-	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
-	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
-	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
-}
-
-/**
- * Copy 128 bytes from one location to another,
- * locations should not overlap.
+ * SSE (and AVX, with too old GCC version) implementation below
  */
-static __rte_always_inline void
-rte_mov128(uint8_t *dst, const uint8_t *src)
-{
-	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
-	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
-	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
-	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
-	rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
-	rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
-	rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
-	rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
-}
-
-/**
- * Copy 256 bytes from one location to another,
- * locations should not overlap.
- */
-static inline void
-rte_mov256(uint8_t *dst, const uint8_t *src)
-{
-	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
-	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
-	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
-	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
-	rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
-	rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
-	rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
-	rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
-	rte_mov16((uint8_t *)dst + 8 * 16, (const uint8_t *)src + 8 * 16);
-	rte_mov16((uint8_t *)dst + 9 * 16, (const uint8_t *)src + 9 * 16);
-	rte_mov16((uint8_t *)dst + 10 * 16, (const uint8_t *)src + 10 * 16);
-	rte_mov16((uint8_t *)dst + 11 * 16, (const uint8_t *)src + 11 * 16);
-	rte_mov16((uint8_t *)dst + 12 * 16, (const uint8_t *)src + 12 * 16);
-	rte_mov16((uint8_t *)dst + 13 * 16, (const uint8_t *)src + 13 * 16);
-	rte_mov16((uint8_t *)dst + 14 * 16, (const uint8_t *)src + 14 * 16);
-	rte_mov16((uint8_t *)dst + 15 * 16, (const uint8_t *)src + 15 * 16);
-}
+
+#define ALIGNMENT_MASK 0x0F
 
 /**
  * Macro for copying unaligned block from one location to another with constant load offset,
@@ -712,17 +585,15 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
 	 */
 	if (n <= 32) {
 		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
-		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
-		return ret;
-	}
-	if (n <= 48) {
-		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+		if (__builtin_constant_p(n) && n == 16)
+			return ret; /* avoid (harmless) duplicate copy */
 		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
 		return ret;
 	}
 	if (n <= 64) {
 		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
-		rte_mov16((uint8_t *)dst + 32, (const uint8_t *)src + 32);
+		if (n > 48)
+			rte_mov16((uint8_t *)dst + 32, (const uint8_t *)src + 32);
 		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
 		return ret;
 	}
@@ -828,8 +699,14 @@ rte_memcpy_aligned(void *dst, const void *src, size_t n)
 	}
 
 	/* Copy 16 <= size <= 32 bytes */
+	if (__builtin_constant_p(n) && n == 32) {
+		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+		return ret;
+	}
 	if (n <= 32) {
 		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+		if (__builtin_constant_p(n) && n == 16)
+			return ret; /* avoid (harmless) duplicate copy */
 		rte_mov16((uint8_t *)dst - 16 + n,
 				(const uint8_t *)src - 16 + n);
 
@@ -837,6 +714,10 @@ rte_memcpy_aligned(void *dst, const void *src, size_t n)
 	}
 
 	/* Copy 32 < size <= 64 bytes */
+	if (__builtin_constant_p(n) && n == 64) {
+		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+		return ret;
+	}
 	if (n <= 64) {
 		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
 		rte_mov32((uint8_t *)dst - 32 + n,
-- 
2.17.1
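
Illustration (not part of the patch): a minimal, self-contained sketch of the pattern the diff applies in rte_memcpy_generic() and rte_memcpy_aligned(). For 16 <= n <= 32 the existing code copies the first 16 bytes and then the last 16 bytes at (dst - 16 + n); when n == 16 those two ranges are identical, and the added __builtin_constant_p(n) test lets the compiler drop the second copy whenever the size is a build-time constant. The helper names mov16() and copy32_or_less() below are made up for this example only.

#include <emmintrin.h>	/* SSE2 intrinsics: __m128i, _mm_loadu_si128(), _mm_storeu_si128() */
#include <stddef.h>
#include <stdint.h>

/* Unaligned 16-byte copy, mirroring rte_mov16(). */
static inline void
mov16(uint8_t *dst, const uint8_t *src)
{
	__m128i xmm0 = _mm_loadu_si128((const __m128i *)(const void *)src);
	_mm_storeu_si128((__m128i *)(void *)dst, xmm0);
}

/*
 * Copy 16 <= n <= 32 bytes: the first 16 bytes, then the last 16 bytes.
 * For n == 16 both calls would copy the same bytes, so a build-time
 * constant size of 16 skips the second (harmless but wasted) copy.
 */
static inline void *
copy32_or_less(void *dst, const void *src, size_t n)
{
	mov16((uint8_t *)dst, (const uint8_t *)src);
	if (__builtin_constant_p(n) && n == 16)
		return dst; /* avoid (harmless) duplicate copy */
	mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
	return dst;
}

With a literal size, e.g. copy32_or_less(dst, src, 16), an optimizing compiler evaluates the __builtin_constant_p() test at build time and emits a single 16-byte load/store pair; with a runtime size the test is false and the original two-copy path is kept unchanged.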