From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <konstantin.ananyev@intel.com>
Received: from mga03.intel.com (mga03.intel.com [134.134.136.65])
 by dpdk.org (Postfix) with ESMTP id D8CF4206
 for <dev@dpdk.org>; Wed, 30 Aug 2017 16:56:39 +0200 (CEST)
Received: from fmsmga005.fm.intel.com ([10.253.24.32])
 by orsmga103.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;
 30 Aug 2017 07:56:38 -0700
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.41,448,1498546800"; d="scan'208";a="145673160"
Received: from irsmsx105.ger.corp.intel.com ([163.33.3.28])
 by fmsmga005.fm.intel.com with ESMTP; 30 Aug 2017 07:56:37 -0700
Received: from irsmsx103.ger.corp.intel.com ([169.254.3.49]) by
 irsmsx105.ger.corp.intel.com ([169.254.7.75]) with mapi id 14.03.0319.002;
 Wed, 30 Aug 2017 15:56:36 +0100
From: "Ananyev, Konstantin" <konstantin.ananyev@intel.com>
To: "Li, Xiaoyun" <xiaoyun.li@intel.com>, "Richardson, Bruce"
 <bruce.richardson@intel.com>
CC: "dev@dpdk.org" <dev@dpdk.org>, "Lu, Wenzhuo" <wenzhuo.lu@intel.com>,
 "Wang, Zhihong" <zhihong.wang@intel.com>, "Zhang, Qi Z"
 <qi.z.zhang@intel.com>, "Li, Xiaoyun" <xiaoyun.li@intel.com>
Thread-Topic: [dpdk-dev] [PATCH 1/3] eal/x86: run-time dispatch over memcpy
Thread-Index: AQHTHUbl34fJysnfq0aCHLzsp3DRbKKdBSow
Date: Wed, 30 Aug 2017 14:56:35 +0000
Message-ID: <2601191342CEEE43887BDE71AB9772584F23E343@IRSMSX103.ger.corp.intel.com>
References: <1503626773-184682-1-git-send-email-xiaoyun.li@intel.com>
 <1503626773-184682-2-git-send-email-xiaoyun.li@intel.com>
In-Reply-To: <1503626773-184682-2-git-send-email-xiaoyun.li@intel.com>
Accept-Language: en-IE, en-US
Content-Language: en-US
X-MS-Has-Attach: 
X-MS-TNEF-Correlator: 
dlp-product: dlpe-windows
dlp-version: 11.0.0.116
dlp-reaction: no-action
x-originating-ip: [163.33.239.180]
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
MIME-Version: 1.0
Subject: Re: [dpdk-dev] [PATCH 1/3] eal/x86: run-time dispatch over memcpy
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <http://dpdk.org/ml/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://dpdk.org/ml/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <http://dpdk.org/ml/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
X-List-Received-Date: Wed, 30 Aug 2017 14:56:40 -0000



> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Xiaoyun Li
> Sent: Friday, August 25, 2017 3:06 AM
> To: Richardson, Bruce <bruce.richardson@intel.com>
> Cc: dev@dpdk.org; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Wang, Zhihong <zhihong.wang@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Li, Xiaoyun <xiaoyun.li@intel.com>
> Subject: [dpdk-dev] [PATCH 1/3] eal/x86: run-time dispatch over memcpy
>
> This patch dynamically selects the memcpy functions at run-time based
> on the CPU flags that the current machine supports. It uses function
> pointers which are bound to the corresponding implementations at
> constructor time. To make the AVX512 instructions pass compilation,
> the switch is enabled in the makefile.

It seems like quite an overhead to add an extra function call for each 16B movement...
Wouldn't it be better to have one func_ptr per implementation, i.e.:
rte_memcpy_sse(), rte_memcpy_avx2(), rte_memcpy_avx512(), etc.?
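To illustrate, a rough sketch of the kind of dispatch I have in mind
(rte_memcpy_sse/_avx2/_avx512 here stand for complete per-ISA copy routines
built from the existing code; the names are placeholders, not functions from
this patch):

static void *(*rte_memcpy_ptr)(void *dst, const void *src, size_t n);

static void __attribute__((constructor))
rte_memcpy_select(void)
{
	/* bind the pointer once at init time, based on the CPU flags */
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
		rte_memcpy_ptr = rte_memcpy_avx512;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
		rte_memcpy_ptr = rte_memcpy_avx2;
	else
		rte_memcpy_ptr = rte_memcpy_sse;
}

static inline void *
rte_memcpy(void *dst, const void *src, size_t n)
{
	/* one indirect call per copy; rte_mov16/32/64 etc. stay inlined
	 * inside each implementation */
	return (*rte_memcpy_ptr)(dst, src, n);
}

That way the small helpers are still resolved at compile time inside each
variant, and the function-pointer overhead is paid once per rte_memcpy()
call rather than per 16B block.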
Konstantin

>
> Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
> ---
>  .../common/include/arch/x86/rte_memcpy.h           | 305 ++++++++++++---------
>  mk/machine/native/rte.vars.mk                      |   2 +
>  2 files changed, 181 insertions(+), 126 deletions(-)
>
> diff --git a/lib/librte_eal/common/include/arch/x86/rte_memcpy.h b/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
> index 74c280c..f68ebd2 100644
> --- a/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
> +++ b/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
> @@ -45,11 +45,37 @@
>  #include <string.h>
>  #include <rte_vect.h>
>  #include <rte_common.h>
> +#include <rte_cpuflags.h>
> +#include <rte_log.h>
>
>  #ifdef __cplusplus
>  extern "C" {
>  #endif
>
> +/*
> + * Select SSE/AVX memory copy method as default one.
> + */
> +
> +static uint16_t alignment_mask = 0x0F;
> +
> +typedef void (*rte_mov16_t)(uint8_t *dst, const uint8_t *src);
> +typedef void (*rte_mov32_t)(uint8_t *dst, const uint8_t *src);
> +typedef void (*rte_mov64_t)(uint8_t *dst, const uint8_t *src);
> +typedef void (*rte_mov128_t)(uint8_t *dst, const uint8_t *src);
> +typedef void (*rte_mov256_t)(uint8_t *dst, const uint8_t *src);
> +typedef void (*rte_mov128blocks_t)(uint8_t *dst, const uint8_t *src, size_t n);
> +typedef void (*rte_mov512blocks_t)(uint8_t *dst, const uint8_t *src, size_t n);
> +typedef void * (*rte_memcpy_generic_t)(void *dst, const void *src, size_t n);
> +
> +static rte_mov16_t rte_mov16;
> +static rte_mov32_t rte_mov32;
> +static rte_mov64_t rte_mov64;
> +static rte_mov128_t rte_mov128;
> +static rte_mov256_t rte_mov256;
> +static rte_mov128blocks_t rte_mov128blocks;
> +static rte_mov512blocks_t rte_mov512blocks;
> +static rte_memcpy_generic_t rte_memcpy_generic;
> +
>  /**
>   * Copy bytes from one location to another. The locations must not overlap.
>   *
> @@ -68,10 +94,6 @@ extern "C" {
>  static __rte_always_inline void *
>  rte_memcpy(void *dst, const void *src, size_t n);
>
> -#ifdef RTE_MACHINE_CPUFLAG_AVX512F
> -
> -#define ALIGNMENT_MASK 0x3F
> -
>  /**
>   * AVX512 implementation below
>   */
> @@ -81,7 +103,7 @@ rte_memcpy(void *dst, const void *src, size_t n);
>   * locations should not overlap.
>   */
>  static inline void
> -rte_mov16(uint8_t *dst, const uint8_t *src)
> +rte_mov16_AVX512F(uint8_t *dst, const uint8_t *src)
>  {
>  	__m128i xmm0;
>
> @@ -94,7 +116,7 @@ rte_mov16(uint8_t *dst, const uint8_t *src)
>   * locations should not overlap.
>   */
>  static inline void
> -rte_mov32(uint8_t *dst, const uint8_t *src)
> +rte_mov32_AVX512F(uint8_t *dst, const uint8_t *src)
>  {
>  	__m256i ymm0;
>
> @@ -107,7 +129,7 @@ rte_mov32(uint8_t *dst, const uint8_t *src)
>   * locations should not overlap.
>   */
>  static inline void
> -rte_mov64(uint8_t *dst, const uint8_t *src)
> +rte_mov64_AVX512F(uint8_t *dst, const uint8_t *src)
>  {
>  	__m512i zmm0;
>
> @@ -120,10 +142,10 @@ rte_mov64(uint8_t *dst, const uint8_t *src)
>   * locations should not overlap.
>   */
>  static inline void
> -rte_mov128(uint8_t *dst, const uint8_t *src)
> +rte_mov128_AVX512F(uint8_t *dst, const uint8_t *src)
>  {
> -	rte_mov64(dst + 0 * 64, src + 0 * 64);
> -	rte_mov64(dst + 1 * 64, src + 1 * 64);
> +	(*rte_mov64)(dst + 0 * 64, src + 0 * 64);
> +	(*rte_mov64)(dst + 1 * 64, src + 1 * 64);
>  }
>
>  /**
> @@ -131,12 +153,12 @@ rte_mov128(uint8_t *dst, const uint8_t *src)
>   * locations should not overlap.
>   */
>  static inline void
> -rte_mov256(uint8_t *dst, const uint8_t *src)
> +rte_mov256_AVX512F(uint8_t *dst, const uint8_t *src)
>  {
> -	rte_mov64(dst + 0 * 64, src + 0 * 64);
> -	rte_mov64(dst + 1 * 64, src + 1 * 64);
> -	rte_mov64(dst + 2 * 64, src + 2 * 64);
> -	rte_mov64(dst + 3 * 64, src + 3 * 64);
> +	(*rte_mov64)(dst + 0 * 64, src + 0 * 64);
> +	(*rte_mov64)(dst + 1 * 64, src + 1 * 64);
> +	(*rte_mov64)(dst + 2 * 64, src + 2 * 64);
> +	(*rte_mov64)(dst + 3 * 64, src + 3 * 64);
>  }
>
>  /**
> @@ -144,7 +166,7 @@ rte_mov256(uint8_t *dst, const uint8_t *src)
>   * locations should not overlap.
>   */
>  static inline void
> -rte_mov128blocks(uint8_t *dst, const uint8_t *src, size_t n)
> +rte_mov128blocks_AVX512F(uint8_t *dst, const uint8_t *src, size_t n)
>  {
>  	__m512i zmm0, zmm1;
>
> @@ -164,7 +186,7 @@ rte_mov128blocks(uint8_t *dst, const uint8_t *src, size_t n)
>   * locations should not overlap.
>   */
>  static inline void
> -rte_mov512blocks(uint8_t *dst, const uint8_t *src, size_t n)
> +rte_mov512blocks_AVX512F(uint8_t *dst, const uint8_t *src, size_t n)
>  {
>  	__m512i zmm0, zmm1, zmm2, zmm3, zmm4, zmm5, zmm6, zmm7;
>
> @@ -192,7 +214,7 @@ rte_mov512blocks(uint8_t *dst, const uint8_t *src, size_t n)
>  }
>
>  static inline void *
> -rte_memcpy_generic(void *dst, const void *src, size_t n)
> +rte_memcpy_generic_AVX512F(void *dst, const void *src, size_t n)
>  {
>  	uintptr_t dstu = (uintptr_t)dst;
>  	uintptr_t srcu = (uintptr_t)src;
> @@ -228,39 +250,39 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
>  	 * Fast way when copy size doesn't exceed 512 bytes
>  	 */
>  	if (n <= 32) {
> -		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
> -		rte_mov16((uint8_t *)dst - 16 + n,
> +		(*rte_mov16)((uint8_t *)dst, (const uint8_t *)src);
> +		(*rte_mov16)((uint8_t *)dst - 16 + n,
>  				  (const uint8_t *)src - 16 + n);
>  		return ret;
>  	}
>  	if (n <= 64) {
> -		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
> -		rte_mov32((uint8_t *)dst - 32 + n,
> +		(*rte_mov32)((uint8_t *)dst, (const uint8_t *)src);
> +		(*rte_mov32)((uint8_t *)dst - 32 + n,
>  				  (const uint8_t *)src - 32 + n);
>  		return ret;
>  	}
>  	if (n <= 512) {
>  		if (n >= 256) {
>  			n -= 256;
> -			rte_mov256((uint8_t *)dst, (const uint8_t *)src);
> +			(*rte_mov256)((uint8_t *)dst, (const uint8_t *)src);
>  			src = (const uint8_t *)src + 256;
>  			dst = (uint8_t *)dst + 256;
>  		}
>  		if (n >= 128) {
>  			n -= 128;
> -			rte_mov128((uint8_t *)dst, (const uint8_t *)src);
> +			(*rte_mov128)((uint8_t *)dst, (const uint8_t *)src);
>  			src = (const uint8_t *)src + 128;
>  			dst = (uint8_t *)dst + 128;
>  		}
>  COPY_BLOCK_128_BACK63:
>  		if (n > 64) {
> -			rte_mov64((uint8_t *)dst, (const uint8_t *)src);
> -			rte_mov64((uint8_t *)dst - 64 + n,
> +			(*rte_mov64)((uint8_t *)dst, (const uint8_t *)src);
> +			(*rte_mov64)((uint8_t *)dst - 64 + n,
>  					  (const uint8_t *)src - 64 + n);
>  			return ret;
>  		}
>  		if (n > 0)
> -			rte_mov64((uint8_t *)dst - 64 + n,
> +			(*rte_mov64)((uint8_t *)dst - 64 + n,
>  					  (const uint8_t *)src - 64 + n);
>  		return ret;
>  	}
> @@ -272,7 +294,7 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
>  	if (dstofss > 0) {
>  		dstofss = 64 - dstofss;
>  		n -= dstofss;
> -		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
> +		(*rte_mov64)((uint8_t *)dst, (const uint8_t *)src);
>  		src = (const uint8_t *)src + dstofss;
>  		dst = (uint8_t *)dst + dstofss;
>  	}
> @@ -282,7 +304,7 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
>  	 * Use copy block function for better instruction order control,
>  	 * which is important when load is unaligned.
>  	 */
> -	rte_mov512blocks((uint8_t *)dst, (const uint8_t *)src, n);
> +	(*rte_mov512blocks)((uint8_t *)dst, (const uint8_t *)src, n);
>  	bits = n;
>  	n = n & 511;
>  	bits -= n;
> @@ -295,7 +317,7 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
>  	 * which is important when load is unaligned.
>  	 */
>  	if (n >= 128) {
> -		rte_mov128blocks((uint8_t *)dst, (const uint8_t *)src, n);
> +		(*rte_mov128blocks)((uint8_t *)dst, (const uint8_t *)src, n);
>  		bits = n;
>  		n = n & 127;
>  		bits -= n;
> @@ -309,10 +331,6 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
>  	goto COPY_BLOCK_128_BACK63;
>  }
>
> -#elif defined RTE_MACHINE_CPUFLAG_AVX2
> -
> -#define ALIGNMENT_MASK 0x1F
> -
>  /**
>   * AVX2 implementation below
>   */
> @@ -322,7 +340,7 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
>   * locations should not overlap.
>   */
>  static inline void
> -rte_mov16(uint8_t *dst, const uint8_t *src)
> +rte_mov16_AVX2(uint8_t *dst, const uint8_t *src)
>  {
>  	__m128i xmm0;
>
> @@ -335,7 +353,7 @@ rte_mov16(uint8_t *dst, const uint8_t *src)
>   * locations should not overlap.
>   */
>  static inline void
> -rte_mov32(uint8_t *dst, const uint8_t *src)
> +rte_mov32_AVX2(uint8_t *dst, const uint8_t *src)
>  {
>  	__m256i ymm0;
>
> @@ -348,10 +366,10 @@ rte_mov32(uint8_t *dst, const uint8_t *src)
>   * locations should not overlap.
>   */
>  static inline void
> -rte_mov64(uint8_t *dst, const uint8_t *src)
> +rte_mov64_AVX2(uint8_t *dst, const uint8_t *src)
>  {
> -	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
> -	rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
> +	(*rte_mov32)((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
> +	(*rte_mov32)((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
>  }
>
>  /**
> @@ -359,12 +377,12 @@ rte_mov64(uint8_t *dst, const uint8_t *src)
>   * locations should not overlap.
>   */
>  static inline void
> -rte_mov128(uint8_t *dst, const uint8_t *src)
> +rte_mov128_AVX2(uint8_t *dst, const uint8_t *src)
>  {
> -	rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
> -	rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
> -	rte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
> -	rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
> +	(*rte_mov32)((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
> +	(*rte_mov32)((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32);
> +	(*rte_mov32)((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32);
> +	(*rte_mov32)((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
>  }
>
>  /**
> @@ -372,7 +390,7 @@ rte_mov128(uint8_t *dst, const uint8_t *src)
>   * locations should not overlap.
>   */
>  static inline void
> -rte_mov128blocks(uint8_t *dst, const uint8_t *src, size_t n)
> +rte_mov128blocks_AVX2(uint8_t *dst, const uint8_t *src, size_t n)
>  {
>  	__m256i ymm0, ymm1, ymm2, ymm3;
>=20
> @@ -392,7 +410,7 @@ rte_mov128blocks(uint8_t *dst, const uint8_t *src, size_t n)
>  }
>
>  static inline void *
> -rte_memcpy_generic(void *dst, const void *src, size_t n)
> +rte_memcpy_generic_AVX2(void *dst, const void *src, size_t n)
>  {
>  	uintptr_t dstu = (uintptr_t)dst;
>  	uintptr_t srcu = (uintptr_t)src;
> @@ -429,46 +447,46 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
>  	 * Fast way when copy size doesn't exceed 256 bytes
>  	 */
>  	if (n <= 32) {
> -		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
> -		rte_mov16((uint8_t *)dst - 16 + n,
> +		(*rte_mov16)((uint8_t *)dst, (const uint8_t *)src);
> +		(*rte_mov16)((uint8_t *)dst - 16 + n,
>  				(const uint8_t *)src - 16 + n);
>  		return ret;
>  	}
>  	if (n <= 48) {
> -		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
> -		rte_mov16((uint8_t *)dst + 16, (const uint8_t *)src + 16);
> -		rte_mov16((uint8_t *)dst - 16 + n,
> +		(*rte_mov16)((uint8_t *)dst, (const uint8_t *)src);
> +		(*rte_mov16)((uint8_t *)dst + 16, (const uint8_t *)src + 16);
> +		(*rte_mov16)((uint8_t *)dst - 16 + n,
>  				(const uint8_t *)src - 16 + n);
>  		return ret;
>  	}
>  	if (n <= 64) {
> -		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
> -		rte_mov32((uint8_t *)dst - 32 + n,
> +		(*rte_mov32)((uint8_t *)dst, (const uint8_t *)src);
> +		(*rte_mov32)((uint8_t *)dst - 32 + n,
>  				(const uint8_t *)src - 32 + n);
>  		return ret;
>  	}
>  	if (n <= 256) {
>  		if (n >= 128) {
>  			n -= 128;
> -			rte_mov128((uint8_t *)dst, (const uint8_t *)src);
> +			(*rte_mov128)((uint8_t *)dst, (const uint8_t *)src);
>  			src = (const uint8_t *)src + 128;
>  			dst = (uint8_t *)dst + 128;
>  		}
>  COPY_BLOCK_128_BACK31:
>  		if (n >= 64) {
>  			n -= 64;
> -			rte_mov64((uint8_t *)dst, (const uint8_t *)src);
> +			(*rte_mov64)((uint8_t *)dst, (const uint8_t *)src);
>  			src = (const uint8_t *)src + 64;
>  			dst = (uint8_t *)dst + 64;
>  		}
>  		if (n > 32) {
> -			rte_mov32((uint8_t *)dst, (const uint8_t *)src);
> -			rte_mov32((uint8_t *)dst - 32 + n,
> +			(*rte_mov32)((uint8_t *)dst, (const uint8_t *)src);
> +			(*rte_mov32)((uint8_t *)dst - 32 + n,
>  					(const uint8_t *)src - 32 + n);
>  			return ret;
>  		}
>  		if (n > 0) {
> -			rte_mov32((uint8_t *)dst - 32 + n,
> +			(*rte_mov32)((uint8_t *)dst - 32 + n,
>  					(const uint8_t *)src - 32 + n);
>  		}
>  		return ret;
> @@ -481,7 +499,7 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
>  	if (dstofss > 0) {
>  		dstofss = 32 - dstofss;
>  		n -= dstofss;
> -		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
> +		(*rte_mov32)((uint8_t *)dst, (const uint8_t *)src);
>  		src = (const uint8_t *)src + dstofss;
>  		dst = (uint8_t *)dst + dstofss;
>  	}
> @@ -489,7 +507,7 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
>  	/**
>  	 * Copy 128-byte blocks
>  	 */
> -	rte_mov128blocks((uint8_t *)dst, (const uint8_t *)src, n);
> +	(*rte_mov128blocks)((uint8_t *)dst, (const uint8_t *)src, n);
>  	bits = n;
>  	n = n & 127;
>  	bits -= n;
> @@ -502,10 +520,6 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
>  	goto COPY_BLOCK_128_BACK31;
>  }
>
> -#else /* RTE_MACHINE_CPUFLAG */
> -
> -#define ALIGNMENT_MASK 0x0F
> -
>  /**
>   * SSE & AVX implementation below
>   */
> @@ -515,7 +529,7 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
>   * locations should not overlap.
>   */
>  static inline void
> -rte_mov16(uint8_t *dst, const uint8_t *src)
> +rte_mov16_DEFAULT(uint8_t *dst, const uint8_t *src)
>  {
>  	__m128i xmm0;
>
> @@ -528,10 +542,10 @@ rte_mov16(uint8_t *dst, const uint8_t *src)
>   * locations should not overlap.
>   */
>  static inline void
> -rte_mov32(uint8_t *dst, const uint8_t *src)
> +rte_mov32_DEFAULT(uint8_t *dst, const uint8_t *src)
>  {
> -	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
> -	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
>  }
>
>  /**
> @@ -539,12 +553,12 @@ rte_mov32(uint8_t *dst, const uint8_t *src)
>   * locations should not overlap.
>   */
>  static inline void
> -rte_mov64(uint8_t *dst, const uint8_t *src)
> +rte_mov64_DEFAULT(uint8_t *dst, const uint8_t *src)
>  {
> -	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
> -	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
> -	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
> -	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
>  }
>
>  /**
> @@ -552,16 +566,16 @@ rte_mov64(uint8_t *dst, const uint8_t *src)
>   * locations should not overlap.
>   */
>  static inline void
> -rte_mov128(uint8_t *dst, const uint8_t *src)
> +rte_mov128_DEFAULT(uint8_t *dst, const uint8_t *src)
>  {
> -	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
> -	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
> -	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
> -	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
> -	rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
> -	rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
> -	rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
> -	rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
>  }
>
>  /**
> @@ -569,24 +583,24 @@ rte_mov128(uint8_t *dst, const uint8_t *src)
>   * locations should not overlap.
>   */
>  static inline void
> -rte_mov256(uint8_t *dst, const uint8_t *src)
> +rte_mov256_DEFAULT(uint8_t *dst, const uint8_t *src)
>  {
> -	rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
> -	rte_mov16((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
> -	rte_mov16((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
> -	rte_mov16((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
> -	rte_mov16((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
> -	rte_mov16((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
> -	rte_mov16((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
> -	rte_mov16((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
> -	rte_mov16((uint8_t *)dst + 8 * 16, (const uint8_t *)src + 8 * 16);
> -	rte_mov16((uint8_t *)dst + 9 * 16, (const uint8_t *)src + 9 * 16);
> -	rte_mov16((uint8_t *)dst + 10 * 16, (const uint8_t *)src + 10 * 16);
> -	rte_mov16((uint8_t *)dst + 11 * 16, (const uint8_t *)src + 11 * 16);
> -	rte_mov16((uint8_t *)dst + 12 * 16, (const uint8_t *)src + 12 * 16);
> -	rte_mov16((uint8_t *)dst + 13 * 16, (const uint8_t *)src + 13 * 16);
> -	rte_mov16((uint8_t *)dst + 14 * 16, (const uint8_t *)src + 14 * 16);
> -	rte_mov16((uint8_t *)dst + 15 * 16, (const uint8_t *)src + 15 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 1 * 16, (const uint8_t *)src + 1 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 2 * 16, (const uint8_t *)src + 2 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 3 * 16, (const uint8_t *)src + 3 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 4 * 16, (const uint8_t *)src + 4 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 5 * 16, (const uint8_t *)src + 5 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 6 * 16, (const uint8_t *)src + 6 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 7 * 16, (const uint8_t *)src + 7 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 8 * 16, (const uint8_t *)src + 8 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 9 * 16, (const uint8_t *)src + 9 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 10 * 16, (const uint8_t *)src + 10 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 11 * 16, (const uint8_t *)src + 11 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 12 * 16, (const uint8_t *)src + 12 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 13 * 16, (const uint8_t *)src + 13 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 14 * 16, (const uint8_t *)src + 14 * 16);
> +	(*rte_mov16)((uint8_t *)dst + 15 * 16, (const uint8_t *)src + 15 * 16);
>  }
>
>  /**
> @@ -684,7 +698,7 @@ __extension__ ({                                                      \
>  })
>
>  static inline void *
> -rte_memcpy_generic(void *dst, const void *src, size_t n)
> +rte_memcpy_generic_DEFAULT(void *dst, const void *src, size_t n)
>  {
>  	__m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;
>  	uintptr_t dstu = (uintptr_t)dst;
> @@ -722,19 +736,22 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
>  	 * Fast way when copy size doesn't exceed 512 bytes
>  	 */
>  	if (n <= 32) {
> -		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
> -		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
> +		(*rte_mov16)((uint8_t *)dst, (const uint8_t *)src);
> +		(*rte_mov16)((uint8_t *)dst - 16 + n,
> +				(const uint8_t *)src - 16 + n);
>  		return ret;
>  	}
>  	if (n <= 48) {
> -		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
> -		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
> +		(*rte_mov32)((uint8_t *)dst, (const uint8_t *)src);
> +		(*rte_mov16)((uint8_t *)dst - 16 + n,
> +				(const uint8_t *)src - 16 + n);
>  		return ret;
>  	}
>  	if (n <= 64) {
> -		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
> -		rte_mov16((uint8_t *)dst + 32, (const uint8_t *)src + 32);
> -		rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
> +		(*rte_mov32)((uint8_t *)dst, (const uint8_t *)src);
> +		(*rte_mov16)((uint8_t *)dst + 32, (const uint8_t *)src + 32);
> +		(*rte_mov16)((uint8_t *)dst - 16 + n,
> +				(const uint8_t *)src - 16 + n);
>  		return ret;
>  	}
>  	if (n <= 128) {
> @@ -743,39 +760,42 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
>  	if (n <= 512) {
>  		if (n >= 256) {
>  			n -= 256;
> -			rte_mov128((uint8_t *)dst, (const uint8_t *)src);
> -			rte_mov128((uint8_t *)dst + 128, (const uint8_t *)src + 128);
> +			(*rte_mov128)((uint8_t *)dst, (const uint8_t *)src);
> +			(*rte_mov128)((uint8_t *)dst + 128,
> +					(const uint8_t *)src + 128);
>  			src = (const uint8_t *)src + 256;
>  			dst = (uint8_t *)dst + 256;
>  		}
>  COPY_BLOCK_255_BACK15:
>  		if (n >= 128) {
>  			n -= 128;
> -			rte_mov128((uint8_t *)dst, (const uint8_t *)src);
> +			(*rte_mov128)((uint8_t *)dst, (const uint8_t *)src);
>  			src = (const uint8_t *)src + 128;
>  			dst = (uint8_t *)dst + 128;
>  		}
>  COPY_BLOCK_128_BACK15:
>  		if (n >= 64) {
>  			n -= 64;
> -			rte_mov64((uint8_t *)dst, (const uint8_t *)src);
> +			(*rte_mov64)((uint8_t *)dst, (const uint8_t *)src);
>  			src = (const uint8_t *)src + 64;
>  			dst = (uint8_t *)dst + 64;
>  		}
>  COPY_BLOCK_64_BACK15:
>  		if (n >= 32) {
>  			n -= 32;
> -			rte_mov32((uint8_t *)dst, (const uint8_t *)src);
> +			(*rte_mov32)((uint8_t *)dst, (const uint8_t *)src);
>  			src = (const uint8_t *)src + 32;
>  			dst = (uint8_t *)dst + 32;
>  		}
>  		if (n > 16) {
> -			rte_mov16((uint8_t *)dst, (const uint8_t *)src);
> -			rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
> +			(*rte_mov16)((uint8_t *)dst, (const uint8_t *)src);
> +			(*rte_mov16)((uint8_t *)dst - 16 + n,
> +					(const uint8_t *)src - 16 + n);
>  			return ret;
>  		}
>  		if (n > 0) {
> -			rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
> +			(*rte_mov16)((uint8_t *)dst - 16 + n,
> +					(const uint8_t *)src - 16 + n);
>  		}
>  		return ret;
>  	}
> @@ -790,7 +810,7 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
>  	if (dstofss > 0) {
>  		dstofss = 16 - dstofss + 16;
>  		n -= dstofss;
> -		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
> +		(*rte_mov32)((uint8_t *)dst, (const uint8_t *)src);
>  		src = (const uint8_t *)src + dstofss;
>  		dst = (uint8_t *)dst + dstofss;
>  	}
> @@ -804,7 +824,7 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
>  		 * Copy 256-byte blocks
>  		 */
>  		for (; n >= 256; n -= 256) {
> -			rte_mov256((uint8_t *)dst, (const uint8_t *)src);
> +			(*rte_mov256)((uint8_t *)dst, (const uint8_t *)src);
>  			dst = (uint8_t *)dst + 256;
>  			src = (const uint8_t *)src + 256;
>  		}
> @@ -826,7 +846,40 @@ rte_memcpy_generic(void *dst, const void *src, size_t n)
>  	goto COPY_BLOCK_64_BACK15;
>  }
>
> -#endif /* RTE_MACHINE_CPUFLAG */
> +static void __attribute__((constructor))
> +rte_memcpy_init(void)
> +{
> +	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F)) {
> +		alignment_mask = 0x3F;
> +		rte_mov16 = rte_mov16_AVX512F;
> +		rte_mov32 = rte_mov32_AVX512F;
> +		rte_mov64 = rte_mov64_AVX512F;
> +		rte_mov128 = rte_mov128_AVX512F;
> +		rte_mov256 = rte_mov256_AVX512F;
> +		rte_mov128blocks = rte_mov128blocks_AVX512F;
> +		rte_mov512blocks = rte_mov512blocks_AVX512F;
> +		rte_memcpy_generic = rte_memcpy_generic_AVX512F;
> +		RTE_LOG(INFO, EAL, "AVX512 implementation of memcpy() is using!\n");
> +	} else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2)) {
> +		alignment_mask = 0x1F;
> +		rte_mov16 = rte_mov16_AVX2;
> +		rte_mov32 = rte_mov32_AVX2;
> +		rte_mov64 = rte_mov64_AVX2;
> +		rte_mov128 = rte_mov128_AVX2;
> +		rte_mov128blocks = rte_mov128blocks_AVX2;
> +		rte_memcpy_generic = rte_memcpy_generic_AVX2;
> +		RTE_LOG(INFO, EAL, "AVX2 implementation of memcpy() is using!\n");
> +	} else {
> +		alignment_mask = 0x0F;
> +		rte_mov16 = rte_mov16_DEFAULT;
> +		rte_mov32 = rte_mov32_DEFAULT;
> +		rte_mov64 = rte_mov64_DEFAULT;
> +		rte_mov128 = rte_mov128_DEFAULT;
> +		rte_mov256 = rte_mov256_DEFAULT;
> +		rte_memcpy_generic = rte_memcpy_generic_DEFAULT;
> +		RTE_LOG(INFO, EAL, "Default SSE/AVX implementation of memcpy() is using!\n");
> +	}
> +}
>
>  static inline void *
>  rte_memcpy_aligned(void *dst, const void *src, size_t n)
> @@ -858,8 +911,8 @@ rte_memcpy_aligned(void *dst, const void *src, size_t n)
>
>  	/* Copy 16 <= size <= 32 bytes */
>  	if (n <=3D 32) {
> -		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
> -		rte_mov16((uint8_t *)dst - 16 + n,
> +		(*rte_mov16)((uint8_t *)dst, (const uint8_t *)src);
> +		(*rte_mov16)((uint8_t *)dst - 16 + n,
>  				(const uint8_t *)src - 16 + n);
>
>  		return ret;
> @@ -867,8 +920,8 @@ rte_memcpy_aligned(void *dst, const void *src, size_t n)
>
>  	/* Copy 32 < size <= 64 bytes */
>  	if (n <= 64) {
> -		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
> -		rte_mov32((uint8_t *)dst - 32 + n,
> +		(*rte_mov32)((uint8_t *)dst, (const uint8_t *)src);
> +		(*rte_mov32)((uint8_t *)dst - 32 + n,
>  				(const uint8_t *)src - 32 + n);
>
>  		return ret;
> @@ -876,13 +929,13 @@ rte_memcpy_aligned(void *dst, const void *src, size_t n)
>
>  	/* Copy 64 bytes blocks */
>  	for (; n >= 64; n -= 64) {
> -		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
> +		(*rte_mov64)((uint8_t *)dst, (const uint8_t *)src);
>  		dst = (uint8_t *)dst + 64;
>  		src = (const uint8_t *)src + 64;
>  	}
>
>  	/* Copy whatever left */
> -	rte_mov64((uint8_t *)dst - 64 + n,
> +	(*rte_mov64)((uint8_t *)dst - 64 + n,
>  			(const uint8_t *)src - 64 + n);
>
>  	return ret;
> @@ -891,10 +944,10 @@ rte_memcpy_aligned(void *dst, const void *src, size_t n)
>  static inline void *
>  rte_memcpy(void *dst, const void *src, size_t n)
>  {
> -	if (!(((uintptr_t)dst | (uintptr_t)src) & ALIGNMENT_MASK))
> +	if (!(((uintptr_t)dst | (uintptr_t)src) & alignment_mask))
>  		return rte_memcpy_aligned(dst, src, n);
>  	else
> -		return rte_memcpy_generic(dst, src, n);
> +		return (*rte_memcpy_generic)(dst, src, n);
>  }
>
>  #ifdef __cplusplus
> diff --git a/mk/machine/native/rte.vars.mk b/mk/machine/native/rte.vars.mk
> index f7d98d0..cdcf6c6 100644
> --- a/mk/machine/native/rte.vars.mk
> +++ b/mk/machine/native/rte.vars.mk
> @@ -65,3 +65,5 @@ SSE42_SUPPORT=$(shell $(CC) -march=native -dM -E - </dev/null | grep SSE4_2)
>  ifeq ($(SSE42_SUPPORT),)
>      MACHINE_CFLAGS = -march=corei7
>  endif
> +
> +MACHINE_CFLAGS += -mavx512f
> --
> 2.7.4