* [dpdk-dev] [PATCH v1 0/2] AVX512 vPMD on ice @ 2020-09-10 6:55 Leyi Rong 2020-09-10 6:55 ` [dpdk-dev] [PATCH v1 1/2] net/ice: add AVX512 vector path Leyi Rong ` (4 more replies) 0 siblings, 5 replies; 25+ messages in thread From: Leyi Rong @ 2020-09-10 6:55 UTC (permalink / raw) To: bruce.richardson, wenzhuo.lu, qi.z.zhang; +Cc: dev, Leyi Rong This patchset aims to support AVX512 vPMD on ice. It has a dependency on http://patches.dpdk.org/cover/76096/, which supports SIMD bitwidth selection properly. Leyi Rong (2): net/ice: add AVX512 vector path net/ice: optimize Tx path on AVX512 vPMD drivers/net/ice/ice_rxtx.c | 88 ++- drivers/net/ice/ice_rxtx.h | 11 + drivers/net/ice/ice_rxtx_vec_avx512.c | 927 ++++++++++++++++++++++++++ drivers/net/ice/ice_rxtx_vec_common.h | 36 +- drivers/net/ice/meson.build | 13 + 5 files changed, 1050 insertions(+), 25 deletions(-) create mode 100644 drivers/net/ice/ice_rxtx_vec_avx512.c -- 2.17.1 ^ permalink raw reply [flat|nested] 25+ messages in thread
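For orientation before the patches themselves: the runtime selection that both patches hinge on boils down to the minimal sketch below. It assumes the rte_get_max_simd_bitwidth() API from the dependency series above and the CC_AVX512_SUPPORT macro that patch 1 defines in meson.build; ice_select_avx512_path() is a hypothetical helper name, as the diffs inline this logic directly in ice_set_rx_function()/ice_set_tx_function().

static bool
ice_select_avx512_path(void)	/* hypothetical helper; diffs inline this */
{
	/* runtime checks: user-requested SIMD width, then CPU capability */
	if (rte_get_max_simd_bitwidth() >= RTE_MAX_512_SIMD &&
	    rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) {
#ifdef CC_AVX512_SUPPORT
		return true;	/* AVX512 burst functions were compiled in */
#else
		/* CPU is capable but the build environment was not */
		PMD_DRV_LOG(NOTICE, "AVX512 is not supported in build env");
#endif
	}
	return false;	/* fall through to the AVX2/SSE checks */
}
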
* [dpdk-dev] [PATCH v1 1/2] net/ice: add AVX512 vector path 2020-09-10 6:55 [dpdk-dev] [PATCH v1 0/2] AVX512 vPMD on ice Leyi Rong @ 2020-09-10 6:55 ` Leyi Rong 2020-09-10 9:32 ` Bruce Richardson 2020-09-10 6:55 ` [dpdk-dev] [PATCH v1 2/2] net/ice: optimize Tx path on AVX512 vPMD Leyi Rong ` (3 subsequent siblings) 4 siblings, 1 reply; 25+ messages in thread From: Leyi Rong @ 2020-09-10 6:55 UTC (permalink / raw) To: bruce.richardson, wenzhuo.lu, qi.z.zhang; +Cc: dev, Leyi Rong Add AVX512 support for ice PMD. This patch adds ice_rxtx_vec_avx512.c to support ice AVX512 vPMD. This patch aims to enable AVX512 on ice vPMD. Main changes are focus on Rx path compared with AVX2 vPMD. Signed-off-by: Leyi Rong <leyi.rong@intel.com> Signed-off-by: Bruce Richardson <bruce.richardson@intel.com> --- drivers/net/ice/ice_rxtx.c | 88 ++- drivers/net/ice/ice_rxtx.h | 7 + drivers/net/ice/ice_rxtx_vec_avx512.c | 824 ++++++++++++++++++++++++++ drivers/net/ice/meson.build | 13 + 4 files changed, 914 insertions(+), 18 deletions(-) create mode 100644 drivers/net/ice/ice_rxtx_vec_avx512.c diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c index ccddae687..096bf098f 100644 --- a/drivers/net/ice/ice_rxtx.c +++ b/drivers/net/ice/ice_rxtx.c @@ -1852,6 +1852,10 @@ ice_dev_supported_ptypes_get(struct rte_eth_dev *dev) #ifdef RTE_ARCH_X86 if (dev->rx_pkt_burst == ice_recv_pkts_vec || dev->rx_pkt_burst == ice_recv_scattered_pkts_vec || +#ifdef CC_AVX512_SUPPORT + dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 || + dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 || +#endif dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 || dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2) return ptypes; @@ -2909,6 +2913,7 @@ ice_set_rx_function(struct rte_eth_dev *dev) #ifdef RTE_ARCH_X86 struct ice_rx_queue *rxq; int i; + bool use_avx512 = false; bool use_avx2 = false; if (rte_eal_process_type() == RTE_PROC_PRIMARY) { @@ -2924,10 +2929,18 @@ ice_set_rx_function(struct rte_eth_dev *dev) } } - if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 || + if (rte_get_max_simd_bitwidth() >= RTE_MAX_512_SIMD && + rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) +#ifdef CC_AVX512_SUPPORT + use_avx512 = true; +#else + PMD_DRV_LOG(NOTICE, + "AVX512 is not supported in build env"); +#endif + if (!use_avx512 && + (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 || rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) && - rte_get_max_simd_bitwidth() - >= RTE_MAX_256_SIMD) + rte_get_max_simd_bitwidth() >= RTE_MAX_256_SIMD) use_avx2 = true; } else { @@ -2937,20 +2950,37 @@ ice_set_rx_function(struct rte_eth_dev *dev) if (ad->rx_vec_allowed) { if (dev->data->scattered_rx) { - PMD_DRV_LOG(DEBUG, + if (use_avx512) { + PMD_DRV_LOG(NOTICE, + "Using AVX512 Vector Scattered Rx (port %d).", + dev->data->port_id); + dev->rx_pkt_burst = + ice_recv_scattered_pkts_vec_avx512; + } else { + PMD_DRV_LOG(DEBUG, "Using %sVector Scattered Rx (port %d).", use_avx2 ? "avx2 " : "", dev->data->port_id); - dev->rx_pkt_burst = use_avx2 ? + dev->rx_pkt_burst = use_avx2 ? ice_recv_scattered_pkts_vec_avx2 : ice_recv_scattered_pkts_vec; + } } else { - PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).", + if (use_avx512) { + PMD_DRV_LOG(NOTICE, + "Using AVX512 Vector Rx (port %d).", + dev->data->port_id); + dev->rx_pkt_burst = + ice_recv_pkts_vec_avx512; + } else { + PMD_DRV_LOG(DEBUG, + "Using %sVector Rx (port %d).", use_avx2 ? "avx2 " : "", dev->data->port_id); - dev->rx_pkt_burst = use_avx2 ? 
- ice_recv_pkts_vec_avx2 : - ice_recv_pkts_vec; + dev->rx_pkt_burst = use_avx2 ? + ice_recv_pkts_vec_avx2 : + ice_recv_pkts_vec; + } } return; } @@ -2987,6 +3017,10 @@ static const struct { { ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" }, { ice_recv_pkts, "Scalar" }, #ifdef RTE_ARCH_X86 +#ifdef CC_AVX512_SUPPORT + { ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" }, + { ice_recv_pkts_vec_avx512, "Vector AVX512" }, +#endif { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" }, { ice_recv_pkts_vec_avx2, "Vector AVX2" }, { ice_recv_scattered_pkts_vec, "Vector SSE Scattered" }, @@ -3091,6 +3125,7 @@ ice_set_tx_function(struct rte_eth_dev *dev) #ifdef RTE_ARCH_X86 struct ice_tx_queue *txq; int i; + bool use_avx512 = false; bool use_avx2 = false; if (rte_eal_process_type() == RTE_PROC_PRIMARY) { @@ -3106,10 +3141,18 @@ ice_set_tx_function(struct rte_eth_dev *dev) } } - if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 || + if (rte_get_max_simd_bitwidth() >= RTE_MAX_512_SIMD && + rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) +#ifdef CC_AVX512_SUPPORT + use_avx512 = true; +#else + PMD_DRV_LOG(NOTICE, + "AVX512 is not supported in build env"); +#endif + if (!use_avx512 && + (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 || rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) && - rte_get_max_simd_bitwidth() - >= RTE_MAX_256_SIMD) + rte_get_max_simd_bitwidth() >= RTE_MAX_256_SIMD) use_avx2 = true; } else { @@ -3118,12 +3161,18 @@ ice_set_tx_function(struct rte_eth_dev *dev) } if (ad->tx_vec_allowed) { - PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).", - use_avx2 ? "avx2 " : "", - dev->data->port_id); - dev->tx_pkt_burst = use_avx2 ? - ice_xmit_pkts_vec_avx2 : - ice_xmit_pkts_vec; + if (use_avx512) { + PMD_DRV_LOG(NOTICE, "Using AVX512 Vector Tx (port %d).", + dev->data->port_id); + dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512; + } else { + PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).", + use_avx2 ? "avx2 " : "", + dev->data->port_id); + dev->tx_pkt_burst = use_avx2 ? 
+ ice_xmit_pkts_vec_avx2 : + ice_xmit_pkts_vec; + } dev->tx_pkt_prepare = NULL; return; @@ -3148,6 +3197,9 @@ static const struct { { ice_xmit_pkts_simple, "Scalar Simple" }, { ice_xmit_pkts, "Scalar" }, #ifdef RTE_ARCH_X86 +#ifdef CC_AVX512_SUPPORT + { ice_xmit_pkts_vec_avx512, "Vector AVX512" }, +#endif { ice_xmit_pkts_vec_avx2, "Vector AVX2" }, { ice_xmit_pkts_vec, "Vector SSE" }, #endif diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h index 2fdcfb7d0..a39b41c05 100644 --- a/drivers/net/ice/ice_rxtx.h +++ b/drivers/net/ice/ice_rxtx.h @@ -200,6 +200,13 @@ uint16_t ice_recv_scattered_pkts_vec_avx2(void *rx_queue, uint16_t nb_pkts); uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); +uint16_t ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t ice_recv_scattered_pkts_vec_avx512(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc); int ice_tx_done_cleanup(void *txq, uint32_t free_cnt); diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c new file mode 100644 index 000000000..6a9d0a8ea --- /dev/null +++ b/drivers/net/ice/ice_rxtx_vec_avx512.c @@ -0,0 +1,824 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#include "ice_rxtx_vec_common.h" + +#include <x86intrin.h> + +#ifndef __INTEL_COMPILER +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +#define ICE_DESCS_PER_LOOP_AVX 8 + +static inline void +ice_rxq_rearm(struct ice_rx_queue *rxq) +{ + int i; + uint16_t rx_id; + volatile union ice_rx_flex_desc *rxdp; + struct ice_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start]; + struct rte_mempool_cache *cache = rte_mempool_default_cache(rxq->mp, + rte_lcore_id()); + + rxdp = rxq->rx_ring + rxq->rxrearm_start; + + /* We need to pull 'n' more MBUFs into the software ring */ + if (cache->len < ICE_RXQ_REARM_THRESH) { + uint32_t req = ICE_RXQ_REARM_THRESH + (cache->size - + cache->len); + + int ret = rte_mempool_ops_dequeue_bulk(rxq->mp, + &cache->objs[cache->len], req); + if (ret == 0) { + cache->len += req; + } else { + if (rxq->rxrearm_nb + ICE_RXQ_REARM_THRESH >= + rxq->nb_rx_desc) { + __m128i dma_addr0; + + dma_addr0 = _mm_setzero_si128(); + for (i = 0; i < ICE_DESCS_PER_LOOP; i++) { + rxep[i].mbuf = &rxq->fake_mbuf; + _mm_store_si128 + ((__m128i *)&rxdp[i].read, + dma_addr0); + } + } + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += + ICE_RXQ_REARM_THRESH; + return; + } + } + + const __m512i iova_offsets = _mm512_set1_epi64 + (offsetof(struct rte_mbuf, buf_iova)); + const __m512i headroom = _mm512_set1_epi64(RTE_PKTMBUF_HEADROOM); + +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC + /* shuffle the iova into correct slots. Values 4-7 will contain + * zeros, so use 7 for a zero-value. 
+ */ + const __m512i permute_idx = _mm512_set_epi64(7, 7, 3, 1, 7, 7, 2, 0); +#else + const __m512i permute_idx = _mm512_set_epi64(7, 3, 6, 2, 5, 1, 4, 0); +#endif + + /* fill up the rxd in vector, process 8 mbufs in one loop */ + for (i = 0; i < ICE_RXQ_REARM_THRESH / 8; i++) { + const __m512i mbuf_ptrs = _mm512_loadu_si512 + (&cache->objs[cache->len - 8]); + _mm512_store_si512(rxep, mbuf_ptrs); + + /* gather iova of mbuf0-7 into one zmm reg */ + const __m512i iova_base_addrs = _mm512_i64gather_epi64 + (_mm512_add_epi64(mbuf_ptrs, iova_offsets), + 0, /* base */ + 1 /* scale */); + const __m512i iova_addrs = _mm512_add_epi64(iova_base_addrs, + headroom); +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC + const __m512i iovas0 = _mm512_castsi256_si512 + (_mm512_extracti64x4_epi64(iova_addrs, 0)); + const __m512i iovas1 = _mm512_castsi256_si512 + (_mm512_extracti64x4_epi64(iova_addrs, 1)); + + /* permute leaves iova 2-3 in hdr_addr of desc 0-1 + * but these are ignored by driver since header split not + * enabled. Similarly for desc 4 & 5. + */ + const __m512i desc0_1 = _mm512_permutexvar_epi64 + (permute_idx, iovas0); + const __m512i desc2_3 = _mm512_bsrli_epi128(desc0_1, 8); + + const __m512i desc4_5 = _mm512_permutexvar_epi64 + (permute_idx, iovas1); + const __m512i desc6_7 = _mm512_bsrli_epi128(desc4_5, 8); + + _mm512_store_si512((void *)rxdp, desc0_1); + _mm512_store_si512((void *)(rxdp + 2), desc2_3); + _mm512_store_si512((void *)(rxdp + 4), desc4_5); + _mm512_store_si512((void *)(rxdp + 6), desc6_7); +#else + /* permute leaves iova 4-7 in hdr_addr of desc 0-3 + * but these are ignored by driver since header split not + * enabled. + */ + const __m512i desc0_3 = _mm512_permutexvar_epi64 + (permute_idx, iova_addrs); + const __m512i desc4_7 = _mm512_bsrli_epi128(desc0_3, 8); + + _mm512_store_si512((void *)rxdp, desc0_3); + _mm512_store_si512((void *)(rxdp + 4), desc4_7); +#endif + rxep += 8, rxdp += 8, cache->len -= 8; + } + + rxq->rxrearm_start += ICE_RXQ_REARM_THRESH; + if (rxq->rxrearm_start >= rxq->nb_rx_desc) + rxq->rxrearm_start = 0; + + rxq->rxrearm_nb -= ICE_RXQ_REARM_THRESH; + + rx_id = (uint16_t)((rxq->rxrearm_start == 0) ? 
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1)); + + /* Update the tail pointer on the NIC */ + ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id); +} + +static inline uint16_t +_ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts, uint8_t *split_packet) +{ + const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; + const __m256i mbuf_init = _mm256_set_epi64x(0, 0, + 0, rxq->mbuf_initializer); + struct ice_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail]; + volatile union ice_rx_flex_desc *rxdp = rxq->rx_ring + rxq->rx_tail; + + rte_prefetch0(rxdp); + + /* nb_pkts has to be floor-aligned to ICE_DESCS_PER_LOOP_AVX */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, ICE_DESCS_PER_LOOP_AVX); + + /* See if we need to rearm the RX queue - gives the prefetch a bit + * of time to act + */ + if (rxq->rxrearm_nb > ICE_RXQ_REARM_THRESH) + ice_rxq_rearm(rxq); + + /* Before we start moving massive data around, check to see if + * there is actually a packet available + */ + if (!(rxdp->wb.status_error0 & + rte_cpu_to_le_32(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))) + return 0; + + /* constants used in processing loop */ + const __m512i crc_adjust = + _mm512_set4_epi32 + (0, /* ignore non-length fields */ + -rxq->crc_len, /* sub crc on data_len */ + -rxq->crc_len, /* sub crc on pkt_len */ + 0 /* ignore non-length fields */ + ); + + /* 8 packets DD mask, LSB in each 32-bit value */ + const __m256i dd_check = _mm256_set1_epi32(1); + + /* 8 packets EOP mask, second-LSB in each 32-bit value */ + const __m256i eop_check = _mm256_slli_epi32(dd_check, + ICE_RX_DESC_STATUS_EOF_S); + + /* mask to shuffle from desc. to mbuf (4 descriptors)*/ + const __m512i shuf_msk = + _mm512_set4_epi32 + (/* octet 12~15, 32 bits rss */ + 15 << 24 | 14 << 16 | 13 << 8 | 12, + /* octet 10~11, 16 bits vlan_macip */ + /* octet 4~5, 16 bits data_len */ + 11 << 24 | 10 << 16 | 5 << 8 | 4, + /* skip hi 16 bits pkt_len, zero out */ + /* octet 4~5, 16 bits pkt_len */ + 0xFFFF << 16 | 5 << 8 | 4, + /* pkt_type set as unknown */ + 0xFFFFFFFF + ); + + /** + * compile-time check the above crc and shuffle layout is correct. + * NOTE: the first field (lowest address) is given last in set_epi + * calls above. + */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12); + + /* Status/Error flag masks */ + /** + * mask everything except Checksum Reports, RSS indication + * and VLAN indication. + * bit6:4 for IP/L4 checksum errors. + * bit12 is for RSS indication. + * bit13 is for VLAN indication. + */ + const __m256i flags_mask = + _mm256_set1_epi32((7 << 4) | (1 << 12) | (1 << 13)); + /** + * data to be shuffled by the result of the flags mask shifted by 4 + * bits. This gives use the l3_l4 flags. 
+ */ + const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, + /* shift right 1 bit to make sure it not exceed 255 */ + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, + /* 2nd 128-bits */ + 0, 0, 0, 0, 0, 0, 0, 0, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1); + const __m256i cksum_mask = + _mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_EIP_CKSUM_BAD); + /** + * data to be shuffled by result of flag mask, shifted down 12. + * If RSS(bit12)/VLAN(bit13) are set, + * shuffle moves appropriate flags in place. + */ + const __m256i rss_vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_RSS_HASH, 0, + /* 2nd 128-bits */ + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_RSS_HASH, 0); + + uint16_t i, received; + + for (i = 0, received = 0; i < nb_pkts; + i += ICE_DESCS_PER_LOOP_AVX, + rxdp += ICE_DESCS_PER_LOOP_AVX) { + /* step 1, copy over 8 mbuf pointers to rx_pkts array */ + _mm256_storeu_si256((void *)&rx_pkts[i], + _mm256_loadu_si256((void *)&sw_ring[i])); +#ifdef RTE_ARCH_X86_64 + _mm256_storeu_si256 + ((void *)&rx_pkts[i + 4], + _mm256_loadu_si256((void *)&sw_ring[i + 4])); +#endif + + __m512i raw_desc0_3, raw_desc4_7; + __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7; + + /* load in descriptors, in reverse order */ + const __m128i raw_desc7 = + _mm_load_si128((void *)(rxdp + 7)); + rte_compiler_barrier(); + const __m128i raw_desc6 = + _mm_load_si128((void *)(rxdp + 6)); + rte_compiler_barrier(); + const __m128i raw_desc5 = + _mm_load_si128((void *)(rxdp + 5)); + rte_compiler_barrier(); + const __m128i raw_desc4 = + _mm_load_si128((void *)(rxdp + 4)); + rte_compiler_barrier(); + const __m128i raw_desc3 = + _mm_load_si128((void *)(rxdp + 3)); + rte_compiler_barrier(); + const __m128i raw_desc2 = + _mm_load_si128((void *)(rxdp + 2)); + rte_compiler_barrier(); + const __m128i raw_desc1 = + _mm_load_si128((void *)(rxdp + 1)); + rte_compiler_barrier(); + const __m128i raw_desc0 = + _mm_load_si128((void *)(rxdp + 0)); + + raw_desc6_7 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc6), + raw_desc7, 1); + raw_desc4_5 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc4), + raw_desc5, 1); + raw_desc2_3 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc2), + raw_desc3, 1); + raw_desc0_1 = + _mm256_inserti128_si256 
+ (_mm256_castsi128_si256(raw_desc0), + raw_desc1, 1); + + raw_desc4_7 = + _mm512_inserti64x4 + (_mm512_castsi256_si512(raw_desc4_5), + raw_desc6_7, 1); + raw_desc0_3 = + _mm512_inserti64x4 + (_mm512_castsi256_si512(raw_desc0_1), + raw_desc2_3, 1); + + if (split_packet) { + int j; + + for (j = 0; j < ICE_DESCS_PER_LOOP_AVX; j++) + rte_mbuf_prefetch_part2(rx_pkts[i + j]); + } + + /** + * convert descriptors 0-7 into mbufs, re-arrange fields. + * Then write into the mbuf. + */ + __m512i mb4_7 = _mm512_shuffle_epi8(raw_desc4_7, shuf_msk); + __m512i mb0_3 = _mm512_shuffle_epi8(raw_desc0_3, shuf_msk); + + mb4_7 = _mm512_add_epi32(mb4_7, crc_adjust); + mb0_3 = _mm512_add_epi32(mb0_3, crc_adjust); + + /** + * to get packet types, ptype is located in bit16-25 + * of each 128bits + */ + const __m512i ptype_mask = + _mm512_set1_epi16(ICE_RX_FLEX_DESC_PTYPE_M); + + /** + * to get packet types, ptype is located in bit16-25 + * of each 128bits + */ + const __m512i ptypes4_7 = + _mm512_and_si512(raw_desc4_7, ptype_mask); + const __m512i ptypes0_3 = + _mm512_and_si512(raw_desc0_3, ptype_mask); + + const __m256i ptypes6_7 = + _mm512_extracti64x4_epi64(ptypes4_7, 1); + const __m256i ptypes4_5 = + _mm512_extracti64x4_epi64(ptypes4_7, 0); + const __m256i ptypes2_3 = + _mm512_extracti64x4_epi64(ptypes0_3, 1); + const __m256i ptypes0_1 = + _mm512_extracti64x4_epi64(ptypes0_3, 0); + const uint16_t ptype7 = _mm256_extract_epi16(ptypes6_7, 9); + const uint16_t ptype6 = _mm256_extract_epi16(ptypes6_7, 1); + const uint16_t ptype5 = _mm256_extract_epi16(ptypes4_5, 9); + const uint16_t ptype4 = _mm256_extract_epi16(ptypes4_5, 1); + const uint16_t ptype3 = _mm256_extract_epi16(ptypes2_3, 9); + const uint16_t ptype2 = _mm256_extract_epi16(ptypes2_3, 1); + const uint16_t ptype1 = _mm256_extract_epi16(ptypes0_1, 9); + const uint16_t ptype0 = _mm256_extract_epi16(ptypes0_1, 1); + + const __m512i ptype4_7 = _mm512_set_epi32 + (0, 0, 0, ptype_tbl[ptype7], + 0, 0, 0, ptype_tbl[ptype6], + 0, 0, 0, ptype_tbl[ptype5], + 0, 0, 0, ptype_tbl[ptype4]); + const __m512i ptype0_3 = _mm512_set_epi32 + (0, 0, 0, ptype_tbl[ptype3], + 0, 0, 0, ptype_tbl[ptype2], + 0, 0, 0, ptype_tbl[ptype1], + 0, 0, 0, ptype_tbl[ptype0]); + + mb4_7 = _mm512_mask_blend_epi32(0x1111, mb4_7, ptype4_7); + mb0_3 = _mm512_mask_blend_epi32(0x1111, mb0_3, ptype0_3); + + /** + * use permute/extract to get status content + * After the operations, the packets status flags are in the + * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6] + */ + /* merge the status bits into one register */ + const __m512i status_permute_msk = _mm512_set_epi32 + (0, 0, 0, 0, + 0, 0, 0, 0, + 22, 30, 6, 14, + 18, 26, 2, 10); + const __m512i raw_status0_7 = _mm512_permutex2var_epi32 + (raw_desc4_7, status_permute_msk, raw_desc0_3); + __m256i status0_7 = _mm512_extracti64x4_epi64 + (raw_status0_7, 0); + + /* now do flag manipulation */ + + /* get only flag/error bits we want */ + const __m256i flag_bits = + _mm256_and_si256(status0_7, flags_mask); + /** + * l3_l4_error flags, shuffle, then shift to correct adjustment + * of flags in flags_shuf, and finally mask out extra bits + */ + __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf, + _mm256_srli_epi32(flag_bits, 4)); + l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1); + l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask); + /* set rss and vlan flags */ + const __m256i rss_vlan_flag_bits = + _mm256_srli_epi32(flag_bits, 12); + const __m256i rss_vlan_flags = + _mm256_shuffle_epi8(rss_vlan_flags_shuf, + rss_vlan_flag_bits); + + /* merge 
flags */ + const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags, + rss_vlan_flags); + /** + * At this point, we have the 8 sets of flags in the low 16-bits + * of each 32-bit value in vlan0. + * We want to extract these, and merge them with the mbuf init + * data so we can do a single write to the mbuf to set the flags + * and all the other initialization fields. Extracting the + * appropriate flags means that we have to do a shift and blend + * for each mbuf before we do the write. However, we can also + * add in the previously computed rx_descriptor fields to + * make a single 256-bit write per mbuf + */ + /* check the structure matches expectations */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) != + offsetof(struct rte_mbuf, rearm_data) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) != + RTE_ALIGN(offsetof(struct rte_mbuf, + rearm_data), + 16)); + /* build up data and do writes */ + __m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5, + rearm6, rearm7; + + rearm6 = _mm256_blend_epi32(mbuf_init, + _mm256_slli_si256(mbuf_flags, 8), + 0x04); + rearm4 = _mm256_blend_epi32(mbuf_init, + _mm256_slli_si256(mbuf_flags, 4), + 0x04); + rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04); + rearm0 = _mm256_blend_epi32(mbuf_init, + _mm256_srli_si256(mbuf_flags, 4), + 0x04); + + const __m256i mb4_5 = _mm512_extracti64x4_epi64(mb4_7, 0); + const __m256i mb6_7 = _mm512_extracti64x4_epi64(mb4_7, 1); + const __m256i mb0_1 = _mm512_extracti64x4_epi64(mb0_3, 0); + const __m256i mb2_3 = _mm512_extracti64x4_epi64(mb0_3, 1); + + /* permute to add in the rx_descriptor e.g. rss fields */ + rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20); + rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20); + rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20); + rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20); + + /* write to mbuf */ + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data, + rearm6); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data, + rearm4); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data, + rearm2); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data, + rearm0); + + /* repeat for the odd mbufs */ + const __m256i odd_flags = + _mm256_castsi128_si256 + (_mm256_extracti128_si256(mbuf_flags, 1)); + rearm7 = _mm256_blend_epi32(mbuf_init, + _mm256_slli_si256(odd_flags, 8), + 0x04); + rearm5 = _mm256_blend_epi32(mbuf_init, + _mm256_slli_si256(odd_flags, 4), + 0x04); + rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04); + rearm1 = _mm256_blend_epi32(mbuf_init, + _mm256_srli_si256(odd_flags, 4), + 0x04); + + /* since odd mbufs are already in hi 128-bits use blend */ + rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0); + rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0); + rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0); + rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0); + /* again write to mbufs */ + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data, + rearm7); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data, + rearm5); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data, + rearm3); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data, + rearm1); + + /* extract and record EOP bit */ + if (split_packet) { + const __m128i eop_mask = + _mm_set1_epi16(1 << ICE_RX_DESC_STATUS_EOF_S); + const __m256i eop_bits256 = _mm256_and_si256(status0_7, + eop_check); + /* pack status bits into a single 128-bit register */ + const __m128i eop_bits = + _mm_packus_epi32 + 
(_mm256_castsi256_si128(eop_bits256), + _mm256_extractf128_si256(eop_bits256, + 1)); + /** + * flip bits, and mask out the EOP bit, which is now + * a split-packet bit i.e. !EOP, rather than EOP one. + */ + __m128i split_bits = _mm_andnot_si128(eop_bits, + eop_mask); + /** + * eop bits are out of order, so we need to shuffle them + * back into order again. In doing so, only use low 8 + * bits, which acts like another pack instruction + * The original order is (hi->lo): 1,3,5,7,0,2,4,6 + * [Since we use epi8, the 16-bit positions are + * multiplied by 2 in the eop_shuffle value.] + */ + __m128i eop_shuffle = + _mm_set_epi8(/* zero hi 64b */ + 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, + /* move values to lo 64b */ + 8, 0, 10, 2, + 12, 4, 14, 6); + split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle); + *(uint64_t *)split_packet = + _mm_cvtsi128_si64(split_bits); + split_packet += ICE_DESCS_PER_LOOP_AVX; + } + + /* perform dd_check */ + status0_7 = _mm256_and_si256(status0_7, dd_check); + status0_7 = _mm256_packs_epi32(status0_7, + _mm256_setzero_si256()); + + uint64_t burst = __builtin_popcountll + (_mm_cvtsi128_si64 + (_mm256_extracti128_si256 + (status0_7, 1))); + burst += __builtin_popcountll + (_mm_cvtsi128_si64 + (_mm256_castsi256_si128(status0_7))); + received += burst; + if (burst != ICE_DESCS_PER_LOOP_AVX) + break; + } + + /* update tail pointers */ + rxq->rx_tail += received; + rxq->rx_tail &= (rxq->nb_rx_desc - 1); + if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */ + rxq->rx_tail--; + received--; + } + rxq->rxrearm_nb += received; + return received; +} + +/** + * Notice: + * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet + */ +uint16_t +ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts, NULL); +} + +/** + * vPMD receive routine that reassembles single burst of 32 scattered packets + * Notice: + * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet + */ +static uint16_t +ice_recv_scattered_burst_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct ice_rx_queue *rxq = rx_queue; + uint8_t split_flags[ICE_VPMD_RX_BURST] = {0}; + + /* get some new buffers */ + uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx512(rxq, rx_pkts, nb_pkts, + split_flags); + if (nb_bufs == 0) + return 0; + + /* happy day case, full burst + no packets to be joined */ + const uint64_t *split_fl64 = (uint64_t *)split_flags; + + if (!rxq->pkt_first_seg && + split_fl64[0] == 0 && split_fl64[1] == 0 && + split_fl64[2] == 0 && split_fl64[3] == 0) + return nb_bufs; + + /* reassemble any packets that need reassembly */ + unsigned int i = 0; + + if (!rxq->pkt_first_seg) { + /* find the first split flag, and only reassemble then */ + while (i < nb_bufs && !split_flags[i]) + i++; + if (i == nb_bufs) + return nb_bufs; + rxq->pkt_first_seg = rx_pkts[i]; + } + return i + ice_rx_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i, + &split_flags[i]); +} + +/** + * vPMD receive routine that reassembles scattered packets. 
+ * Main receive routine that can handle arbitrary burst sizes + * Notice: + * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet + */ +uint16_t +ice_recv_scattered_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + uint16_t retval = 0; + + while (nb_pkts > ICE_VPMD_RX_BURST) { + uint16_t burst = ice_recv_scattered_burst_vec_avx512(rx_queue, + rx_pkts + retval, ICE_VPMD_RX_BURST); + retval += burst; + nb_pkts -= burst; + if (burst < ICE_VPMD_RX_BURST) + return retval; + } + return retval + ice_recv_scattered_burst_vec_avx512(rx_queue, + rx_pkts + retval, nb_pkts); +} + +static inline void +ice_vtx1(volatile struct ice_tx_desc *txdp, + struct rte_mbuf *pkt, uint64_t flags) +{ + uint64_t high_qw = + (ICE_TX_DESC_DTYPE_DATA | + ((uint64_t)flags << ICE_TXD_QW1_CMD_S) | + ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S)); + + __m128i descriptor = _mm_set_epi64x(high_qw, + pkt->buf_physaddr + pkt->data_off); + _mm_store_si128((__m128i *)txdp, descriptor); +} + +static inline void +ice_vtx(volatile struct ice_tx_desc *txdp, + struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags) +{ + const uint64_t hi_qw_tmpl = (ICE_TX_DESC_DTYPE_DATA | + ((uint64_t)flags << ICE_TXD_QW1_CMD_S)); + + /* if unaligned on 32-bit boundary, do one to align */ + if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) { + ice_vtx1(txdp, *pkt, flags); + nb_pkts--, txdp++, pkt++; + } + + /* do two at a time while possible, in bursts */ + for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) { + uint64_t hi_qw3 = + hi_qw_tmpl | + ((uint64_t)pkt[3]->data_len << + ICE_TXD_QW1_TX_BUF_SZ_S); + uint64_t hi_qw2 = + hi_qw_tmpl | + ((uint64_t)pkt[2]->data_len << + ICE_TXD_QW1_TX_BUF_SZ_S); + uint64_t hi_qw1 = + hi_qw_tmpl | + ((uint64_t)pkt[1]->data_len << + ICE_TXD_QW1_TX_BUF_SZ_S); + uint64_t hi_qw0 = + hi_qw_tmpl | + ((uint64_t)pkt[0]->data_len << + ICE_TXD_QW1_TX_BUF_SZ_S); + + __m256i desc2_3 = + _mm256_set_epi64x + (hi_qw3, + pkt[3]->buf_physaddr + pkt[3]->data_off, + hi_qw2, + pkt[2]->buf_physaddr + pkt[2]->data_off); + __m256i desc0_1 = + _mm256_set_epi64x + (hi_qw1, + pkt[1]->buf_physaddr + pkt[1]->data_off, + hi_qw0, + pkt[0]->buf_physaddr + pkt[0]->data_off); + _mm256_store_si256((void *)(txdp + 2), desc2_3); + _mm256_store_si256((void *)txdp, desc0_1); + } + + /* do any last ones */ + while (nb_pkts) { + ice_vtx1(txdp, *pkt, flags); + txdp++, pkt++, nb_pkts--; + } +} + +static inline uint16_t +ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue; + volatile struct ice_tx_desc *txdp; + struct ice_tx_entry *txep; + uint16_t n, nb_commit, tx_id; + uint64_t flags = ICE_TD_CMD; + uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD; + + /* cross rx_thresh boundary is not allowed */ + nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh); + + if (txq->nb_tx_free < txq->tx_free_thresh) + ice_tx_free_bufs(txq); + + nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); + if (unlikely(nb_pkts == 0)) + return 0; + + tx_id = txq->tx_tail; + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring[tx_id]; + + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts); + + n = (uint16_t)(txq->nb_tx_desc - tx_id); + if (nb_commit >= n) { + ice_tx_backlog_entry(txep, tx_pkts, n); + + ice_vtx(txdp, tx_pkts, n - 1, flags); + tx_pkts += (n - 1); + txdp += (n - 1); + + ice_vtx1(txdp, *tx_pkts++, rs); + + nb_commit = (uint16_t)(nb_commit - n); + + tx_id = 0; + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 
1); + + /* avoid reach the end of ring */ + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring[tx_id]; + } + + ice_tx_backlog_entry(txep, tx_pkts, nb_commit); + + ice_vtx(txdp, tx_pkts, nb_commit, flags); + + tx_id = (uint16_t)(tx_id + nb_commit); + if (tx_id > txq->tx_next_rs) { + txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |= + rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) << + ICE_TXD_QW1_CMD_S); + txq->tx_next_rs = + (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh); + } + + txq->tx_tail = tx_id; + + ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail); + + return nb_pkts; +} + +uint16_t +ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_tx = 0; + struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue; + + while (nb_pkts) { + uint16_t ret, num; + + num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh); + ret = ice_xmit_fixed_burst_vec_avx512(tx_queue, + &tx_pkts[nb_tx], num); + nb_tx += ret; + nb_pkts -= ret; + if (ret < num) + break; + } + + return nb_tx; +} diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build index e6fe74487..c0b23015e 100644 --- a/drivers/net/ice/meson.build +++ b/drivers/net/ice/meson.build @@ -33,6 +33,19 @@ if arch_subdir == 'x86' c_args: [cflags, '-mavx2']) objs += ice_avx2_lib.extract_objects('ice_rxtx_vec_avx2.c') endif + + if dpdk_conf.has('RTE_MACHINE_CPUFLAG_AVX512F') or not machine_args.contains('-mno-avx512f') + if cc.has_argument('-mavx512f') and cc.has_argument('-mavx512bw') + cflags += ['-DCC_AVX512_SUPPORT'] + ice_avx512_lib = static_library('ice_avx512_lib', + 'ice_rxtx_vec_avx512.c', + dependencies: [static_rte_ethdev, + static_rte_kvargs, static_rte_hash], + include_directories: includes, + c_args: [cflags, '-march=skylake-avx512', '-mavx512f']) + objs += ice_avx512_lib.extract_objects('ice_rxtx_vec_avx512.c') + endif + endif endif sources += files('ice_dcf.c', -- 2.17.1 ^ permalink raw reply [flat|nested] 25+ messages in thread
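The trickiest part of the Rx patch above is the iova permute in ice_rxq_rearm(). A worked example for the 32-byte descriptor case, using the iovas0/permute_idx names from the patch (note that _mm512_set_epi64 lists lanes 7 down to 0, and the patch's comment guarantees lanes 4-7 of iovas0 are zero):

/* iovas0 lanes 0..7 = {iova0, iova1, iova2, iova3, 0, 0, 0, 0} */
const __m512i permute_idx = _mm512_set_epi64(7, 7, 3, 1, 7, 7, 2, 0);
const __m512i desc0_1 = _mm512_permutexvar_epi64(permute_idx, iovas0);
/* desc0_1 lanes 0..7 = {iova0, iova2, 0, 0, iova1, iova3, 0, 0}:
 * each 32B descriptor spans 4 lanes, so iova0/iova1 land in the
 * pkt_addr slots of descriptors 0 and 1, while iova2/iova3 land in
 * the hdr_addr slots, ignored because header split is not enabled.
 */
const __m512i desc2_3 = _mm512_bsrli_epi128(desc0_1, 8);
/* shifting each 128-bit lane right by 8 bytes drops iova2/iova3 into
 * the pkt_addr slots of descriptors 2 and 3; index 7 in permute_idx
 * pulls a known-zero lane so the vacated slots read as zero.
 */
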
* Re: [dpdk-dev] [PATCH v1 1/2] net/ice: add AVX512 vector path 2020-09-10 6:55 ` [dpdk-dev] [PATCH v1 1/2] net/ice: add AVX512 vector path Leyi Rong @ 2020-09-10 9:32 ` Bruce Richardson 0 siblings, 0 replies; 25+ messages in thread From: Bruce Richardson @ 2020-09-10 9:32 UTC (permalink / raw) To: Leyi Rong; +Cc: wenzhuo.lu, qi.z.zhang, dev On Thu, Sep 10, 2020 at 02:55:03PM +0800, Leyi Rong wrote: > Add AVX512 support for ice PMD. This patch adds ice_rxtx_vec_avx512.c > to support ice AVX512 vPMD. > > This patch aims to enable AVX512 on ice vPMD. Main changes are focus > on Rx path compared with AVX2 vPMD. > > Signed-off-by: Leyi Rong <leyi.rong@intel.com> > Signed-off-by: Bruce Richardson <bruce.richardson@intel.com> > --- > drivers/net/ice/ice_rxtx.c | 88 ++- > drivers/net/ice/ice_rxtx.h | 7 + > drivers/net/ice/ice_rxtx_vec_avx512.c | 824 ++++++++++++++++++++++++++ > drivers/net/ice/meson.build | 13 + > 4 files changed, 914 insertions(+), 18 deletions(-) > create mode 100644 drivers/net/ice/ice_rxtx_vec_avx512.c <snip> > + if dpdk_conf.has('RTE_MACHINE_CPUFLAG_AVX512F') or not machine_args.contains('-mno-avx512f') I think you need to split these conditions: If the cpu flag is set, you just want to unconditionally add the file to the build list. However, if no-avx512 flag is set, you want to skip the whole block completely. > + if cc.has_argument('-mavx512f') and cc.has_argument('-mavx512bw') > + cflags += ['-DCC_AVX512_SUPPORT'] > + ice_avx512_lib = static_library('ice_avx512_lib', > + 'ice_rxtx_vec_avx512.c', > + dependencies: [static_rte_ethdev, > + static_rte_kvargs, static_rte_hash], > + include_directories: includes, > + c_args: [cflags, '-march=skylake-avx512', '-mavx512f']) > + objs += ice_avx512_lib.extract_objects('ice_rxtx_vec_avx512.c') > + endif > + endif > endif > > sources += files('ice_dcf.c', > -- > 2.17.1 > ^ permalink raw reply [flat|nested] 25+ messages in thread
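In meson terms, the split Bruce is asking for looks roughly like the sketch below; this is an illustration only, reusing the condition names and c_args from the v1 hunk, and the exact form the author chose lands in v2.

if not machine_args.contains('-mno-avx512f')
	# '-mno-avx512f' skips this whole block
	if dpdk_conf.has('RTE_MACHINE_CPUFLAG_AVX512F')
		# baseline machine already has AVX512F: add the file directly
		cflags += ['-DCC_AVX512_SUPPORT']
		sources += files('ice_rxtx_vec_avx512.c')
	elif cc.has_argument('-mavx512f') and cc.has_argument('-mavx512bw')
		# otherwise compile it separately with AVX512 flags
		cflags += ['-DCC_AVX512_SUPPORT']
		ice_avx512_lib = static_library('ice_avx512_lib',
			'ice_rxtx_vec_avx512.c',
			dependencies: [static_rte_ethdev,
				static_rte_kvargs, static_rte_hash],
			include_directories: includes,
			c_args: [cflags, '-march=skylake-avx512', '-mavx512f'])
		objs += ice_avx512_lib.extract_objects('ice_rxtx_vec_avx512.c')
	endif
endif
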
* [dpdk-dev] [PATCH v1 2/2] net/ice: optimize Tx path on AVX512 vPMD 2020-09-10 6:55 [dpdk-dev] [PATCH v1 0/2] AVX512 vPMD on ice Leyi Rong 2020-09-10 6:55 ` [dpdk-dev] [PATCH v1 1/2] net/ice: add AVX512 vector path Leyi Rong @ 2020-09-10 6:55 ` Leyi Rong 2020-09-15 1:17 ` Wang, Haiyue 2020-09-18 3:35 ` [dpdk-dev] [PATCH v2 0/3] AVX512 vPMD on ice Leyi Rong ` (2 subsequent siblings) 4 siblings, 1 reply; 25+ messages in thread From: Leyi Rong @ 2020-09-10 6:55 UTC (permalink / raw) To: bruce.richardson, wenzhuo.lu, qi.z.zhang; +Cc: dev, Leyi Rong Optimize Tx path by using AVX512 instructions and vectorize the tx free bufs process. Signed-off-by: Leyi Rong <leyi.rong@intel.com> Signed-off-by: Bruce Richardson <bruce.richardson@intel.com> --- drivers/net/ice/ice_rxtx.h | 4 + drivers/net/ice/ice_rxtx_vec_avx512.c | 147 ++++++++++++++++++++++---- drivers/net/ice/ice_rxtx_vec_common.h | 36 +++++-- 3 files changed, 158 insertions(+), 29 deletions(-) diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h index a39b41c05..08084f5c5 100644 --- a/drivers/net/ice/ice_rxtx.h +++ b/drivers/net/ice/ice_rxtx.h @@ -88,6 +88,10 @@ struct ice_tx_entry { uint16_t last_id; }; +struct ice_vec_tx_entry { + struct rte_mbuf *mbuf; +}; + struct ice_tx_queue { uint16_t nb_tx_desc; /* number of TX descriptors */ rte_iova_t tx_ring_dma; /* TX ring DMA address */ diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c index 6a9d0a8ea..1bc1191d0 100644 --- a/drivers/net/ice/ice_rxtx_vec_avx512.c +++ b/drivers/net/ice/ice_rxtx_vec_avx512.c @@ -665,6 +665,108 @@ ice_recv_scattered_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts, rx_pkts + retval, nb_pkts); } +static __rte_always_inline int +ice_tx_free_bufs_avx512(struct ice_tx_queue *txq) +{ + struct ice_vec_tx_entry *txep; + uint32_t n; + uint32_t i; + int nb_free = 0; + struct rte_mbuf *m, *free[ICE_TX_MAX_FREE_BUF_SZ]; + + /* check DD bits on threshold descriptor */ + if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz & + rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) != + rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE)) + return 0; + + n = txq->tx_rs_thresh; + + /* first buffer to free from S/W ring is at index + * tx_next_dd - (tx_rs_thresh - 1) + */ + txep = (void *)txq->sw_ring; + txep += txq->tx_next_dd - (n - 1); + + if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) { + struct rte_mempool *mp = txep[0].mbuf->pool; + struct rte_mempool_cache *cache = rte_mempool_default_cache(mp, + rte_lcore_id()); + void **cache_objs = &cache->objs[cache->len]; + + if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) { + rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n); + goto done; + } + + /* The cache follows the following algorithm + * 1. Add the objects to the cache + * 2. Anything greater than the cache min value (if it + * crosses the cache flush threshold) is flushed to the ring. 
+ */ + /* Add elements back into the cache */ + uint32_t copied = 0; + /* n is multiple of 32 */ + while (copied < n) { + const __m512i a = _mm512_loadu_si512(&txep[copied]); + const __m512i b = _mm512_loadu_si512(&txep[copied + 8]); + const __m512i c = _mm512_loadu_si512(&txep[copied + 16]); + const __m512i d = _mm512_loadu_si512(&txep[copied + 24]); + + _mm512_storeu_si512(&cache_objs[copied], a); + _mm512_storeu_si512(&cache_objs[copied + 8], b); + _mm512_storeu_si512(&cache_objs[copied + 16], c); + _mm512_storeu_si512(&cache_objs[copied + 24], d); + copied += 32; + } + cache->len += n; + + if (cache->len >= cache->flushthresh) { + rte_mempool_ops_enqueue_bulk + (mp, &cache->objs[cache->size], + cache->len - cache->size); + cache->len = cache->size; + } + goto done; + } + + m = rte_pktmbuf_prefree_seg(txep[0].mbuf); + if (likely(m)) { + free[0] = m; + nb_free = 1; + for (i = 1; i < n; i++) { + m = rte_pktmbuf_prefree_seg(txep[i].mbuf); + if (likely(m)) { + if (likely(m->pool == free[0]->pool)) { + free[nb_free++] = m; + } else { + rte_mempool_put_bulk(free[0]->pool, + (void *)free, + nb_free); + free[0] = m; + nb_free = 1; + } + } + } + rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free); + } else { + for (i = 1; i < n; i++) { + m = rte_pktmbuf_prefree_seg(txep[i].mbuf); + if (m) + rte_mempool_put(m->pool, m); + } + } + +done: + /* buffers were freed, update counters */ + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh); + txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh); + if (txq->tx_next_dd >= txq->nb_tx_desc) + txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); + + return txq->tx_rs_thresh; +} + static inline void ice_vtx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf *pkt, uint64_t flags) @@ -686,13 +788,6 @@ ice_vtx(volatile struct ice_tx_desc *txdp, const uint64_t hi_qw_tmpl = (ICE_TX_DESC_DTYPE_DATA | ((uint64_t)flags << ICE_TXD_QW1_CMD_S)); - /* if unaligned on 32-bit boundary, do one to align */ - if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) { - ice_vtx1(txdp, *pkt, flags); - nb_pkts--, txdp++, pkt++; - } - - /* do two at a time while possible, in bursts */ for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) { uint64_t hi_qw3 = hi_qw_tmpl | @@ -711,20 +806,17 @@ ice_vtx(volatile struct ice_tx_desc *txdp, ((uint64_t)pkt[0]->data_len << ICE_TXD_QW1_TX_BUF_SZ_S); - __m256i desc2_3 = - _mm256_set_epi64x + __m512i desc0_3 = + _mm512_set_epi64 (hi_qw3, pkt[3]->buf_physaddr + pkt[3]->data_off, hi_qw2, - pkt[2]->buf_physaddr + pkt[2]->data_off); - __m256i desc0_1 = - _mm256_set_epi64x - (hi_qw1, + pkt[2]->buf_physaddr + pkt[2]->data_off, + hi_qw1, pkt[1]->buf_physaddr + pkt[1]->data_off, hi_qw0, pkt[0]->buf_physaddr + pkt[0]->data_off); - _mm256_store_si256((void *)(txdp + 2), desc2_3); - _mm256_store_si256((void *)txdp, desc0_1); + _mm512_storeu_si512((void *)txdp, desc0_3); } /* do any last ones */ @@ -734,13 +826,23 @@ ice_vtx(volatile struct ice_tx_desc *txdp, } } +static __rte_always_inline void +ice_tx_backlog_entry_avx512(struct ice_vec_tx_entry *txep, + struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + int i; + + for (i = 0; i < (int)nb_pkts; ++i) + txep[i].mbuf = tx_pkts[i]; +} + static inline uint16_t ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue; volatile struct ice_tx_desc *txdp; - struct ice_tx_entry *txep; + struct ice_vec_tx_entry *txep; uint16_t n, nb_commit, tx_id; uint64_t flags = ICE_TD_CMD; uint64_t rs 
= ICE_TX_DESC_CMD_RS | ICE_TD_CMD; @@ -749,7 +851,7 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh); if (txq->nb_tx_free < txq->tx_free_thresh) - ice_tx_free_bufs(txq); + ice_tx_free_bufs_avx512(txq); nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); if (unlikely(nb_pkts == 0)) @@ -757,13 +859,14 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, tx_id = txq->tx_tail; txdp = &txq->tx_ring[tx_id]; - txep = &txq->sw_ring[tx_id]; + txep = (void *)txq->sw_ring; + txep += tx_id; txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts); n = (uint16_t)(txq->nb_tx_desc - tx_id); if (nb_commit >= n) { - ice_tx_backlog_entry(txep, tx_pkts, n); + ice_tx_backlog_entry_avx512(txep, tx_pkts, n); ice_vtx(txdp, tx_pkts, n - 1, flags); tx_pkts += (n - 1); @@ -777,11 +880,11 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); /* avoid reach the end of ring */ - txdp = &txq->tx_ring[tx_id]; - txep = &txq->sw_ring[tx_id]; + txdp = txq->tx_ring; + txep = (void *)txq->sw_ring; } - ice_tx_backlog_entry(txep, tx_pkts, nb_commit); + ice_tx_backlog_entry_avx512(txep, tx_pkts, nb_commit); ice_vtx(txdp, tx_pkts, nb_commit, flags); diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h index 46e3be98a..ee0bb1798 100644 --- a/drivers/net/ice/ice_rxtx_vec_common.h +++ b/drivers/net/ice/ice_rxtx_vec_common.h @@ -189,16 +189,38 @@ _ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq) * so need to free remains more carefully. */ i = txq->tx_next_dd - txq->tx_rs_thresh + 1; - if (txq->tx_tail < i) { - for (; i < txq->nb_tx_desc; i++) { + +#ifdef CC_AVX512_SUPPORT + struct rte_eth_dev *dev = txq->vsi->adapter->eth_dev; + + if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512) { + struct ice_vec_tx_entry *swr = (void *)txq->sw_ring; + + if (txq->tx_tail < i) { + for (; i < txq->nb_tx_desc; i++) { + rte_pktmbuf_free_seg(swr[i].mbuf); + swr[i].mbuf = NULL; + } + i = 0; + } + for (; i < txq->tx_tail; i++) { + rte_pktmbuf_free_seg(swr[i].mbuf); + swr[i].mbuf = NULL; + } + } else +#endif + { + if (txq->tx_tail < i) { + for (; i < txq->nb_tx_desc; i++) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } + i = 0; + } + for (; i < txq->tx_tail; i++) { rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); txq->sw_ring[i].mbuf = NULL; } - i = 0; - } - for (; i < txq->tx_tail; i++) { - rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); - txq->sw_ring[i].mbuf = NULL; } } -- 2.17.1 ^ permalink raw reply [flat|nested] 25+ messages in thread
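Two details of this Tx patch are easy to miss. The new ice_vec_tx_entry drops the ring-tracking fields of ice_tx_entry, so a software-ring element shrinks to a bare pointer and eight of them fit in one zmm register; that dense layout is what enables the bulk copy of mbuf pointers into the mempool cache in ice_tx_free_bufs_avx512(). A condensed view, with names as in the patch and sizes assuming 64-bit pointers:

struct ice_tx_entry {		/* scalar/AVX2 sw_ring element */
	struct rte_mbuf *mbuf;
	uint16_t next_id;
	uint16_t last_id;
};				/* 16 bytes after padding */

struct ice_vec_tx_entry {	/* AVX512 sw_ring element */
	struct rte_mbuf *mbuf;
};				/* 8 bytes: 8 pointers per 64B zmm */

/* hence the 32-entries-per-iteration copy in ice_tx_free_bufs_avx512: */
const __m512i a = _mm512_loadu_si512(&txep[copied]);
_mm512_storeu_si512(&cache_objs[copied], a);
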
* Re: [dpdk-dev] [PATCH v1 2/2] net/ice: optimize Tx path on AVX512 vPMD 2020-09-10 6:55 ` [dpdk-dev] [PATCH v1 2/2] net/ice: optimize Tx path on AVX512 vPMD Leyi Rong @ 2020-09-15 1:17 ` Wang, Haiyue 0 siblings, 0 replies; 25+ messages in thread From: Wang, Haiyue @ 2020-09-15 1:17 UTC (permalink / raw) To: Rong, Leyi, Richardson, Bruce, Lu, Wenzhuo, Zhang, Qi Z; +Cc: dev, Rong, Leyi > -----Original Message----- > From: dev <dev-bounces@dpdk.org> On Behalf Of Leyi Rong > Sent: Thursday, September 10, 2020 14:55 > To: Richardson, Bruce <bruce.richardson@intel.com>; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Zhang, Qi Z > <qi.z.zhang@intel.com> > Cc: dev@dpdk.org; Rong, Leyi <leyi.rong@intel.com> > Subject: [dpdk-dev] [PATCH v1 2/2] net/ice: optimize Tx path on AVX512 vPMD > > Optimize Tx path by using AVX512 instructions and vectorize the > tx free bufs process. > > Signed-off-by: Leyi Rong <leyi.rong@intel.com> > Signed-off-by: Bruce Richardson <bruce.richardson@intel.com> > --- > drivers/net/ice/ice_rxtx.h | 4 + > drivers/net/ice/ice_rxtx_vec_avx512.c | 147 ++++++++++++++++++++++---- > drivers/net/ice/ice_rxtx_vec_common.h | 36 +++++-- > 3 files changed, 158 insertions(+), 29 deletions(-) > > - _mm256_set_epi64x > + __m512i desc0_3 = > + _mm512_set_epi64 > (hi_qw3, > pkt[3]->buf_physaddr + pkt[3]->data_off, > hi_qw2, > - pkt[2]->buf_physaddr + pkt[2]->data_off); > - __m256i desc0_1 = > - _mm256_set_epi64x > - (hi_qw1, > + pkt[2]->buf_physaddr + pkt[2]->data_off, > + hi_qw1, > pkt[1]->buf_physaddr + pkt[1]->data_off, > hi_qw0, > pkt[0]->buf_physaddr + pkt[0]->data_off); 'buf_physaddr' will be removed in 20.11, so we need to use 'buf_iova' instead. > -- > 2.17.1 ^ permalink raw reply [flat|nested] 25+ messages in thread
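Concretely, the review comment amounts to the one-field substitution sketched below in ice_vtx1() (and likewise in ice_vtx()); buf_iova is the replacement rte_mbuf field, and the v2 changelog that follows confirms the series applies it.

/* v1, using the field deprecated for 20.11: */
__m128i descriptor = _mm_set_epi64x(high_qw,
		pkt->buf_physaddr + pkt->data_off);
/* v2, per the review: */
descriptor = _mm_set_epi64x(high_qw,
		pkt->buf_iova + pkt->data_off);
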
* [dpdk-dev] [PATCH v2 0/3] AVX512 vPMD on ice 2020-09-10 6:55 [dpdk-dev] [PATCH v1 0/2] AVX512 vPMD on ice Leyi Rong 2020-09-10 6:55 ` [dpdk-dev] [PATCH v1 1/2] net/ice: add AVX512 vector path Leyi Rong 2020-09-10 6:55 ` [dpdk-dev] [PATCH v1 2/2] net/ice: optimize Tx path on AVX512 vPMD Leyi Rong @ 2020-09-18 3:35 ` Leyi Rong 2020-09-18 3:35 ` [dpdk-dev] [PATCH v2 1/3] net/ice: add AVX512 vector path Leyi Rong ` (2 more replies) 2020-10-20 10:51 ` [dpdk-dev] [PATCH v3 0/3] AVX512 vPMD on ice Leyi Rong 2020-10-23 4:14 ` [dpdk-dev] [PATCH v4 0/3] AVX512 vPMD on ice Leyi Rong 4 siblings, 3 replies; 25+ messages in thread From: Leyi Rong @ 2020-09-18 3:35 UTC (permalink / raw) To: qi.z.zhang, wenzhuo.lu, bruce.richardson; +Cc: dev, Leyi Rong This patchset aims to support AVX512 vPMD on ice. It has a dependency on http://patches.dpdk.org/cover/76096/, which supports SIMD bitwidth selection properly. --- v2: - No internal judgement when RTE_MACHINE_CPUFLAG_AVX512F is set in meson.build. - Add RSS hash parsing as the default RXDID is set to #22. - Use buf_iova instead of buf_physaddr, as buf_physaddr will be removed. Leyi Rong (3): net/ice: add AVX512 vector path net/ice: add RSS hash parsing in AVX512 path net/ice: optimize Tx path on AVX512 vPMD drivers/net/ice/ice_rxtx.c | 88 ++- drivers/net/ice/ice_rxtx.h | 11 + drivers/net/ice/ice_rxtx_vec_avx512.c | 1018 +++++++++++++++++++++++++ drivers/net/ice/ice_rxtx_vec_common.h | 36 +- drivers/net/ice/meson.build | 11 + 5 files changed, 1139 insertions(+), 25 deletions(-) create mode 100644 drivers/net/ice/ice_rxtx_vec_avx512.c -- 2.17.1 ^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH v2 1/3] net/ice: add AVX512 vector path 2020-09-18 3:35 ` [dpdk-dev] [PATCH v2 0/3] AVX512 vPMD on ice Leyi Rong @ 2020-09-18 3:35 ` Leyi Rong 2020-09-18 3:35 ` [dpdk-dev] [PATCH v2 2/3] net/ice: add RSS hash parsing in AVX512 path Leyi Rong 2020-09-18 3:35 ` [dpdk-dev] [PATCH v2 3/3] net/ice: optimize Tx path on AVX512 vPMD Leyi Rong 2 siblings, 0 replies; 25+ messages in thread From: Leyi Rong @ 2020-09-18 3:35 UTC (permalink / raw) To: qi.z.zhang, wenzhuo.lu, burce.richardson; +Cc: dev, Leyi Rong, Bruce Richardson Add AVX512 support for ice PMD. This patch adds ice_rxtx_vec_avx512.c to support ice AVX512 vPMD. This patch aims to enable AVX512 on ice vPMD. Main changes are focus on Rx path compared with AVX2 vPMD. Signed-off-by: Leyi Rong <leyi.rong@intel.com> Signed-off-by: Bruce Richardson <bruce.richardson@intel.com> --- drivers/net/ice/ice_rxtx.c | 88 ++- drivers/net/ice/ice_rxtx.h | 7 + drivers/net/ice/ice_rxtx_vec_avx512.c | 824 ++++++++++++++++++++++++++ drivers/net/ice/meson.build | 11 + 4 files changed, 912 insertions(+), 18 deletions(-) create mode 100644 drivers/net/ice/ice_rxtx_vec_avx512.c diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c index 5a29af743c..cc7946c0b7 100644 --- a/drivers/net/ice/ice_rxtx.c +++ b/drivers/net/ice/ice_rxtx.c @@ -1876,6 +1876,10 @@ ice_dev_supported_ptypes_get(struct rte_eth_dev *dev) #ifdef RTE_ARCH_X86 if (dev->rx_pkt_burst == ice_recv_pkts_vec || dev->rx_pkt_burst == ice_recv_scattered_pkts_vec || +#ifdef CC_AVX512_SUPPORT + dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 || + dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 || +#endif dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 || dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2) return ptypes; @@ -2933,6 +2937,7 @@ ice_set_rx_function(struct rte_eth_dev *dev) #ifdef RTE_ARCH_X86 struct ice_rx_queue *rxq; int i; + bool use_avx512 = false; bool use_avx2 = false; if (rte_eal_process_type() == RTE_PROC_PRIMARY) { @@ -2948,10 +2953,18 @@ ice_set_rx_function(struct rte_eth_dev *dev) } } - if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 || + if (rte_get_max_simd_bitwidth() >= RTE_MAX_512_SIMD && + rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) +#ifdef CC_AVX512_SUPPORT + use_avx512 = true; +#else + PMD_DRV_LOG(NOTICE, + "AVX512 is not supported in build env"); +#endif + if (!use_avx512 && + (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 || rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) && - rte_get_max_simd_bitwidth() - >= RTE_MAX_256_SIMD) + rte_get_max_simd_bitwidth() >= RTE_MAX_256_SIMD) use_avx2 = true; } else { @@ -2961,20 +2974,37 @@ ice_set_rx_function(struct rte_eth_dev *dev) if (ad->rx_vec_allowed) { if (dev->data->scattered_rx) { - PMD_DRV_LOG(DEBUG, + if (use_avx512) { + PMD_DRV_LOG(NOTICE, + "Using AVX512 Vector Scattered Rx (port %d).", + dev->data->port_id); + dev->rx_pkt_burst = + ice_recv_scattered_pkts_vec_avx512; + } else { + PMD_DRV_LOG(DEBUG, "Using %sVector Scattered Rx (port %d).", use_avx2 ? "avx2 " : "", dev->data->port_id); - dev->rx_pkt_burst = use_avx2 ? + dev->rx_pkt_burst = use_avx2 ? ice_recv_scattered_pkts_vec_avx2 : ice_recv_scattered_pkts_vec; + } } else { - PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).", + if (use_avx512) { + PMD_DRV_LOG(NOTICE, + "Using AVX512 Vector Rx (port %d).", + dev->data->port_id); + dev->rx_pkt_burst = + ice_recv_pkts_vec_avx512; + } else { + PMD_DRV_LOG(DEBUG, + "Using %sVector Rx (port %d).", use_avx2 ? 
"avx2 " : "", dev->data->port_id); - dev->rx_pkt_burst = use_avx2 ? - ice_recv_pkts_vec_avx2 : - ice_recv_pkts_vec; + dev->rx_pkt_burst = use_avx2 ? + ice_recv_pkts_vec_avx2 : + ice_recv_pkts_vec; + } } return; } @@ -3011,6 +3041,10 @@ static const struct { { ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" }, { ice_recv_pkts, "Scalar" }, #ifdef RTE_ARCH_X86 +#ifdef CC_AVX512_SUPPORT + { ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" }, + { ice_recv_pkts_vec_avx512, "Vector AVX512" }, +#endif { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" }, { ice_recv_pkts_vec_avx2, "Vector AVX2" }, { ice_recv_scattered_pkts_vec, "Vector SSE Scattered" }, @@ -3115,6 +3149,7 @@ ice_set_tx_function(struct rte_eth_dev *dev) #ifdef RTE_ARCH_X86 struct ice_tx_queue *txq; int i; + bool use_avx512 = false; bool use_avx2 = false; if (rte_eal_process_type() == RTE_PROC_PRIMARY) { @@ -3130,10 +3165,18 @@ ice_set_tx_function(struct rte_eth_dev *dev) } } - if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 || + if (rte_get_max_simd_bitwidth() >= RTE_MAX_512_SIMD && + rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) +#ifdef CC_AVX512_SUPPORT + use_avx512 = true; +#else + PMD_DRV_LOG(NOTICE, + "AVX512 is not supported in build env"); +#endif + if (!use_avx512 && + (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 || rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) && - rte_get_max_simd_bitwidth() - >= RTE_MAX_256_SIMD) + rte_get_max_simd_bitwidth() >= RTE_MAX_256_SIMD) use_avx2 = true; } else { @@ -3142,12 +3185,18 @@ ice_set_tx_function(struct rte_eth_dev *dev) } if (ad->tx_vec_allowed) { - PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).", - use_avx2 ? "avx2 " : "", - dev->data->port_id); - dev->tx_pkt_burst = use_avx2 ? - ice_xmit_pkts_vec_avx2 : - ice_xmit_pkts_vec; + if (use_avx512) { + PMD_DRV_LOG(NOTICE, "Using AVX512 Vector Tx (port %d).", + dev->data->port_id); + dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512; + } else { + PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).", + use_avx2 ? "avx2 " : "", + dev->data->port_id); + dev->tx_pkt_burst = use_avx2 ? 
+ ice_xmit_pkts_vec_avx2 : + ice_xmit_pkts_vec; + } dev->tx_pkt_prepare = NULL; return; @@ -3172,6 +3221,9 @@ static const struct { { ice_xmit_pkts_simple, "Scalar Simple" }, { ice_xmit_pkts, "Scalar" }, #ifdef RTE_ARCH_X86 +#ifdef CC_AVX512_SUPPORT + { ice_xmit_pkts_vec_avx512, "Vector AVX512" }, +#endif { ice_xmit_pkts_vec_avx2, "Vector AVX2" }, { ice_xmit_pkts_vec, "Vector SSE" }, #endif diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h index 9fa57b3b27..c13e54b251 100644 --- a/drivers/net/ice/ice_rxtx.h +++ b/drivers/net/ice/ice_rxtx.h @@ -243,6 +243,13 @@ uint16_t ice_recv_scattered_pkts_vec_avx2(void *rx_queue, uint16_t nb_pkts); uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); +uint16_t ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t ice_recv_scattered_pkts_vec_avx512(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc); int ice_tx_done_cleanup(void *txq, uint32_t free_cnt); diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c new file mode 100644 index 0000000000..6a9d0a8eaa --- /dev/null +++ b/drivers/net/ice/ice_rxtx_vec_avx512.c @@ -0,0 +1,824 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#include "ice_rxtx_vec_common.h" + +#include <x86intrin.h> + +#ifndef __INTEL_COMPILER +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +#define ICE_DESCS_PER_LOOP_AVX 8 + +static inline void +ice_rxq_rearm(struct ice_rx_queue *rxq) +{ + int i; + uint16_t rx_id; + volatile union ice_rx_flex_desc *rxdp; + struct ice_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start]; + struct rte_mempool_cache *cache = rte_mempool_default_cache(rxq->mp, + rte_lcore_id()); + + rxdp = rxq->rx_ring + rxq->rxrearm_start; + + /* We need to pull 'n' more MBUFs into the software ring */ + if (cache->len < ICE_RXQ_REARM_THRESH) { + uint32_t req = ICE_RXQ_REARM_THRESH + (cache->size - + cache->len); + + int ret = rte_mempool_ops_dequeue_bulk(rxq->mp, + &cache->objs[cache->len], req); + if (ret == 0) { + cache->len += req; + } else { + if (rxq->rxrearm_nb + ICE_RXQ_REARM_THRESH >= + rxq->nb_rx_desc) { + __m128i dma_addr0; + + dma_addr0 = _mm_setzero_si128(); + for (i = 0; i < ICE_DESCS_PER_LOOP; i++) { + rxep[i].mbuf = &rxq->fake_mbuf; + _mm_store_si128 + ((__m128i *)&rxdp[i].read, + dma_addr0); + } + } + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += + ICE_RXQ_REARM_THRESH; + return; + } + } + + const __m512i iova_offsets = _mm512_set1_epi64 + (offsetof(struct rte_mbuf, buf_iova)); + const __m512i headroom = _mm512_set1_epi64(RTE_PKTMBUF_HEADROOM); + +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC + /* shuffle the iova into correct slots. Values 4-7 will contain + * zeros, so use 7 for a zero-value. 
+ */ + const __m512i permute_idx = _mm512_set_epi64(7, 7, 3, 1, 7, 7, 2, 0); +#else + const __m512i permute_idx = _mm512_set_epi64(7, 3, 6, 2, 5, 1, 4, 0); +#endif + + /* fill up the rxd in vector, process 8 mbufs in one loop */ + for (i = 0; i < ICE_RXQ_REARM_THRESH / 8; i++) { + const __m512i mbuf_ptrs = _mm512_loadu_si512 + (&cache->objs[cache->len - 8]); + _mm512_store_si512(rxep, mbuf_ptrs); + + /* gather iova of mbuf0-7 into one zmm reg */ + const __m512i iova_base_addrs = _mm512_i64gather_epi64 + (_mm512_add_epi64(mbuf_ptrs, iova_offsets), + 0, /* base */ + 1 /* scale */); + const __m512i iova_addrs = _mm512_add_epi64(iova_base_addrs, + headroom); +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC + const __m512i iovas0 = _mm512_castsi256_si512 + (_mm512_extracti64x4_epi64(iova_addrs, 0)); + const __m512i iovas1 = _mm512_castsi256_si512 + (_mm512_extracti64x4_epi64(iova_addrs, 1)); + + /* permute leaves iova 2-3 in hdr_addr of desc 0-1 + * but these are ignored by driver since header split not + * enabled. Similarly for desc 4 & 5. + */ + const __m512i desc0_1 = _mm512_permutexvar_epi64 + (permute_idx, iovas0); + const __m512i desc2_3 = _mm512_bsrli_epi128(desc0_1, 8); + + const __m512i desc4_5 = _mm512_permutexvar_epi64 + (permute_idx, iovas1); + const __m512i desc6_7 = _mm512_bsrli_epi128(desc4_5, 8); + + _mm512_store_si512((void *)rxdp, desc0_1); + _mm512_store_si512((void *)(rxdp + 2), desc2_3); + _mm512_store_si512((void *)(rxdp + 4), desc4_5); + _mm512_store_si512((void *)(rxdp + 6), desc6_7); +#else + /* permute leaves iova 4-7 in hdr_addr of desc 0-3 + * but these are ignored by driver since header split not + * enabled. + */ + const __m512i desc0_3 = _mm512_permutexvar_epi64 + (permute_idx, iova_addrs); + const __m512i desc4_7 = _mm512_bsrli_epi128(desc0_3, 8); + + _mm512_store_si512((void *)rxdp, desc0_3); + _mm512_store_si512((void *)(rxdp + 4), desc4_7); +#endif + rxep += 8, rxdp += 8, cache->len -= 8; + } + + rxq->rxrearm_start += ICE_RXQ_REARM_THRESH; + if (rxq->rxrearm_start >= rxq->nb_rx_desc) + rxq->rxrearm_start = 0; + + rxq->rxrearm_nb -= ICE_RXQ_REARM_THRESH; + + rx_id = (uint16_t)((rxq->rxrearm_start == 0) ? 
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1)); + + /* Update the tail pointer on the NIC */ + ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id); +} + +static inline uint16_t +_ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts, uint8_t *split_packet) +{ + const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; + const __m256i mbuf_init = _mm256_set_epi64x(0, 0, + 0, rxq->mbuf_initializer); + struct ice_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail]; + volatile union ice_rx_flex_desc *rxdp = rxq->rx_ring + rxq->rx_tail; + + rte_prefetch0(rxdp); + + /* nb_pkts has to be floor-aligned to ICE_DESCS_PER_LOOP_AVX */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, ICE_DESCS_PER_LOOP_AVX); + + /* See if we need to rearm the RX queue - gives the prefetch a bit + * of time to act + */ + if (rxq->rxrearm_nb > ICE_RXQ_REARM_THRESH) + ice_rxq_rearm(rxq); + + /* Before we start moving massive data around, check to see if + * there is actually a packet available + */ + if (!(rxdp->wb.status_error0 & + rte_cpu_to_le_32(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))) + return 0; + + /* constants used in processing loop */ + const __m512i crc_adjust = + _mm512_set4_epi32 + (0, /* ignore non-length fields */ + -rxq->crc_len, /* sub crc on data_len */ + -rxq->crc_len, /* sub crc on pkt_len */ + 0 /* ignore non-length fields */ + ); + + /* 8 packets DD mask, LSB in each 32-bit value */ + const __m256i dd_check = _mm256_set1_epi32(1); + + /* 8 packets EOP mask, second-LSB in each 32-bit value */ + const __m256i eop_check = _mm256_slli_epi32(dd_check, + ICE_RX_DESC_STATUS_EOF_S); + + /* mask to shuffle from desc. to mbuf (4 descriptors)*/ + const __m512i shuf_msk = + _mm512_set4_epi32 + (/* octet 12~15, 32 bits rss */ + 15 << 24 | 14 << 16 | 13 << 8 | 12, + /* octet 10~11, 16 bits vlan_macip */ + /* octet 4~5, 16 bits data_len */ + 11 << 24 | 10 << 16 | 5 << 8 | 4, + /* skip hi 16 bits pkt_len, zero out */ + /* octet 4~5, 16 bits pkt_len */ + 0xFFFF << 16 | 5 << 8 | 4, + /* pkt_type set as unknown */ + 0xFFFFFFFF + ); + + /** + * compile-time check the above crc and shuffle layout is correct. + * NOTE: the first field (lowest address) is given last in set_epi + * calls above. + */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12); + + /* Status/Error flag masks */ + /** + * mask everything except Checksum Reports, RSS indication + * and VLAN indication. + * bit6:4 for IP/L4 checksum errors. + * bit12 is for RSS indication. + * bit13 is for VLAN indication. + */ + const __m256i flags_mask = + _mm256_set1_epi32((7 << 4) | (1 << 12) | (1 << 13)); + /** + * data to be shuffled by the result of the flags mask shifted by 4 + * bits. This gives use the l3_l4 flags. 
+ */ + const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, + /* shift right 1 bit to make sure it not exceed 255 */ + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, + /* 2nd 128-bits */ + 0, 0, 0, 0, 0, 0, 0, 0, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1); + const __m256i cksum_mask = + _mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_EIP_CKSUM_BAD); + /** + * data to be shuffled by result of flag mask, shifted down 12. + * If RSS(bit12)/VLAN(bit13) are set, + * shuffle moves appropriate flags in place. + */ + const __m256i rss_vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_RSS_HASH, 0, + /* 2nd 128-bits */ + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_RSS_HASH, 0); + + uint16_t i, received; + + for (i = 0, received = 0; i < nb_pkts; + i += ICE_DESCS_PER_LOOP_AVX, + rxdp += ICE_DESCS_PER_LOOP_AVX) { + /* step 1, copy over 8 mbuf pointers to rx_pkts array */ + _mm256_storeu_si256((void *)&rx_pkts[i], + _mm256_loadu_si256((void *)&sw_ring[i])); +#ifdef RTE_ARCH_X86_64 + _mm256_storeu_si256 + ((void *)&rx_pkts[i + 4], + _mm256_loadu_si256((void *)&sw_ring[i + 4])); +#endif + + __m512i raw_desc0_3, raw_desc4_7; + __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7; + + /* load in descriptors, in reverse order */ + const __m128i raw_desc7 = + _mm_load_si128((void *)(rxdp + 7)); + rte_compiler_barrier(); + const __m128i raw_desc6 = + _mm_load_si128((void *)(rxdp + 6)); + rte_compiler_barrier(); + const __m128i raw_desc5 = + _mm_load_si128((void *)(rxdp + 5)); + rte_compiler_barrier(); + const __m128i raw_desc4 = + _mm_load_si128((void *)(rxdp + 4)); + rte_compiler_barrier(); + const __m128i raw_desc3 = + _mm_load_si128((void *)(rxdp + 3)); + rte_compiler_barrier(); + const __m128i raw_desc2 = + _mm_load_si128((void *)(rxdp + 2)); + rte_compiler_barrier(); + const __m128i raw_desc1 = + _mm_load_si128((void *)(rxdp + 1)); + rte_compiler_barrier(); + const __m128i raw_desc0 = + _mm_load_si128((void *)(rxdp + 0)); + + raw_desc6_7 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc6), + raw_desc7, 1); + raw_desc4_5 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc4), + raw_desc5, 1); + raw_desc2_3 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc2), + raw_desc3, 1); + raw_desc0_1 = + _mm256_inserti128_si256 
+ (_mm256_castsi128_si256(raw_desc0), + raw_desc1, 1); + + raw_desc4_7 = + _mm512_inserti64x4 + (_mm512_castsi256_si512(raw_desc4_5), + raw_desc6_7, 1); + raw_desc0_3 = + _mm512_inserti64x4 + (_mm512_castsi256_si512(raw_desc0_1), + raw_desc2_3, 1); + + if (split_packet) { + int j; + + for (j = 0; j < ICE_DESCS_PER_LOOP_AVX; j++) + rte_mbuf_prefetch_part2(rx_pkts[i + j]); + } + + /** + * convert descriptors 0-7 into mbufs, re-arrange fields. + * Then write into the mbuf. + */ + __m512i mb4_7 = _mm512_shuffle_epi8(raw_desc4_7, shuf_msk); + __m512i mb0_3 = _mm512_shuffle_epi8(raw_desc0_3, shuf_msk); + + mb4_7 = _mm512_add_epi32(mb4_7, crc_adjust); + mb0_3 = _mm512_add_epi32(mb0_3, crc_adjust); + + /** + * to get packet types, ptype is located in bit16-25 + * of each 128bits + */ + const __m512i ptype_mask = + _mm512_set1_epi16(ICE_RX_FLEX_DESC_PTYPE_M); + + /** + * to get packet types, ptype is located in bit16-25 + * of each 128bits + */ + const __m512i ptypes4_7 = + _mm512_and_si512(raw_desc4_7, ptype_mask); + const __m512i ptypes0_3 = + _mm512_and_si512(raw_desc0_3, ptype_mask); + + const __m256i ptypes6_7 = + _mm512_extracti64x4_epi64(ptypes4_7, 1); + const __m256i ptypes4_5 = + _mm512_extracti64x4_epi64(ptypes4_7, 0); + const __m256i ptypes2_3 = + _mm512_extracti64x4_epi64(ptypes0_3, 1); + const __m256i ptypes0_1 = + _mm512_extracti64x4_epi64(ptypes0_3, 0); + const uint16_t ptype7 = _mm256_extract_epi16(ptypes6_7, 9); + const uint16_t ptype6 = _mm256_extract_epi16(ptypes6_7, 1); + const uint16_t ptype5 = _mm256_extract_epi16(ptypes4_5, 9); + const uint16_t ptype4 = _mm256_extract_epi16(ptypes4_5, 1); + const uint16_t ptype3 = _mm256_extract_epi16(ptypes2_3, 9); + const uint16_t ptype2 = _mm256_extract_epi16(ptypes2_3, 1); + const uint16_t ptype1 = _mm256_extract_epi16(ptypes0_1, 9); + const uint16_t ptype0 = _mm256_extract_epi16(ptypes0_1, 1); + + const __m512i ptype4_7 = _mm512_set_epi32 + (0, 0, 0, ptype_tbl[ptype7], + 0, 0, 0, ptype_tbl[ptype6], + 0, 0, 0, ptype_tbl[ptype5], + 0, 0, 0, ptype_tbl[ptype4]); + const __m512i ptype0_3 = _mm512_set_epi32 + (0, 0, 0, ptype_tbl[ptype3], + 0, 0, 0, ptype_tbl[ptype2], + 0, 0, 0, ptype_tbl[ptype1], + 0, 0, 0, ptype_tbl[ptype0]); + + mb4_7 = _mm512_mask_blend_epi32(0x1111, mb4_7, ptype4_7); + mb0_3 = _mm512_mask_blend_epi32(0x1111, mb0_3, ptype0_3); + + /** + * use permute/extract to get status content + * After the operations, the packets status flags are in the + * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6] + */ + /* merge the status bits into one register */ + const __m512i status_permute_msk = _mm512_set_epi32 + (0, 0, 0, 0, + 0, 0, 0, 0, + 22, 30, 6, 14, + 18, 26, 2, 10); + const __m512i raw_status0_7 = _mm512_permutex2var_epi32 + (raw_desc4_7, status_permute_msk, raw_desc0_3); + __m256i status0_7 = _mm512_extracti64x4_epi64 + (raw_status0_7, 0); + + /* now do flag manipulation */ + + /* get only flag/error bits we want */ + const __m256i flag_bits = + _mm256_and_si256(status0_7, flags_mask); + /** + * l3_l4_error flags, shuffle, then shift to correct adjustment + * of flags in flags_shuf, and finally mask out extra bits + */ + __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf, + _mm256_srli_epi32(flag_bits, 4)); + l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1); + l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask); + /* set rss and vlan flags */ + const __m256i rss_vlan_flag_bits = + _mm256_srli_epi32(flag_bits, 12); + const __m256i rss_vlan_flags = + _mm256_shuffle_epi8(rss_vlan_flags_shuf, + rss_vlan_flag_bits); + + /* merge 
flags */ + const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags, + rss_vlan_flags); + /** + * At this point, we have the 8 sets of flags in the low 16-bits + * of each 32-bit value in vlan0. + * We want to extract these, and merge them with the mbuf init + * data so we can do a single write to the mbuf to set the flags + * and all the other initialization fields. Extracting the + * appropriate flags means that we have to do a shift and blend + * for each mbuf before we do the write. However, we can also + * add in the previously computed rx_descriptor fields to + * make a single 256-bit write per mbuf + */ + /* check the structure matches expectations */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) != + offsetof(struct rte_mbuf, rearm_data) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) != + RTE_ALIGN(offsetof(struct rte_mbuf, + rearm_data), + 16)); + /* build up data and do writes */ + __m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5, + rearm6, rearm7; + + rearm6 = _mm256_blend_epi32(mbuf_init, + _mm256_slli_si256(mbuf_flags, 8), + 0x04); + rearm4 = _mm256_blend_epi32(mbuf_init, + _mm256_slli_si256(mbuf_flags, 4), + 0x04); + rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04); + rearm0 = _mm256_blend_epi32(mbuf_init, + _mm256_srli_si256(mbuf_flags, 4), + 0x04); + + const __m256i mb4_5 = _mm512_extracti64x4_epi64(mb4_7, 0); + const __m256i mb6_7 = _mm512_extracti64x4_epi64(mb4_7, 1); + const __m256i mb0_1 = _mm512_extracti64x4_epi64(mb0_3, 0); + const __m256i mb2_3 = _mm512_extracti64x4_epi64(mb0_3, 1); + + /* permute to add in the rx_descriptor e.g. rss fields */ + rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20); + rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20); + rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20); + rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20); + + /* write to mbuf */ + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data, + rearm6); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data, + rearm4); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data, + rearm2); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data, + rearm0); + + /* repeat for the odd mbufs */ + const __m256i odd_flags = + _mm256_castsi128_si256 + (_mm256_extracti128_si256(mbuf_flags, 1)); + rearm7 = _mm256_blend_epi32(mbuf_init, + _mm256_slli_si256(odd_flags, 8), + 0x04); + rearm5 = _mm256_blend_epi32(mbuf_init, + _mm256_slli_si256(odd_flags, 4), + 0x04); + rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04); + rearm1 = _mm256_blend_epi32(mbuf_init, + _mm256_srli_si256(odd_flags, 4), + 0x04); + + /* since odd mbufs are already in hi 128-bits use blend */ + rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0); + rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0); + rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0); + rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0); + /* again write to mbufs */ + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data, + rearm7); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data, + rearm5); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data, + rearm3); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data, + rearm1); + + /* extract and record EOP bit */ + if (split_packet) { + const __m128i eop_mask = + _mm_set1_epi16(1 << ICE_RX_DESC_STATUS_EOF_S); + const __m256i eop_bits256 = _mm256_and_si256(status0_7, + eop_check); + /* pack status bits into a single 128-bit register */ + const __m128i eop_bits = + _mm_packus_epi32 + 
(_mm256_castsi256_si128(eop_bits256), + _mm256_extractf128_si256(eop_bits256, + 1)); + /** + * flip bits, and mask out the EOP bit, which is now + * a split-packet bit i.e. !EOP, rather than EOP one. + */ + __m128i split_bits = _mm_andnot_si128(eop_bits, + eop_mask); + /** + * eop bits are out of order, so we need to shuffle them + * back into order again. In doing so, only use low 8 + * bits, which acts like another pack instruction + * The original order is (hi->lo): 1,3,5,7,0,2,4,6 + * [Since we use epi8, the 16-bit positions are + * multiplied by 2 in the eop_shuffle value.] + */ + __m128i eop_shuffle = + _mm_set_epi8(/* zero hi 64b */ + 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, + /* move values to lo 64b */ + 8, 0, 10, 2, + 12, 4, 14, 6); + split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle); + *(uint64_t *)split_packet = + _mm_cvtsi128_si64(split_bits); + split_packet += ICE_DESCS_PER_LOOP_AVX; + } + + /* perform dd_check */ + status0_7 = _mm256_and_si256(status0_7, dd_check); + status0_7 = _mm256_packs_epi32(status0_7, + _mm256_setzero_si256()); + + uint64_t burst = __builtin_popcountll + (_mm_cvtsi128_si64 + (_mm256_extracti128_si256 + (status0_7, 1))); + burst += __builtin_popcountll + (_mm_cvtsi128_si64 + (_mm256_castsi256_si128(status0_7))); + received += burst; + if (burst != ICE_DESCS_PER_LOOP_AVX) + break; + } + + /* update tail pointers */ + rxq->rx_tail += received; + rxq->rx_tail &= (rxq->nb_rx_desc - 1); + if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */ + rxq->rx_tail--; + received--; + } + rxq->rxrearm_nb += received; + return received; +} + +/** + * Notice: + * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet + */ +uint16_t +ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts, NULL); +} + +/** + * vPMD receive routine that reassembles single burst of 32 scattered packets + * Notice: + * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet + */ +static uint16_t +ice_recv_scattered_burst_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct ice_rx_queue *rxq = rx_queue; + uint8_t split_flags[ICE_VPMD_RX_BURST] = {0}; + + /* get some new buffers */ + uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx512(rxq, rx_pkts, nb_pkts, + split_flags); + if (nb_bufs == 0) + return 0; + + /* happy day case, full burst + no packets to be joined */ + const uint64_t *split_fl64 = (uint64_t *)split_flags; + + if (!rxq->pkt_first_seg && + split_fl64[0] == 0 && split_fl64[1] == 0 && + split_fl64[2] == 0 && split_fl64[3] == 0) + return nb_bufs; + + /* reassemble any packets that need reassembly */ + unsigned int i = 0; + + if (!rxq->pkt_first_seg) { + /* find the first split flag, and only reassemble then */ + while (i < nb_bufs && !split_flags[i]) + i++; + if (i == nb_bufs) + return nb_bufs; + rxq->pkt_first_seg = rx_pkts[i]; + } + return i + ice_rx_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i, + &split_flags[i]); +} + +/** + * vPMD receive routine that reassembles scattered packets. 
+ * Main receive routine that can handle arbitrary burst sizes + * Notice: + * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet + */ +uint16_t +ice_recv_scattered_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + uint16_t retval = 0; + + while (nb_pkts > ICE_VPMD_RX_BURST) { + uint16_t burst = ice_recv_scattered_burst_vec_avx512(rx_queue, + rx_pkts + retval, ICE_VPMD_RX_BURST); + retval += burst; + nb_pkts -= burst; + if (burst < ICE_VPMD_RX_BURST) + return retval; + } + return retval + ice_recv_scattered_burst_vec_avx512(rx_queue, + rx_pkts + retval, nb_pkts); +} + +static inline void +ice_vtx1(volatile struct ice_tx_desc *txdp, + struct rte_mbuf *pkt, uint64_t flags) +{ + uint64_t high_qw = + (ICE_TX_DESC_DTYPE_DATA | + ((uint64_t)flags << ICE_TXD_QW1_CMD_S) | + ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S)); + + __m128i descriptor = _mm_set_epi64x(high_qw, + pkt->buf_physaddr + pkt->data_off); + _mm_store_si128((__m128i *)txdp, descriptor); +} + +static inline void +ice_vtx(volatile struct ice_tx_desc *txdp, + struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags) +{ + const uint64_t hi_qw_tmpl = (ICE_TX_DESC_DTYPE_DATA | + ((uint64_t)flags << ICE_TXD_QW1_CMD_S)); + + /* if unaligned on 32-bit boundary, do one to align */ + if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) { + ice_vtx1(txdp, *pkt, flags); + nb_pkts--, txdp++, pkt++; + } + + /* do two at a time while possible, in bursts */ + for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) { + uint64_t hi_qw3 = + hi_qw_tmpl | + ((uint64_t)pkt[3]->data_len << + ICE_TXD_QW1_TX_BUF_SZ_S); + uint64_t hi_qw2 = + hi_qw_tmpl | + ((uint64_t)pkt[2]->data_len << + ICE_TXD_QW1_TX_BUF_SZ_S); + uint64_t hi_qw1 = + hi_qw_tmpl | + ((uint64_t)pkt[1]->data_len << + ICE_TXD_QW1_TX_BUF_SZ_S); + uint64_t hi_qw0 = + hi_qw_tmpl | + ((uint64_t)pkt[0]->data_len << + ICE_TXD_QW1_TX_BUF_SZ_S); + + __m256i desc2_3 = + _mm256_set_epi64x + (hi_qw3, + pkt[3]->buf_physaddr + pkt[3]->data_off, + hi_qw2, + pkt[2]->buf_physaddr + pkt[2]->data_off); + __m256i desc0_1 = + _mm256_set_epi64x + (hi_qw1, + pkt[1]->buf_physaddr + pkt[1]->data_off, + hi_qw0, + pkt[0]->buf_physaddr + pkt[0]->data_off); + _mm256_store_si256((void *)(txdp + 2), desc2_3); + _mm256_store_si256((void *)txdp, desc0_1); + } + + /* do any last ones */ + while (nb_pkts) { + ice_vtx1(txdp, *pkt, flags); + txdp++, pkt++, nb_pkts--; + } +} + +static inline uint16_t +ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue; + volatile struct ice_tx_desc *txdp; + struct ice_tx_entry *txep; + uint16_t n, nb_commit, tx_id; + uint64_t flags = ICE_TD_CMD; + uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD; + + /* cross rx_thresh boundary is not allowed */ + nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh); + + if (txq->nb_tx_free < txq->tx_free_thresh) + ice_tx_free_bufs(txq); + + nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); + if (unlikely(nb_pkts == 0)) + return 0; + + tx_id = txq->tx_tail; + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring[tx_id]; + + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts); + + n = (uint16_t)(txq->nb_tx_desc - tx_id); + if (nb_commit >= n) { + ice_tx_backlog_entry(txep, tx_pkts, n); + + ice_vtx(txdp, tx_pkts, n - 1, flags); + tx_pkts += (n - 1); + txdp += (n - 1); + + ice_vtx1(txdp, *tx_pkts++, rs); + + nb_commit = (uint16_t)(nb_commit - n); + + tx_id = 0; + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 
1); + + /* avoid reach the end of ring */ + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring[tx_id]; + } + + ice_tx_backlog_entry(txep, tx_pkts, nb_commit); + + ice_vtx(txdp, tx_pkts, nb_commit, flags); + + tx_id = (uint16_t)(tx_id + nb_commit); + if (tx_id > txq->tx_next_rs) { + txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |= + rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) << + ICE_TXD_QW1_CMD_S); + txq->tx_next_rs = + (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh); + } + + txq->tx_tail = tx_id; + + ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail); + + return nb_pkts; +} + +uint16_t +ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_tx = 0; + struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue; + + while (nb_pkts) { + uint16_t ret, num; + + num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh); + ret = ice_xmit_fixed_burst_vec_avx512(tx_queue, + &tx_pkts[nb_tx], num); + nb_tx += ret; + nb_pkts -= ret; + if (ret < num) + break; + } + + return nb_tx; +} diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build index e6fe744879..a972ed668e 100644 --- a/drivers/net/ice/meson.build +++ b/drivers/net/ice/meson.build @@ -33,6 +33,17 @@ if arch_subdir == 'x86' c_args: [cflags, '-mavx2']) objs += ice_avx2_lib.extract_objects('ice_rxtx_vec_avx2.c') endif + + if dpdk_conf.has('RTE_MACHINE_CPUFLAG_AVX512F') or (not machine_args.contains('-mno-avx512f') and cc.has_argument('-mavx512f')) + cflags += ['-DCC_AVX512_SUPPORT'] + ice_avx512_lib = static_library('ice_avx512_lib', + 'ice_rxtx_vec_avx512.c', + dependencies: [static_rte_ethdev, + static_rte_kvargs, static_rte_hash], + include_directories: includes, + c_args: [cflags, '-march=skylake-avx512', '-mavx512f']) + objs += ice_avx512_lib.extract_objects('ice_rxtx_vec_avx512.c') + endif endif sources += files('ice_dcf.c', -- 2.17.1 ^ permalink raw reply [flat|nested] 25+ messages in thread
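For reference, the run-time gate used in the patch above for both the Rx and Tx paths condenses to the sketch below. It is illustrative rather than the driver code verbatim: the AVX512 burst functions are selected only when the build produced AVX512 object code (CC_AVX512_SUPPORT from meson.build), the CPU reports AVX512F, and the EAL SIMD-bitwidth limit allows 512-bit vectors. rte_get_max_simd_bitwidth() and RTE_MAX_512_SIMD come from the EAL series this set depends on; note v3 later in the thread uses the renamed rte_vect_get_max_simd_bitwidth()/RTE_VECT_SIMD_512.

#include <stdbool.h>
#include <rte_cpuflags.h>
#include <rte_eal.h>	/* rte_get_max_simd_bitwidth(), per the dependency series */

/* hypothetical helper, not part of the patch */
static bool
ice_may_use_avx512(void)
{
#ifdef CC_AVX512_SUPPORT
	/* build has AVX512 code; still honour CPU and EAL limits */
	return rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
	       rte_get_max_simd_bitwidth() >= RTE_MAX_512_SIMD;
#else
	/* compiler could not generate AVX512 code for this build */
	return false;
#endif
}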
* [dpdk-dev] [PATCH v2 2/3] net/ice: add RSS hash parsing in AVX512 path 2020-09-18 3:35 ` [dpdk-dev] [PATCH v2 0/3] AVX512 vPMD on ice Leyi Rong 2020-09-18 3:35 ` [dpdk-dev] [PATCH v2 1/3] net/ice: add AVX512 vector path Leyi Rong @ 2020-09-18 3:35 ` Leyi Rong 2020-09-18 3:35 ` [dpdk-dev] [PATCH v2 3/3] net/ice: optimize Tx path on AVX512 vPMD Leyi Rong 2 siblings, 0 replies; 25+ messages in thread From: Leyi Rong @ 2020-09-18 3:35 UTC (permalink / raw) To: qi.z.zhang, wenzhuo.lu, bruce.richardson; +Cc: dev, Leyi Rong Support RSS hash parsing in the AVX512 data path, as the default RXDID is set to #22, which means the RSS hash field is located in the 2nd 16B of each Flex Rx descriptor. Signed-off-by: Leyi Rong <leyi.rong@intel.com> --- drivers/net/ice/ice_rxtx_vec_avx512.c | 105 ++++++++++++++++++++++++-- 1 file changed, 98 insertions(+), 7 deletions(-) diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c index 6a9d0a8eaa..a2a5d9987a 100644 --- a/drivers/net/ice/ice_rxtx_vec_avx512.c +++ b/drivers/net/ice/ice_rxtx_vec_avx512.c @@ -176,8 +176,8 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq, /* mask to shuffle from desc. to mbuf (4 descriptors)*/ const __m512i shuf_msk = _mm512_set4_epi32 - (/* octet 12~15, 32 bits rss */ - 15 << 24 | 14 << 16 | 13 << 8 | 12, + (/* rss hash parsed separately */ + 0xFFFFFFFF, /* octet 10~11, 16 bits vlan_macip */ /* octet 4~5, 16 bits data_len */ 11 << 24 | 10 << 16 | 5 << 8 | 4, @@ -399,6 +399,11 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq, mb4_7 = _mm512_mask_blend_epi32(0x1111, mb4_7, ptype4_7); mb0_3 = _mm512_mask_blend_epi32(0x1111, mb0_3, ptype0_3); + __m256i mb4_5 = _mm512_extracti64x4_epi64(mb4_7, 0); + __m256i mb6_7 = _mm512_extracti64x4_epi64(mb4_7, 1); + __m256i mb0_1 = _mm512_extracti64x4_epi64(mb0_3, 0); + __m256i mb2_3 = _mm512_extracti64x4_epi64(mb0_3, 1); + /** * use permute/extract to get status content * After the operations, the packets status flags are in the @@ -438,6 +443,97 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq, /* merge flags */ const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags, rss_vlan_flags); + +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC + /** + * needs to load the 2nd 16B of each desc for RSS hash parsing, + * which causes a performance drop when this path is taken.
+ */ + if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_RSS_HASH) { + /* load bottom half of every 32B desc */ + const __m128i raw_desc_bh7 = + _mm_load_si128 + ((void *)(&rxdp[7].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh6 = + _mm_load_si128 + ((void *)(&rxdp[6].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh5 = + _mm_load_si128 + ((void *)(&rxdp[5].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh4 = + _mm_load_si128 + ((void *)(&rxdp[4].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh3 = + _mm_load_si128 + ((void *)(&rxdp[3].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh2 = + _mm_load_si128 + ((void *)(&rxdp[2].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh1 = + _mm_load_si128 + ((void *)(&rxdp[1].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh0 = + _mm_load_si128 + ((void *)(&rxdp[0].wb.status_error1)); + + __m256i raw_desc_bh6_7 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc_bh6), + raw_desc_bh7, 1); + __m256i raw_desc_bh4_5 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc_bh4), + raw_desc_bh5, 1); + __m256i raw_desc_bh2_3 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc_bh2), + raw_desc_bh3, 1); + __m256i raw_desc_bh0_1 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc_bh0), + raw_desc_bh1, 1); + + /** + * to shift the 32b RSS hash value to the + * highest 32b of each 128b before mask + */ + __m256i rss_hash6_7 = + _mm256_slli_epi64(raw_desc_bh6_7, 32); + __m256i rss_hash4_5 = + _mm256_slli_epi64(raw_desc_bh4_5, 32); + __m256i rss_hash2_3 = + _mm256_slli_epi64(raw_desc_bh2_3, 32); + __m256i rss_hash0_1 = + _mm256_slli_epi64(raw_desc_bh0_1, 32); + + __m256i rss_hash_msk = + _mm256_set_epi32(0xFFFFFFFF, 0, 0, 0, + 0xFFFFFFFF, 0, 0, 0); + + rss_hash6_7 = _mm256_and_si256 + (rss_hash6_7, rss_hash_msk); + rss_hash4_5 = _mm256_and_si256 + (rss_hash4_5, rss_hash_msk); + rss_hash2_3 = _mm256_and_si256 + (rss_hash2_3, rss_hash_msk); + rss_hash0_1 = _mm256_and_si256 + (rss_hash0_1, rss_hash_msk); + + mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7); + mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5); + mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3); + mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1); + } /* if() on RSS hash parsing */ +#endif + /** * At this point, we have the 8 sets of flags in the low 16-bits * of each 32-bit value in vlan0. @@ -471,11 +567,6 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq, _mm256_srli_si256(mbuf_flags, 4), 0x04); - const __m256i mb4_5 = _mm512_extracti64x4_epi64(mb4_7, 0); - const __m256i mb6_7 = _mm512_extracti64x4_epi64(mb4_7, 1); - const __m256i mb0_1 = _mm512_extracti64x4_epi64(mb0_3, 0); - const __m256i mb2_3 = _mm512_extracti64x4_epi64(mb0_3, 1); - /* permute to add in the rx_descriptor e.g. rss fields */ rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20); rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20); -- 2.17.1 ^ permalink raw reply [flat|nested] 25+ messages in thread
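As a reading aid for the eight-at-a-time vector sequence above, here is a scalar sketch of what it computes per packet. Names follow the base-code flex descriptor definitions for RXDID #22 (the comms OVS layout, where rss_hash sits in the second 16B); treat it as illustrative only, not the driver's actual scalar path:

#include <rte_byteorder.h>
#include <rte_mbuf.h>

/* hypothetical helper, not part of the patch */
static inline void
ice_fill_rss_hash(volatile union ice_rx_flex_desc *rxdp,
		  struct rte_mbuf *mb)
{
	/* RXDID #22 puts the RSS hash in the second 16B of the 32B
	 * descriptor, hence the extra per-descriptor loads above
	 */
	volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
		(volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;

	if (rte_le_to_cpu_16(desc->status_error0) &
	    (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S)) {
		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
		mb->ol_flags |= PKT_RX_RSS_HASH;
	}
}

The vector code reaches the same result eight descriptors at a time: each 32b hash is shifted into the high dword of its 128b lane, everything else is masked off, and the result is ORed into the mb registers that are later merged into rearm_data.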
* [dpdk-dev] [PATCH v2 3/3] net/ice: optimize Tx path on AVX512 vPMD 2020-09-18 3:35 ` [dpdk-dev] [PATCH v2 0/3] AVX512 vPMD on ice Leyi Rong 2020-09-18 3:35 ` [dpdk-dev] [PATCH v2 1/3] net/ice: add AVX512 vector path Leyi Rong 2020-09-18 3:35 ` [dpdk-dev] [PATCH v2 2/3] net/ice: add RSS hash parsing in AVX512 path Leyi Rong @ 2020-09-18 3:35 ` Leyi Rong 2 siblings, 0 replies; 25+ messages in thread From: Leyi Rong @ 2020-09-18 3:35 UTC (permalink / raw) To: qi.z.zhang, wenzhuo.lu, bruce.richardson; +Cc: dev, Leyi Rong, Bruce Richardson Optimize Tx path by using AVX512 instructions and vectorize the tx free bufs process. Signed-off-by: Leyi Rong <leyi.rong@intel.com> Signed-off-by: Bruce Richardson <bruce.richardson@intel.com> --- drivers/net/ice/ice_rxtx.h | 4 + drivers/net/ice/ice_rxtx_vec_avx512.c | 155 +++++++++++++++++++++----- drivers/net/ice/ice_rxtx_vec_common.h | 36 ++++-- 3 files changed, 162 insertions(+), 33 deletions(-) diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h index c13e54b251..6eb3447d26 100644 --- a/drivers/net/ice/ice_rxtx.h +++ b/drivers/net/ice/ice_rxtx.h @@ -91,6 +91,10 @@ struct ice_tx_entry { uint16_t last_id; }; +struct ice_vec_tx_entry { + struct rte_mbuf *mbuf; +}; + struct ice_tx_queue { uint16_t nb_tx_desc; /* number of TX descriptors */ rte_iova_t tx_ring_dma; /* TX ring DMA address */ diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c index a2a5d9987a..e5e7cc1482 100644 --- a/drivers/net/ice/ice_rxtx_vec_avx512.c +++ b/drivers/net/ice/ice_rxtx_vec_avx512.c @@ -756,6 +756,108 @@ ice_recv_scattered_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts, rx_pkts + retval, nb_pkts); } +static __rte_always_inline int +ice_tx_free_bufs_avx512(struct ice_tx_queue *txq) +{ + struct ice_vec_tx_entry *txep; + uint32_t n; + uint32_t i; + int nb_free = 0; + struct rte_mbuf *m, *free[ICE_TX_MAX_FREE_BUF_SZ]; + + /* check DD bits on threshold descriptor */ + if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz & + rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) != + rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE)) + return 0; + + n = txq->tx_rs_thresh; + + /* first buffer to free from S/W ring is at index + * tx_next_dd - (tx_rs_thresh - 1) + */ + txep = (void *)txq->sw_ring; + txep += txq->tx_next_dd - (n - 1); + + if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) { + struct rte_mempool *mp = txep[0].mbuf->pool; + struct rte_mempool_cache *cache = rte_mempool_default_cache(mp, + rte_lcore_id()); + void **cache_objs = &cache->objs[cache->len]; + + if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) { + rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n); + goto done; + } + + /* The cache follows the following algorithm + * 1. Add the objects to the cache + * 2. Anything greater than the cache min value (if it + * crosses the cache flush threshold) is flushed to the ring.
+ */ + /* Add elements back into the cache */ + uint32_t copied = 0; + /* n is multiple of 32 */ + while (copied < n) { + const __m512i a = _mm512_loadu_si512(&txep[copied]); + const __m512i b = _mm512_loadu_si512(&txep[copied + 8]); + const __m512i c = _mm512_loadu_si512(&txep[copied + 16]); + const __m512i d = _mm512_loadu_si512(&txep[copied + 24]); + + _mm512_storeu_si512(&cache_objs[copied], a); + _mm512_storeu_si512(&cache_objs[copied + 8], b); + _mm512_storeu_si512(&cache_objs[copied + 16], c); + _mm512_storeu_si512(&cache_objs[copied + 24], d); + copied += 32; + } + cache->len += n; + + if (cache->len >= cache->flushthresh) { + rte_mempool_ops_enqueue_bulk + (mp, &cache->objs[cache->size], + cache->len - cache->size); + cache->len = cache->size; + } + goto done; + } + + m = rte_pktmbuf_prefree_seg(txep[0].mbuf); + if (likely(m)) { + free[0] = m; + nb_free = 1; + for (i = 1; i < n; i++) { + m = rte_pktmbuf_prefree_seg(txep[i].mbuf); + if (likely(m)) { + if (likely(m->pool == free[0]->pool)) { + free[nb_free++] = m; + } else { + rte_mempool_put_bulk(free[0]->pool, + (void *)free, + nb_free); + free[0] = m; + nb_free = 1; + } + } + } + rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free); + } else { + for (i = 1; i < n; i++) { + m = rte_pktmbuf_prefree_seg(txep[i].mbuf); + if (m) + rte_mempool_put(m->pool, m); + } + } + +done: + /* buffers were freed, update counters */ + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh); + txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh); + if (txq->tx_next_dd >= txq->nb_tx_desc) + txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); + + return txq->tx_rs_thresh; +} + static inline void ice_vtx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf *pkt, uint64_t flags) @@ -766,7 +868,7 @@ ice_vtx1(volatile struct ice_tx_desc *txdp, ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S)); __m128i descriptor = _mm_set_epi64x(high_qw, - pkt->buf_physaddr + pkt->data_off); + pkt->buf_iova + pkt->data_off); _mm_store_si128((__m128i *)txdp, descriptor); } @@ -777,13 +879,6 @@ ice_vtx(volatile struct ice_tx_desc *txdp, const uint64_t hi_qw_tmpl = (ICE_TX_DESC_DTYPE_DATA | ((uint64_t)flags << ICE_TXD_QW1_CMD_S)); - /* if unaligned on 32-bit boundary, do one to align */ - if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) { - ice_vtx1(txdp, *pkt, flags); - nb_pkts--, txdp++, pkt++; - } - - /* do two at a time while possible, in bursts */ for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) { uint64_t hi_qw3 = hi_qw_tmpl | @@ -802,20 +897,17 @@ ice_vtx(volatile struct ice_tx_desc *txdp, ((uint64_t)pkt[0]->data_len << ICE_TXD_QW1_TX_BUF_SZ_S); - __m256i desc2_3 = - _mm256_set_epi64x + __m512i desc0_3 = + _mm512_set_epi64 (hi_qw3, - pkt[3]->buf_physaddr + pkt[3]->data_off, + pkt[3]->buf_iova + pkt[3]->data_off, hi_qw2, - pkt[2]->buf_physaddr + pkt[2]->data_off); - __m256i desc0_1 = - _mm256_set_epi64x - (hi_qw1, - pkt[1]->buf_physaddr + pkt[1]->data_off, + pkt[2]->buf_iova + pkt[2]->data_off, + hi_qw1, + pkt[1]->buf_iova + pkt[1]->data_off, hi_qw0, - pkt[0]->buf_physaddr + pkt[0]->data_off); - _mm256_store_si256((void *)(txdp + 2), desc2_3); - _mm256_store_si256((void *)txdp, desc0_1); + pkt[0]->buf_iova + pkt[0]->data_off); + _mm512_storeu_si512((void *)txdp, desc0_3); } /* do any last ones */ @@ -825,13 +917,23 @@ ice_vtx(volatile struct ice_tx_desc *txdp, } } +static __rte_always_inline void +ice_tx_backlog_entry_avx512(struct ice_vec_tx_entry *txep, + struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + int i; + + for (i = 
0; i < (int)nb_pkts; ++i) + txep[i].mbuf = tx_pkts[i]; +} + static inline uint16_t ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue; volatile struct ice_tx_desc *txdp; - struct ice_tx_entry *txep; + struct ice_vec_tx_entry *txep; uint16_t n, nb_commit, tx_id; uint64_t flags = ICE_TD_CMD; uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD; @@ -840,7 +942,7 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh); if (txq->nb_tx_free < txq->tx_free_thresh) - ice_tx_free_bufs(txq); + ice_tx_free_bufs_avx512(txq); nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); if (unlikely(nb_pkts == 0)) @@ -848,13 +950,14 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, tx_id = txq->tx_tail; txdp = &txq->tx_ring[tx_id]; - txep = &txq->sw_ring[tx_id]; + txep = (void *)txq->sw_ring; + txep += tx_id; txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts); n = (uint16_t)(txq->nb_tx_desc - tx_id); if (nb_commit >= n) { - ice_tx_backlog_entry(txep, tx_pkts, n); + ice_tx_backlog_entry_avx512(txep, tx_pkts, n); ice_vtx(txdp, tx_pkts, n - 1, flags); tx_pkts += (n - 1); @@ -868,11 +971,11 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); /* avoid reach the end of ring */ - txdp = &txq->tx_ring[tx_id]; - txep = &txq->sw_ring[tx_id]; + txdp = txq->tx_ring; + txep = (void *)txq->sw_ring; } - ice_tx_backlog_entry(txep, tx_pkts, nb_commit); + ice_tx_backlog_entry_avx512(txep, tx_pkts, nb_commit); ice_vtx(txdp, tx_pkts, nb_commit, flags); diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h index e2019c8d6a..ae2ac29f2a 100644 --- a/drivers/net/ice/ice_rxtx_vec_common.h +++ b/drivers/net/ice/ice_rxtx_vec_common.h @@ -189,16 +189,38 @@ _ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq) * so need to free remains more carefully. */ i = txq->tx_next_dd - txq->tx_rs_thresh + 1; - if (txq->tx_tail < i) { - for (; i < txq->nb_tx_desc; i++) { + +#ifdef CC_AVX512_SUPPORT + struct rte_eth_dev *dev = txq->vsi->adapter->eth_dev; + + if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512) { + struct ice_vec_tx_entry *swr = (void *)txq->sw_ring; + + if (txq->tx_tail < i) { + for (; i < txq->nb_tx_desc; i++) { + rte_pktmbuf_free_seg(swr[i].mbuf); + swr[i].mbuf = NULL; + } + i = 0; + } + for (; i < txq->tx_tail; i++) { + rte_pktmbuf_free_seg(swr[i].mbuf); + swr[i].mbuf = NULL; + } + } else +#endif + { + if (txq->tx_tail < i) { + for (; i < txq->nb_tx_desc; i++) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } + i = 0; + } + for (; i < txq->tx_tail; i++) { rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); txq->sw_ring[i].mbuf = NULL; } - i = 0; - } - for (; i < txq->tx_tail; i++) { - rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); - txq->sw_ring[i].mbuf = NULL; } } -- 2.17.1 ^ permalink raw reply [flat|nested] 25+ messages in thread
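The heart of the new free path above is the fast-free branch. Under DEV_TX_OFFLOAD_MBUF_FAST_FREE the application guarantees that completed mbufs are non-segmented, have a reference count of 1 and all come from the same mempool, so a whole slice of the software ring can be returned in one bulk call instead of running rte_pktmbuf_prefree_seg() on every mbuf. A minimal sketch of that idea (hypothetical helper, not the patch code):

#include <rte_mbuf.h>
#include <rte_mempool.h>

static void
ice_tx_fast_free(struct rte_mbuf **done, uint32_t n)
{
	/* FAST_FREE contract: every mbuf shares done[0]->pool */
	rte_mempool_put_bulk(done[0]->pool, (void **)done, n);
}

Shrinking the ring entry to a single pointer (struct ice_vec_tx_entry) is what enables the cache variant in the patch: the software ring slice is then itself a dense array of mbuf pointers, which can be copied into the mempool cache with 64-byte _mm512 loads and stores, eight entries per register.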
* [dpdk-dev] [PATCH v3 0/3] AVX512 vPMD on ice 2020-09-10 6:55 [dpdk-dev] [PATCH v1 0/2] AVX512 vPMD on ice Leyi Rong ` (2 preceding siblings ...) 2020-09-18 3:35 ` [dpdk-dev] [PATCH v2 0/3] AVX512 vPMD on ice Leyi Rong @ 2020-10-20 10:51 ` Leyi Rong 2020-10-20 10:51 ` [dpdk-dev] [PATCH v3 1/3] net/ice: add AVX512 vector path Leyi Rong ` (2 more replies) 2020-10-23 4:14 ` [dpdk-dev] [PATCH v4 0/3] AVX512 vPMD on ice Leyi Rong 4 siblings, 3 replies; 25+ messages in thread From: Leyi Rong @ 2020-10-20 10:51 UTC (permalink / raw) To: bruce.richardson, wenzhuo.lu, qi.z.zhang; +Cc: dev, Leyi Rong This patchset aims to support AVX512 vPMD on ice. --- v3: - Code rebased. v2: - No internal judgement when RTE_MACHINE_CPUFLAG_AVX512F is set in meson.build. - Add RSS hash parsing as default RXDID is set to #22. - Use buf_iova instead of buf_physaddr, as buf_physaddr will be removed. Leyi Rong (3): net/ice: add AVX512 vector path net/ice: add RSS hash parsing in AVX512 path net/ice: optimize Tx path on AVX512 vPMD drivers/net/ice/ice_rxtx.c | 90 ++- drivers/net/ice/ice_rxtx.h | 11 + drivers/net/ice/ice_rxtx_vec_avx512.c | 1018 +++++++++++++++++++++++++ drivers/net/ice/ice_rxtx_vec_common.h | 36 +- drivers/net/ice/meson.build | 11 + 5 files changed, 1141 insertions(+), 25 deletions(-) create mode 100644 drivers/net/ice/ice_rxtx_vec_avx512.c -- 2.17.1 ^ permalink raw reply [flat|nested] 25+ messages in thread
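On the buf_physaddr item in the v2 changelog: what the Tx descriptor carries is the bus address of the first payload byte, and after the change that is computed from the mbuf's buf_iova field, which is valid in both physical- and virtual-addressing (IOMMU) modes. A one-function sketch with a hypothetical helper name:

#include <stdint.h>
#include <rte_mbuf.h>

static inline uint64_t
ice_pkt_data_iova(const struct rte_mbuf *m)
{
	/* buf_iova replaces the removed buf_physaddr field */
	return m->buf_iova + m->data_off;
}

This is the same value the mbuf API exposes as rte_mbuf_data_iova(m).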
* [dpdk-dev] [PATCH v3 1/3] net/ice: add AVX512 vector path 2020-10-20 10:51 ` [dpdk-dev] [PATCH v3 0/3] AVX512 vPMD on ice Leyi Rong @ 2020-10-20 10:51 ` Leyi Rong 2020-10-20 10:51 ` [dpdk-dev] [PATCH v3 2/3] net/ice: add RSS hash parsing in AVX512 path Leyi Rong 2020-10-20 10:51 ` [dpdk-dev] [PATCH v3 3/3] net/ice: optimize Tx path on AVX512 vPMD Leyi Rong 2 siblings, 0 replies; 25+ messages in thread From: Leyi Rong @ 2020-10-20 10:51 UTC (permalink / raw) To: bruce.richardson, wenzhuo.lu, qi.z.zhang; +Cc: dev, Leyi Rong Add AVX512 support for ice PMD. This patch adds ice_rxtx_vec_avx512.c to support ice AVX512 vPMD. This patch aims to enable AVX512 on ice vPMD. Main changes are focus on Rx path compared with AVX2 vPMD. Signed-off-by: Leyi Rong <leyi.rong@intel.com> Signed-off-by: Bruce Richardson <bruce.richardson@intel.com> --- drivers/net/ice/ice_rxtx.c | 90 ++- drivers/net/ice/ice_rxtx.h | 7 + drivers/net/ice/ice_rxtx_vec_avx512.c | 824 ++++++++++++++++++++++++++ drivers/net/ice/meson.build | 11 + 4 files changed, 914 insertions(+), 18 deletions(-) create mode 100644 drivers/net/ice/ice_rxtx_vec_avx512.c diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c index ee576c362a..f6291894cd 100644 --- a/drivers/net/ice/ice_rxtx.c +++ b/drivers/net/ice/ice_rxtx.c @@ -1930,6 +1930,10 @@ ice_dev_supported_ptypes_get(struct rte_eth_dev *dev) #ifdef RTE_ARCH_X86 if (dev->rx_pkt_burst == ice_recv_pkts_vec || dev->rx_pkt_burst == ice_recv_scattered_pkts_vec || +#ifdef CC_AVX512_SUPPORT + dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 || + dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 || +#endif dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 || dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2) return ptypes; @@ -2987,6 +2991,7 @@ ice_set_rx_function(struct rte_eth_dev *dev) #ifdef RTE_ARCH_X86 struct ice_rx_queue *rxq; int i; + bool use_avx512 = false; bool use_avx2 = false; if (rte_eal_process_type() == RTE_PROC_PRIMARY) { @@ -3001,9 +3006,18 @@ ice_set_rx_function(struct rte_eth_dev *dev) } } - if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 || - rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) && - rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256) + if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 && + rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) +#ifdef CC_AVX512_SUPPORT + use_avx512 = true; +#else + PMD_DRV_LOG(NOTICE, + "AVX512 is not supported in build env"); +#endif + if (!use_avx512 && + (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 || + rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) && + rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256) use_avx2 = true; } else { @@ -3013,20 +3027,37 @@ ice_set_rx_function(struct rte_eth_dev *dev) if (ad->rx_vec_allowed) { if (dev->data->scattered_rx) { - PMD_DRV_LOG(DEBUG, + if (use_avx512) { + PMD_DRV_LOG(NOTICE, + "Using AVX512 Vector Scattered Rx (port %d).", + dev->data->port_id); + dev->rx_pkt_burst = + ice_recv_scattered_pkts_vec_avx512; + } else { + PMD_DRV_LOG(DEBUG, "Using %sVector Scattered Rx (port %d).", use_avx2 ? "avx2 " : "", dev->data->port_id); - dev->rx_pkt_burst = use_avx2 ? + dev->rx_pkt_burst = use_avx2 ? ice_recv_scattered_pkts_vec_avx2 : ice_recv_scattered_pkts_vec; + } } else { - PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).", + if (use_avx512) { + PMD_DRV_LOG(NOTICE, + "Using AVX512 Vector Rx (port %d).", + dev->data->port_id); + dev->rx_pkt_burst = + ice_recv_pkts_vec_avx512; + } else { + PMD_DRV_LOG(DEBUG, + "Using %sVector Rx (port %d).", use_avx2 ? 
"avx2 " : "", dev->data->port_id); - dev->rx_pkt_burst = use_avx2 ? - ice_recv_pkts_vec_avx2 : - ice_recv_pkts_vec; + dev->rx_pkt_burst = use_avx2 ? + ice_recv_pkts_vec_avx2 : + ice_recv_pkts_vec; + } } return; } @@ -3063,6 +3094,10 @@ static const struct { { ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" }, { ice_recv_pkts, "Scalar" }, #ifdef RTE_ARCH_X86 +#ifdef CC_AVX512_SUPPORT + { ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" }, + { ice_recv_pkts_vec_avx512, "Vector AVX512" }, +#endif { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" }, { ice_recv_pkts_vec_avx2, "Vector AVX2" }, { ice_recv_scattered_pkts_vec, "Vector SSE Scattered" }, @@ -3167,6 +3202,7 @@ ice_set_tx_function(struct rte_eth_dev *dev) #ifdef RTE_ARCH_X86 struct ice_tx_queue *txq; int i; + bool use_avx512 = false; bool use_avx2 = false; if (rte_eal_process_type() == RTE_PROC_PRIMARY) { @@ -3181,9 +3217,18 @@ ice_set_tx_function(struct rte_eth_dev *dev) } } - if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 || - rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) && - rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256) + if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 && + rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) +#ifdef CC_AVX512_SUPPORT + use_avx512 = true; +#else + PMD_DRV_LOG(NOTICE, + "AVX512 is not supported in build env"); +#endif + if (!use_avx512 && + (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 || + rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) && + rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256) use_avx2 = true; } else { @@ -3192,12 +3237,18 @@ ice_set_tx_function(struct rte_eth_dev *dev) } if (ad->tx_vec_allowed) { - PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).", - use_avx2 ? "avx2 " : "", - dev->data->port_id); - dev->tx_pkt_burst = use_avx2 ? - ice_xmit_pkts_vec_avx2 : - ice_xmit_pkts_vec; + if (use_avx512) { + PMD_DRV_LOG(NOTICE, "Using AVX512 Vector Tx (port %d).", + dev->data->port_id); + dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512; + } else { + PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).", + use_avx2 ? "avx2 " : "", + dev->data->port_id); + dev->tx_pkt_burst = use_avx2 ? 
+ ice_xmit_pkts_vec_avx2 : + ice_xmit_pkts_vec; + } dev->tx_pkt_prepare = NULL; return; @@ -3222,6 +3273,9 @@ static const struct { { ice_xmit_pkts_simple, "Scalar Simple" }, { ice_xmit_pkts, "Scalar" }, #ifdef RTE_ARCH_X86 +#ifdef CC_AVX512_SUPPORT + { ice_xmit_pkts_vec_avx512, "Vector AVX512" }, +#endif { ice_xmit_pkts_vec_avx2, "Vector AVX2" }, { ice_xmit_pkts_vec, "Vector SSE" }, #endif diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h index 1c23c7541e..7cebbc2916 100644 --- a/drivers/net/ice/ice_rxtx.h +++ b/drivers/net/ice/ice_rxtx.h @@ -248,6 +248,13 @@ uint16_t ice_recv_scattered_pkts_vec_avx2(void *rx_queue, uint16_t nb_pkts); uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); +uint16_t ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t ice_recv_scattered_pkts_vec_avx512(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc); int ice_tx_done_cleanup(void *txq, uint32_t free_cnt); diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c new file mode 100644 index 0000000000..6a9d0a8eaa --- /dev/null +++ b/drivers/net/ice/ice_rxtx_vec_avx512.c @@ -0,0 +1,824 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#include "ice_rxtx_vec_common.h" + +#include <x86intrin.h> + +#ifndef __INTEL_COMPILER +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +#define ICE_DESCS_PER_LOOP_AVX 8 + +static inline void +ice_rxq_rearm(struct ice_rx_queue *rxq) +{ + int i; + uint16_t rx_id; + volatile union ice_rx_flex_desc *rxdp; + struct ice_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start]; + struct rte_mempool_cache *cache = rte_mempool_default_cache(rxq->mp, + rte_lcore_id()); + + rxdp = rxq->rx_ring + rxq->rxrearm_start; + + /* We need to pull 'n' more MBUFs into the software ring */ + if (cache->len < ICE_RXQ_REARM_THRESH) { + uint32_t req = ICE_RXQ_REARM_THRESH + (cache->size - + cache->len); + + int ret = rte_mempool_ops_dequeue_bulk(rxq->mp, + &cache->objs[cache->len], req); + if (ret == 0) { + cache->len += req; + } else { + if (rxq->rxrearm_nb + ICE_RXQ_REARM_THRESH >= + rxq->nb_rx_desc) { + __m128i dma_addr0; + + dma_addr0 = _mm_setzero_si128(); + for (i = 0; i < ICE_DESCS_PER_LOOP; i++) { + rxep[i].mbuf = &rxq->fake_mbuf; + _mm_store_si128 + ((__m128i *)&rxdp[i].read, + dma_addr0); + } + } + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += + ICE_RXQ_REARM_THRESH; + return; + } + } + + const __m512i iova_offsets = _mm512_set1_epi64 + (offsetof(struct rte_mbuf, buf_iova)); + const __m512i headroom = _mm512_set1_epi64(RTE_PKTMBUF_HEADROOM); + +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC + /* shuffle the iova into correct slots. Values 4-7 will contain + * zeros, so use 7 for a zero-value. 
+ */ + const __m512i permute_idx = _mm512_set_epi64(7, 7, 3, 1, 7, 7, 2, 0); +#else + const __m512i permute_idx = _mm512_set_epi64(7, 3, 6, 2, 5, 1, 4, 0); +#endif + + /* fill up the rxd in vector, process 8 mbufs in one loop */ + for (i = 0; i < ICE_RXQ_REARM_THRESH / 8; i++) { + const __m512i mbuf_ptrs = _mm512_loadu_si512 + (&cache->objs[cache->len - 8]); + _mm512_store_si512(rxep, mbuf_ptrs); + + /* gather iova of mbuf0-7 into one zmm reg */ + const __m512i iova_base_addrs = _mm512_i64gather_epi64 + (_mm512_add_epi64(mbuf_ptrs, iova_offsets), + 0, /* base */ + 1 /* scale */); + const __m512i iova_addrs = _mm512_add_epi64(iova_base_addrs, + headroom); +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC + const __m512i iovas0 = _mm512_castsi256_si512 + (_mm512_extracti64x4_epi64(iova_addrs, 0)); + const __m512i iovas1 = _mm512_castsi256_si512 + (_mm512_extracti64x4_epi64(iova_addrs, 1)); + + /* permute leaves iova 2-3 in hdr_addr of desc 0-1 + * but these are ignored by driver since header split not + * enabled. Similarly for desc 4 & 5. + */ + const __m512i desc0_1 = _mm512_permutexvar_epi64 + (permute_idx, iovas0); + const __m512i desc2_3 = _mm512_bsrli_epi128(desc0_1, 8); + + const __m512i desc4_5 = _mm512_permutexvar_epi64 + (permute_idx, iovas1); + const __m512i desc6_7 = _mm512_bsrli_epi128(desc4_5, 8); + + _mm512_store_si512((void *)rxdp, desc0_1); + _mm512_store_si512((void *)(rxdp + 2), desc2_3); + _mm512_store_si512((void *)(rxdp + 4), desc4_5); + _mm512_store_si512((void *)(rxdp + 6), desc6_7); +#else + /* permute leaves iova 4-7 in hdr_addr of desc 0-3 + * but these are ignored by driver since header split not + * enabled. + */ + const __m512i desc0_3 = _mm512_permutexvar_epi64 + (permute_idx, iova_addrs); + const __m512i desc4_7 = _mm512_bsrli_epi128(desc0_3, 8); + + _mm512_store_si512((void *)rxdp, desc0_3); + _mm512_store_si512((void *)(rxdp + 4), desc4_7); +#endif + rxep += 8, rxdp += 8, cache->len -= 8; + } + + rxq->rxrearm_start += ICE_RXQ_REARM_THRESH; + if (rxq->rxrearm_start >= rxq->nb_rx_desc) + rxq->rxrearm_start = 0; + + rxq->rxrearm_nb -= ICE_RXQ_REARM_THRESH; + + rx_id = (uint16_t)((rxq->rxrearm_start == 0) ? 
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1)); + + /* Update the tail pointer on the NIC */ + ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id); +} + +static inline uint16_t +_ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts, uint8_t *split_packet) +{ + const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; + const __m256i mbuf_init = _mm256_set_epi64x(0, 0, + 0, rxq->mbuf_initializer); + struct ice_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail]; + volatile union ice_rx_flex_desc *rxdp = rxq->rx_ring + rxq->rx_tail; + + rte_prefetch0(rxdp); + + /* nb_pkts has to be floor-aligned to ICE_DESCS_PER_LOOP_AVX */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, ICE_DESCS_PER_LOOP_AVX); + + /* See if we need to rearm the RX queue - gives the prefetch a bit + * of time to act + */ + if (rxq->rxrearm_nb > ICE_RXQ_REARM_THRESH) + ice_rxq_rearm(rxq); + + /* Before we start moving massive data around, check to see if + * there is actually a packet available + */ + if (!(rxdp->wb.status_error0 & + rte_cpu_to_le_32(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))) + return 0; + + /* constants used in processing loop */ + const __m512i crc_adjust = + _mm512_set4_epi32 + (0, /* ignore non-length fields */ + -rxq->crc_len, /* sub crc on data_len */ + -rxq->crc_len, /* sub crc on pkt_len */ + 0 /* ignore non-length fields */ + ); + + /* 8 packets DD mask, LSB in each 32-bit value */ + const __m256i dd_check = _mm256_set1_epi32(1); + + /* 8 packets EOP mask, second-LSB in each 32-bit value */ + const __m256i eop_check = _mm256_slli_epi32(dd_check, + ICE_RX_DESC_STATUS_EOF_S); + + /* mask to shuffle from desc. to mbuf (4 descriptors)*/ + const __m512i shuf_msk = + _mm512_set4_epi32 + (/* octet 12~15, 32 bits rss */ + 15 << 24 | 14 << 16 | 13 << 8 | 12, + /* octet 10~11, 16 bits vlan_macip */ + /* octet 4~5, 16 bits data_len */ + 11 << 24 | 10 << 16 | 5 << 8 | 4, + /* skip hi 16 bits pkt_len, zero out */ + /* octet 4~5, 16 bits pkt_len */ + 0xFFFF << 16 | 5 << 8 | 4, + /* pkt_type set as unknown */ + 0xFFFFFFFF + ); + + /** + * compile-time check the above crc and shuffle layout is correct. + * NOTE: the first field (lowest address) is given last in set_epi + * calls above. + */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12); + + /* Status/Error flag masks */ + /** + * mask everything except Checksum Reports, RSS indication + * and VLAN indication. + * bit6:4 for IP/L4 checksum errors. + * bit12 is for RSS indication. + * bit13 is for VLAN indication. + */ + const __m256i flags_mask = + _mm256_set1_epi32((7 << 4) | (1 << 12) | (1 << 13)); + /** + * data to be shuffled by the result of the flags mask shifted by 4 + * bits. This gives use the l3_l4 flags. 
+ */ + const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, + /* shift right 1 bit to make sure it not exceed 255 */ + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, + /* 2nd 128-bits */ + 0, 0, 0, 0, 0, 0, 0, 0, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1); + const __m256i cksum_mask = + _mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_EIP_CKSUM_BAD); + /** + * data to be shuffled by result of flag mask, shifted down 12. + * If RSS(bit12)/VLAN(bit13) are set, + * shuffle moves appropriate flags in place. + */ + const __m256i rss_vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_RSS_HASH, 0, + /* 2nd 128-bits */ + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_RSS_HASH, 0); + + uint16_t i, received; + + for (i = 0, received = 0; i < nb_pkts; + i += ICE_DESCS_PER_LOOP_AVX, + rxdp += ICE_DESCS_PER_LOOP_AVX) { + /* step 1, copy over 8 mbuf pointers to rx_pkts array */ + _mm256_storeu_si256((void *)&rx_pkts[i], + _mm256_loadu_si256((void *)&sw_ring[i])); +#ifdef RTE_ARCH_X86_64 + _mm256_storeu_si256 + ((void *)&rx_pkts[i + 4], + _mm256_loadu_si256((void *)&sw_ring[i + 4])); +#endif + + __m512i raw_desc0_3, raw_desc4_7; + __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7; + + /* load in descriptors, in reverse order */ + const __m128i raw_desc7 = + _mm_load_si128((void *)(rxdp + 7)); + rte_compiler_barrier(); + const __m128i raw_desc6 = + _mm_load_si128((void *)(rxdp + 6)); + rte_compiler_barrier(); + const __m128i raw_desc5 = + _mm_load_si128((void *)(rxdp + 5)); + rte_compiler_barrier(); + const __m128i raw_desc4 = + _mm_load_si128((void *)(rxdp + 4)); + rte_compiler_barrier(); + const __m128i raw_desc3 = + _mm_load_si128((void *)(rxdp + 3)); + rte_compiler_barrier(); + const __m128i raw_desc2 = + _mm_load_si128((void *)(rxdp + 2)); + rte_compiler_barrier(); + const __m128i raw_desc1 = + _mm_load_si128((void *)(rxdp + 1)); + rte_compiler_barrier(); + const __m128i raw_desc0 = + _mm_load_si128((void *)(rxdp + 0)); + + raw_desc6_7 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc6), + raw_desc7, 1); + raw_desc4_5 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc4), + raw_desc5, 1); + raw_desc2_3 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc2), + raw_desc3, 1); + raw_desc0_1 = + _mm256_inserti128_si256 
+ (_mm256_castsi128_si256(raw_desc0), + raw_desc1, 1); + + raw_desc4_7 = + _mm512_inserti64x4 + (_mm512_castsi256_si512(raw_desc4_5), + raw_desc6_7, 1); + raw_desc0_3 = + _mm512_inserti64x4 + (_mm512_castsi256_si512(raw_desc0_1), + raw_desc2_3, 1); + + if (split_packet) { + int j; + + for (j = 0; j < ICE_DESCS_PER_LOOP_AVX; j++) + rte_mbuf_prefetch_part2(rx_pkts[i + j]); + } + + /** + * convert descriptors 0-7 into mbufs, re-arrange fields. + * Then write into the mbuf. + */ + __m512i mb4_7 = _mm512_shuffle_epi8(raw_desc4_7, shuf_msk); + __m512i mb0_3 = _mm512_shuffle_epi8(raw_desc0_3, shuf_msk); + + mb4_7 = _mm512_add_epi32(mb4_7, crc_adjust); + mb0_3 = _mm512_add_epi32(mb0_3, crc_adjust); + + /** + * to get packet types, ptype is located in bit16-25 + * of each 128bits + */ + const __m512i ptype_mask = + _mm512_set1_epi16(ICE_RX_FLEX_DESC_PTYPE_M); + + /** + * to get packet types, ptype is located in bit16-25 + * of each 128bits + */ + const __m512i ptypes4_7 = + _mm512_and_si512(raw_desc4_7, ptype_mask); + const __m512i ptypes0_3 = + _mm512_and_si512(raw_desc0_3, ptype_mask); + + const __m256i ptypes6_7 = + _mm512_extracti64x4_epi64(ptypes4_7, 1); + const __m256i ptypes4_5 = + _mm512_extracti64x4_epi64(ptypes4_7, 0); + const __m256i ptypes2_3 = + _mm512_extracti64x4_epi64(ptypes0_3, 1); + const __m256i ptypes0_1 = + _mm512_extracti64x4_epi64(ptypes0_3, 0); + const uint16_t ptype7 = _mm256_extract_epi16(ptypes6_7, 9); + const uint16_t ptype6 = _mm256_extract_epi16(ptypes6_7, 1); + const uint16_t ptype5 = _mm256_extract_epi16(ptypes4_5, 9); + const uint16_t ptype4 = _mm256_extract_epi16(ptypes4_5, 1); + const uint16_t ptype3 = _mm256_extract_epi16(ptypes2_3, 9); + const uint16_t ptype2 = _mm256_extract_epi16(ptypes2_3, 1); + const uint16_t ptype1 = _mm256_extract_epi16(ptypes0_1, 9); + const uint16_t ptype0 = _mm256_extract_epi16(ptypes0_1, 1); + + const __m512i ptype4_7 = _mm512_set_epi32 + (0, 0, 0, ptype_tbl[ptype7], + 0, 0, 0, ptype_tbl[ptype6], + 0, 0, 0, ptype_tbl[ptype5], + 0, 0, 0, ptype_tbl[ptype4]); + const __m512i ptype0_3 = _mm512_set_epi32 + (0, 0, 0, ptype_tbl[ptype3], + 0, 0, 0, ptype_tbl[ptype2], + 0, 0, 0, ptype_tbl[ptype1], + 0, 0, 0, ptype_tbl[ptype0]); + + mb4_7 = _mm512_mask_blend_epi32(0x1111, mb4_7, ptype4_7); + mb0_3 = _mm512_mask_blend_epi32(0x1111, mb0_3, ptype0_3); + + /** + * use permute/extract to get status content + * After the operations, the packets status flags are in the + * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6] + */ + /* merge the status bits into one register */ + const __m512i status_permute_msk = _mm512_set_epi32 + (0, 0, 0, 0, + 0, 0, 0, 0, + 22, 30, 6, 14, + 18, 26, 2, 10); + const __m512i raw_status0_7 = _mm512_permutex2var_epi32 + (raw_desc4_7, status_permute_msk, raw_desc0_3); + __m256i status0_7 = _mm512_extracti64x4_epi64 + (raw_status0_7, 0); + + /* now do flag manipulation */ + + /* get only flag/error bits we want */ + const __m256i flag_bits = + _mm256_and_si256(status0_7, flags_mask); + /** + * l3_l4_error flags, shuffle, then shift to correct adjustment + * of flags in flags_shuf, and finally mask out extra bits + */ + __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf, + _mm256_srli_epi32(flag_bits, 4)); + l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1); + l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask); + /* set rss and vlan flags */ + const __m256i rss_vlan_flag_bits = + _mm256_srli_epi32(flag_bits, 12); + const __m256i rss_vlan_flags = + _mm256_shuffle_epi8(rss_vlan_flags_shuf, + rss_vlan_flag_bits); + + /* merge 
flags */ + const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags, + rss_vlan_flags); + /** + * At this point, we have the 8 sets of flags in the low 16-bits + * of each 32-bit value in vlan0. + * We want to extract these, and merge them with the mbuf init + * data so we can do a single write to the mbuf to set the flags + * and all the other initialization fields. Extracting the + * appropriate flags means that we have to do a shift and blend + * for each mbuf before we do the write. However, we can also + * add in the previously computed rx_descriptor fields to + * make a single 256-bit write per mbuf + */ + /* check the structure matches expectations */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) != + offsetof(struct rte_mbuf, rearm_data) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) != + RTE_ALIGN(offsetof(struct rte_mbuf, + rearm_data), + 16)); + /* build up data and do writes */ + __m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5, + rearm6, rearm7; + + rearm6 = _mm256_blend_epi32(mbuf_init, + _mm256_slli_si256(mbuf_flags, 8), + 0x04); + rearm4 = _mm256_blend_epi32(mbuf_init, + _mm256_slli_si256(mbuf_flags, 4), + 0x04); + rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04); + rearm0 = _mm256_blend_epi32(mbuf_init, + _mm256_srli_si256(mbuf_flags, 4), + 0x04); + + const __m256i mb4_5 = _mm512_extracti64x4_epi64(mb4_7, 0); + const __m256i mb6_7 = _mm512_extracti64x4_epi64(mb4_7, 1); + const __m256i mb0_1 = _mm512_extracti64x4_epi64(mb0_3, 0); + const __m256i mb2_3 = _mm512_extracti64x4_epi64(mb0_3, 1); + + /* permute to add in the rx_descriptor e.g. rss fields */ + rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20); + rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20); + rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20); + rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20); + + /* write to mbuf */ + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data, + rearm6); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data, + rearm4); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data, + rearm2); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data, + rearm0); + + /* repeat for the odd mbufs */ + const __m256i odd_flags = + _mm256_castsi128_si256 + (_mm256_extracti128_si256(mbuf_flags, 1)); + rearm7 = _mm256_blend_epi32(mbuf_init, + _mm256_slli_si256(odd_flags, 8), + 0x04); + rearm5 = _mm256_blend_epi32(mbuf_init, + _mm256_slli_si256(odd_flags, 4), + 0x04); + rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04); + rearm1 = _mm256_blend_epi32(mbuf_init, + _mm256_srli_si256(odd_flags, 4), + 0x04); + + /* since odd mbufs are already in hi 128-bits use blend */ + rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0); + rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0); + rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0); + rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0); + /* again write to mbufs */ + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data, + rearm7); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data, + rearm5); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data, + rearm3); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data, + rearm1); + + /* extract and record EOP bit */ + if (split_packet) { + const __m128i eop_mask = + _mm_set1_epi16(1 << ICE_RX_DESC_STATUS_EOF_S); + const __m256i eop_bits256 = _mm256_and_si256(status0_7, + eop_check); + /* pack status bits into a single 128-bit register */ + const __m128i eop_bits = + _mm_packus_epi32 + 
(_mm256_castsi256_si128(eop_bits256), + _mm256_extractf128_si256(eop_bits256, + 1)); + /** + * flip bits, and mask out the EOP bit, which is now + * a split-packet bit i.e. !EOP, rather than EOP one. + */ + __m128i split_bits = _mm_andnot_si128(eop_bits, + eop_mask); + /** + * eop bits are out of order, so we need to shuffle them + * back into order again. In doing so, only use low 8 + * bits, which acts like another pack instruction + * The original order is (hi->lo): 1,3,5,7,0,2,4,6 + * [Since we use epi8, the 16-bit positions are + * multiplied by 2 in the eop_shuffle value.] + */ + __m128i eop_shuffle = + _mm_set_epi8(/* zero hi 64b */ + 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, + /* move values to lo 64b */ + 8, 0, 10, 2, + 12, 4, 14, 6); + split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle); + *(uint64_t *)split_packet = + _mm_cvtsi128_si64(split_bits); + split_packet += ICE_DESCS_PER_LOOP_AVX; + } + + /* perform dd_check */ + status0_7 = _mm256_and_si256(status0_7, dd_check); + status0_7 = _mm256_packs_epi32(status0_7, + _mm256_setzero_si256()); + + uint64_t burst = __builtin_popcountll + (_mm_cvtsi128_si64 + (_mm256_extracti128_si256 + (status0_7, 1))); + burst += __builtin_popcountll + (_mm_cvtsi128_si64 + (_mm256_castsi256_si128(status0_7))); + received += burst; + if (burst != ICE_DESCS_PER_LOOP_AVX) + break; + } + + /* update tail pointers */ + rxq->rx_tail += received; + rxq->rx_tail &= (rxq->nb_rx_desc - 1); + if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */ + rxq->rx_tail--; + received--; + } + rxq->rxrearm_nb += received; + return received; +} + +/** + * Notice: + * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet + */ +uint16_t +ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts, NULL); +} + +/** + * vPMD receive routine that reassembles single burst of 32 scattered packets + * Notice: + * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet + */ +static uint16_t +ice_recv_scattered_burst_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct ice_rx_queue *rxq = rx_queue; + uint8_t split_flags[ICE_VPMD_RX_BURST] = {0}; + + /* get some new buffers */ + uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx512(rxq, rx_pkts, nb_pkts, + split_flags); + if (nb_bufs == 0) + return 0; + + /* happy day case, full burst + no packets to be joined */ + const uint64_t *split_fl64 = (uint64_t *)split_flags; + + if (!rxq->pkt_first_seg && + split_fl64[0] == 0 && split_fl64[1] == 0 && + split_fl64[2] == 0 && split_fl64[3] == 0) + return nb_bufs; + + /* reassemble any packets that need reassembly */ + unsigned int i = 0; + + if (!rxq->pkt_first_seg) { + /* find the first split flag, and only reassemble then */ + while (i < nb_bufs && !split_flags[i]) + i++; + if (i == nb_bufs) + return nb_bufs; + rxq->pkt_first_seg = rx_pkts[i]; + } + return i + ice_rx_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i, + &split_flags[i]); +} + +/** + * vPMD receive routine that reassembles scattered packets. 
+ * Main receive routine that can handle arbitrary burst sizes + * Notice: + * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet + */ +uint16_t +ice_recv_scattered_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + uint16_t retval = 0; + + while (nb_pkts > ICE_VPMD_RX_BURST) { + uint16_t burst = ice_recv_scattered_burst_vec_avx512(rx_queue, + rx_pkts + retval, ICE_VPMD_RX_BURST); + retval += burst; + nb_pkts -= burst; + if (burst < ICE_VPMD_RX_BURST) + return retval; + } + return retval + ice_recv_scattered_burst_vec_avx512(rx_queue, + rx_pkts + retval, nb_pkts); +} + +static inline void +ice_vtx1(volatile struct ice_tx_desc *txdp, + struct rte_mbuf *pkt, uint64_t flags) +{ + uint64_t high_qw = + (ICE_TX_DESC_DTYPE_DATA | + ((uint64_t)flags << ICE_TXD_QW1_CMD_S) | + ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S)); + + __m128i descriptor = _mm_set_epi64x(high_qw, + pkt->buf_physaddr + pkt->data_off); + _mm_store_si128((__m128i *)txdp, descriptor); +} + +static inline void +ice_vtx(volatile struct ice_tx_desc *txdp, + struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags) +{ + const uint64_t hi_qw_tmpl = (ICE_TX_DESC_DTYPE_DATA | + ((uint64_t)flags << ICE_TXD_QW1_CMD_S)); + + /* if unaligned on 32-bit boundary, do one to align */ + if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) { + ice_vtx1(txdp, *pkt, flags); + nb_pkts--, txdp++, pkt++; + } + + /* do two at a time while possible, in bursts */ + for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) { + uint64_t hi_qw3 = + hi_qw_tmpl | + ((uint64_t)pkt[3]->data_len << + ICE_TXD_QW1_TX_BUF_SZ_S); + uint64_t hi_qw2 = + hi_qw_tmpl | + ((uint64_t)pkt[2]->data_len << + ICE_TXD_QW1_TX_BUF_SZ_S); + uint64_t hi_qw1 = + hi_qw_tmpl | + ((uint64_t)pkt[1]->data_len << + ICE_TXD_QW1_TX_BUF_SZ_S); + uint64_t hi_qw0 = + hi_qw_tmpl | + ((uint64_t)pkt[0]->data_len << + ICE_TXD_QW1_TX_BUF_SZ_S); + + __m256i desc2_3 = + _mm256_set_epi64x + (hi_qw3, + pkt[3]->buf_physaddr + pkt[3]->data_off, + hi_qw2, + pkt[2]->buf_physaddr + pkt[2]->data_off); + __m256i desc0_1 = + _mm256_set_epi64x + (hi_qw1, + pkt[1]->buf_physaddr + pkt[1]->data_off, + hi_qw0, + pkt[0]->buf_physaddr + pkt[0]->data_off); + _mm256_store_si256((void *)(txdp + 2), desc2_3); + _mm256_store_si256((void *)txdp, desc0_1); + } + + /* do any last ones */ + while (nb_pkts) { + ice_vtx1(txdp, *pkt, flags); + txdp++, pkt++, nb_pkts--; + } +} + +static inline uint16_t +ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue; + volatile struct ice_tx_desc *txdp; + struct ice_tx_entry *txep; + uint16_t n, nb_commit, tx_id; + uint64_t flags = ICE_TD_CMD; + uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD; + + /* cross rx_thresh boundary is not allowed */ + nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh); + + if (txq->nb_tx_free < txq->tx_free_thresh) + ice_tx_free_bufs(txq); + + nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); + if (unlikely(nb_pkts == 0)) + return 0; + + tx_id = txq->tx_tail; + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring[tx_id]; + + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts); + + n = (uint16_t)(txq->nb_tx_desc - tx_id); + if (nb_commit >= n) { + ice_tx_backlog_entry(txep, tx_pkts, n); + + ice_vtx(txdp, tx_pkts, n - 1, flags); + tx_pkts += (n - 1); + txdp += (n - 1); + + ice_vtx1(txdp, *tx_pkts++, rs); + + nb_commit = (uint16_t)(nb_commit - n); + + tx_id = 0; + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 
1); + + /* avoid reach the end of ring */ + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring[tx_id]; + } + + ice_tx_backlog_entry(txep, tx_pkts, nb_commit); + + ice_vtx(txdp, tx_pkts, nb_commit, flags); + + tx_id = (uint16_t)(tx_id + nb_commit); + if (tx_id > txq->tx_next_rs) { + txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |= + rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) << + ICE_TXD_QW1_CMD_S); + txq->tx_next_rs = + (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh); + } + + txq->tx_tail = tx_id; + + ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail); + + return nb_pkts; +} + +uint16_t +ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_tx = 0; + struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue; + + while (nb_pkts) { + uint16_t ret, num; + + num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh); + ret = ice_xmit_fixed_burst_vec_avx512(tx_queue, + &tx_pkts[nb_tx], num); + nb_tx += ret; + nb_pkts -= ret; + if (ret < num) + break; + } + + return nb_tx; +} diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build index 99e1b773a3..077e98f5da 100644 --- a/drivers/net/ice/meson.build +++ b/drivers/net/ice/meson.build @@ -33,6 +33,17 @@ if arch_subdir == 'x86' c_args: [cflags, '-mavx2']) objs += ice_avx2_lib.extract_objects('ice_rxtx_vec_avx2.c') endif + + if dpdk_conf.has('RTE_MACHINE_CPUFLAG_AVX512F') or (not machine_args.contains('-mno-avx512f') and cc.has_argument('-mavx512f')) + cflags += ['-DCC_AVX512_SUPPORT'] + ice_avx512_lib = static_library('ice_avx512_lib', + 'ice_rxtx_vec_avx512.c', + dependencies: [static_rte_ethdev, + static_rte_kvargs, static_rte_hash], + include_directories: includes, + c_args: [cflags, '-march=skylake-avx512', '-mavx512f']) + objs += ice_avx512_lib.extract_objects('ice_rxtx_vec_avx512.c') + endif endif sources += files('ice_dcf.c', -- 2.17.1 ^ permalink raw reply [flat|nested] 25+ messages in thread
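A note on the build-system hunk above: the AVX512 path is gated twice. meson only defines CC_AVX512_SUPPORT when the compiler can emit AVX512F, and ice_set_rx_function() additionally checks the CPU flag and the user-selected max SIMD bitwidth at run time. A minimal stand-alone sketch of that double gate follows; rx_sse/rx_avx2/rx_avx512 are hypothetical stand-ins for the driver's burst functions, and max_simd_bitwidth() stands in for rte_vect_get_max_simd_bitwidth():

#include <stdint.h>
#include <stdio.h>

typedef uint16_t (*rx_burst_t)(void *rxq, void **pkts, uint16_t n);

/* hypothetical stand-ins for the SSE/AVX2/AVX512 burst functions */
static uint16_t rx_sse(void *q, void **p, uint16_t n)    { (void)q; (void)p; return n; }
static uint16_t rx_avx2(void *q, void **p, uint16_t n)   { (void)q; (void)p; return n; }
#ifdef CC_AVX512_SUPPORT
static uint16_t rx_avx512(void *q, void **p, uint16_t n) { (void)q; (void)p; return n; }
#endif

/* stand-in for rte_vect_get_max_simd_bitwidth(); DPDK lets the user cap this */
static unsigned max_simd_bitwidth(void) { return 512; }

static rx_burst_t
select_rx_burst(void)
{
    /* run-time gate: CPU capability and requested SIMD width */
    if (max_simd_bitwidth() >= 512 && __builtin_cpu_supports("avx512f")) {
#ifdef CC_AVX512_SUPPORT
        return rx_avx512;   /* build-time gate passed as well */
#else
        fprintf(stderr, "AVX512 is not supported in build env\n");
        /* fall through to the narrower paths, as the patch does */
#endif
    }
    if (max_simd_bitwidth() >= 256 && __builtin_cpu_supports("avx2"))
        return rx_avx2;
    return rx_sse;
}

int main(void)
{
    rx_burst_t burst = select_rx_burst();
    void *pkts[4];
    printf("burst returned %u\n", (unsigned)burst(NULL, pkts, 4));
    return 0;
}

__builtin_cpu_supports() is a GCC/clang builtin used here in place of rte_cpu_get_flag_enabled(); the point is only the two-level gating, not the exact API.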
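At the other end of the Rx loop, the two __builtin_popcountll() calls above are what turn the DD (descriptor done) bits into the burst size. Because the NIC writes descriptors back in order, the count of set DD bits equals the length of the completed prefix, and the loop stops on the first batch that is not fully done. A scalar model of just that step (count_dd is a made-up name):

#include <stdint.h>
#include <stdio.h>

/* scalar model of the dd_check/popcount step: count how many of the
 * 8 descriptors in a batch have their DD (descriptor-done) bit set.
 */
static unsigned
count_dd(const uint32_t status[8])
{
    uint64_t bits = 0;
    for (int i = 0; i < 8; i++)
        bits |= (uint64_t)(status[i] & 1u) << i;  /* dd_check keeps bit 0 */
    return (unsigned)__builtin_popcountll(bits);
}

int main(void)
{
    /* descriptors 0..4 done, 5..7 still owned by hardware */
    const uint32_t status[8] = { 1, 1, 1, 1, 1, 0, 0, 0 };
    unsigned burst = count_dd(status);
    printf("completed: %u\n", burst);  /* 5 != 8, so the Rx loop breaks */
    return 0;
}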
* [dpdk-dev] [PATCH v3 2/3] net/ice: add RSS hash parsing in AVX512 path 2020-10-20 10:51 ` [dpdk-dev] [PATCH v3 0/3] AVX512 vPMD on ice Leyi Rong 2020-10-20 10:51 ` [dpdk-dev] [PATCH v3 1/3] net/ice: add AVX512 vector path Leyi Rong @ 2020-10-20 10:51 ` Leyi Rong 2020-10-20 10:51 ` [dpdk-dev] [PATCH v3 3/3] net/ice: optimize Tx path on AVX512 vPMD Leyi Rong 2 siblings, 0 replies; 25+ messages in thread From: Leyi Rong @ 2020-10-20 10:51 UTC (permalink / raw) To: bruce.richardson, wenzhuo.lu, qi.z.zhang; +Cc: dev, Leyi Rong Support RSS hash parsing in the AVX512 data path. The default RXDID is set to #22, which means the RSS hash field is located in the 2nd 16B of each Flex Rx descriptor. Signed-off-by: Leyi Rong <leyi.rong@intel.com> --- drivers/net/ice/ice_rxtx_vec_avx512.c | 105 ++++++++++++++++++++++++-- 1 file changed, 98 insertions(+), 7 deletions(-) diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c index 6a9d0a8eaa..a2a5d9987a 100644 --- a/drivers/net/ice/ice_rxtx_vec_avx512.c +++ b/drivers/net/ice/ice_rxtx_vec_avx512.c @@ -176,8 +176,8 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq, /* mask to shuffle from desc. to mbuf (4 descriptors)*/ const __m512i shuf_msk = _mm512_set4_epi32 - (/* octet 12~15, 32 bits rss */ - 15 << 24 | 14 << 16 | 13 << 8 | 12, + (/* rss hash parsed separately */ + 0xFFFFFFFF, /* octet 10~11, 16 bits vlan_macip */ /* octet 4~5, 16 bits data_len */ 11 << 24 | 10 << 16 | 5 << 8 | 4, @@ -399,6 +399,11 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq, mb4_7 = _mm512_mask_blend_epi32(0x1111, mb4_7, ptype4_7); mb0_3 = _mm512_mask_blend_epi32(0x1111, mb0_3, ptype0_3); + __m256i mb4_5 = _mm512_extracti64x4_epi64(mb4_7, 0); + __m256i mb6_7 = _mm512_extracti64x4_epi64(mb4_7, 1); + __m256i mb0_1 = _mm512_extracti64x4_epi64(mb0_3, 0); + __m256i mb2_3 = _mm512_extracti64x4_epi64(mb0_3, 1); + /** * use permute/extract to get status content * After the operations, the packets status flags are in the @@ -438,6 +443,97 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq, /* merge flags */ const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags, rss_vlan_flags); + +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC + /** + * needs to load the 2nd 16B of each desc for RSS hash parsing, + * which will cause a performance drop when this branch is taken.
+ */ + if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_RSS_HASH) { + /* load bottom half of every 32B desc */ + const __m128i raw_desc_bh7 = + _mm_load_si128 + ((void *)(&rxdp[7].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh6 = + _mm_load_si128 + ((void *)(&rxdp[6].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh5 = + _mm_load_si128 + ((void *)(&rxdp[5].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh4 = + _mm_load_si128 + ((void *)(&rxdp[4].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh3 = + _mm_load_si128 + ((void *)(&rxdp[3].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh2 = + _mm_load_si128 + ((void *)(&rxdp[2].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh1 = + _mm_load_si128 + ((void *)(&rxdp[1].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh0 = + _mm_load_si128 + ((void *)(&rxdp[0].wb.status_error1)); + + __m256i raw_desc_bh6_7 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc_bh6), + raw_desc_bh7, 1); + __m256i raw_desc_bh4_5 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc_bh4), + raw_desc_bh5, 1); + __m256i raw_desc_bh2_3 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc_bh2), + raw_desc_bh3, 1); + __m256i raw_desc_bh0_1 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc_bh0), + raw_desc_bh1, 1); + + /** + * to shift the 32b RSS hash value to the + * highest 32b of each 128b before mask + */ + __m256i rss_hash6_7 = + _mm256_slli_epi64(raw_desc_bh6_7, 32); + __m256i rss_hash4_5 = + _mm256_slli_epi64(raw_desc_bh4_5, 32); + __m256i rss_hash2_3 = + _mm256_slli_epi64(raw_desc_bh2_3, 32); + __m256i rss_hash0_1 = + _mm256_slli_epi64(raw_desc_bh0_1, 32); + + __m256i rss_hash_msk = + _mm256_set_epi32(0xFFFFFFFF, 0, 0, 0, + 0xFFFFFFFF, 0, 0, 0); + + rss_hash6_7 = _mm256_and_si256 + (rss_hash6_7, rss_hash_msk); + rss_hash4_5 = _mm256_and_si256 + (rss_hash4_5, rss_hash_msk); + rss_hash2_3 = _mm256_and_si256 + (rss_hash2_3, rss_hash_msk); + rss_hash0_1 = _mm256_and_si256 + (rss_hash0_1, rss_hash_msk); + + mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7); + mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5); + mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3); + mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1); + } /* if() on RSS hash parsing */ +#endif + /** * At this point, we have the 8 sets of flags in the low 16-bits * of each 32-bit value in vlan0. @@ -471,11 +567,6 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq, _mm256_srli_si256(mbuf_flags, 4), 0x04); - const __m256i mb4_5 = _mm512_extracti64x4_epi64(mb4_7, 0); - const __m256i mb6_7 = _mm512_extracti64x4_epi64(mb4_7, 1); - const __m256i mb0_1 = _mm512_extracti64x4_epi64(mb0_3, 0); - const __m256i mb2_3 = _mm512_extracti64x4_epi64(mb0_3, 1); - /* permute to add in the rx_descriptor e.g. rss fields */ rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20); rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20); -- 2.17.1 ^ permalink raw reply [flat|nested] 25+ messages in thread
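The shift-and-mask pair above is the heart of this patch: the 32-bit hash loaded from the descriptor's second 16 bytes sits in the low dword of a 64-bit element, so _mm256_slli_epi64(x, 32) parks it exactly in the top dword of each 128-bit lane, which is where hash.rss lives in the mbuf's rx_descriptor_fields1 image, and the mask clears everything else before the OR. A stand-alone demo of that placement, with made-up hash values (needs only AVX2; compile with -mavx2):

#include <stdio.h>
#include <stdint.h>
#include <immintrin.h>

int main(void)
{
    /* model of the second 16B of two descriptors in one 256-bit register;
     * the RSS hash sits in dword 2 of each 128-bit half, the rest is noise
     */
    __m256i bh = _mm256_set_epi32(0, 0xAAAA5555, 0x11111111, 0x22222222,  /* desc 1 */
                                  0, 0xDEADBEEF, 0x33333333, 0x44444444); /* desc 0 */

    /* low dword of each 64-bit element moves to the high dword */
    __m256i sh  = _mm256_slli_epi64(bh, 32);
    __m256i msk = _mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
                                   0xFFFFFFFF, 0, 0, 0);
    __m256i hash = _mm256_and_si256(sh, msk); /* keep only dwords 3 and 7 */

    uint32_t out[8];
    _mm256_storeu_si256((__m256i *)out, hash);
    /* prints deadbeef and aaaa5555: each hash landed in its lane's top dword */
    printf("desc0 hash=%08x desc1 hash=%08x\n",
           (unsigned)out[3], (unsigned)out[7]);
    return 0;
}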
* [dpdk-dev] [PATCH v3 3/3] net/ice: optimize Tx path on AVX512 vPMD 2020-10-20 10:51 ` [dpdk-dev] [PATCH v3 0/3] AVX512 vPMD on ice Leyi Rong 2020-10-20 10:51 ` [dpdk-dev] [PATCH v3 1/3] net/ice: add AVX512 vector path Leyi Rong 2020-10-20 10:51 ` [dpdk-dev] [PATCH v3 2/3] net/ice: add RSS hash parsing in AVX512 path Leyi Rong @ 2020-10-20 10:51 ` Leyi Rong 2 siblings, 0 replies; 25+ messages in thread From: Leyi Rong @ 2020-10-20 10:51 UTC (permalink / raw) To: bruce.richardson, wenzhuo.lu, qi.z.zhang; +Cc: dev, Leyi Rong Optimize Tx path by using AVX512 instructions and vectorize the tx free bufs process. Signed-off-by: Leyi Rong <leyi.rong@intel.com> Signed-off-by: Bruce Richardson <bruce.richardson@intel.com> --- drivers/net/ice/ice_rxtx.h | 4 + drivers/net/ice/ice_rxtx_vec_avx512.c | 155 +++++++++++++++++++++----- drivers/net/ice/ice_rxtx_vec_common.h | 36 ++++-- 3 files changed, 162 insertions(+), 33 deletions(-) diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h index 7cebbc2916..23409d479a 100644 --- a/drivers/net/ice/ice_rxtx.h +++ b/drivers/net/ice/ice_rxtx.h @@ -96,6 +96,10 @@ struct ice_tx_entry { uint16_t last_id; }; +struct ice_vec_tx_entry { + struct rte_mbuf *mbuf; +}; + struct ice_tx_queue { uint16_t nb_tx_desc; /* number of TX descriptors */ rte_iova_t tx_ring_dma; /* TX ring DMA address */ diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c index a2a5d9987a..e5e7cc1482 100644 --- a/drivers/net/ice/ice_rxtx_vec_avx512.c +++ b/drivers/net/ice/ice_rxtx_vec_avx512.c @@ -756,6 +756,108 @@ ice_recv_scattered_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts, rx_pkts + retval, nb_pkts); } +static __rte_always_inline int +ice_tx_free_bufs_avx512(struct ice_tx_queue *txq) +{ + struct ice_vec_tx_entry *txep; + uint32_t n; + uint32_t i; + int nb_free = 0; + struct rte_mbuf *m, *free[ICE_TX_MAX_FREE_BUF_SZ]; + + /* check DD bits on threshold descriptor */ + if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz & + rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) != + rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE)) + return 0; + + n = txq->tx_rs_thresh; + + /* first buffer to free from S/W ring is at index + * tx_next_dd - (tx_rs_thresh - 1) + */ + txep = (void *)txq->sw_ring; + txep += txq->tx_next_dd - (n - 1); + + if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) { + struct rte_mempool *mp = txep[0].mbuf->pool; + struct rte_mempool_cache *cache = rte_mempool_default_cache(mp, + rte_lcore_id()); + void **cache_objs = &cache->objs[cache->len]; + + if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) { + rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n); + goto done; + } + + /* The cache follows the following algorithm + * 1. Add the objects to the cache + * 2. Anything greater than the cache min value (if it + * crosses the cache flush threshold) is flushed to the ring. 
+ */ + /* Add elements back into the cache */ + uint32_t copied = 0; + /* n is multiple of 32 */ + while (copied < n) { + const __m512i a = _mm512_loadu_si512(&txep[copied]); + const __m512i b = _mm512_loadu_si512(&txep[copied + 8]); + const __m512i c = _mm512_loadu_si512(&txep[copied + 16]); + const __m512i d = _mm512_loadu_si512(&txep[copied + 24]); + + _mm512_storeu_si512(&cache_objs[copied], a); + _mm512_storeu_si512(&cache_objs[copied + 8], b); + _mm512_storeu_si512(&cache_objs[copied + 16], c); + _mm512_storeu_si512(&cache_objs[copied + 24], d); + copied += 32; + } + cache->len += n; + + if (cache->len >= cache->flushthresh) { + rte_mempool_ops_enqueue_bulk + (mp, &cache->objs[cache->size], + cache->len - cache->size); + cache->len = cache->size; + } + goto done; + } + + m = rte_pktmbuf_prefree_seg(txep[0].mbuf); + if (likely(m)) { + free[0] = m; + nb_free = 1; + for (i = 1; i < n; i++) { + m = rte_pktmbuf_prefree_seg(txep[i].mbuf); + if (likely(m)) { + if (likely(m->pool == free[0]->pool)) { + free[nb_free++] = m; + } else { + rte_mempool_put_bulk(free[0]->pool, + (void *)free, + nb_free); + free[0] = m; + nb_free = 1; + } + } + } + rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free); + } else { + for (i = 1; i < n; i++) { + m = rte_pktmbuf_prefree_seg(txep[i].mbuf); + if (m) + rte_mempool_put(m->pool, m); + } + } + +done: + /* buffers were freed, update counters */ + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh); + txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh); + if (txq->tx_next_dd >= txq->nb_tx_desc) + txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); + + return txq->tx_rs_thresh; +} + static inline void ice_vtx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf *pkt, uint64_t flags) @@ -766,7 +868,7 @@ ice_vtx1(volatile struct ice_tx_desc *txdp, ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S)); __m128i descriptor = _mm_set_epi64x(high_qw, - pkt->buf_physaddr + pkt->data_off); + pkt->buf_iova + pkt->data_off); _mm_store_si128((__m128i *)txdp, descriptor); } @@ -777,13 +879,6 @@ ice_vtx(volatile struct ice_tx_desc *txdp, const uint64_t hi_qw_tmpl = (ICE_TX_DESC_DTYPE_DATA | ((uint64_t)flags << ICE_TXD_QW1_CMD_S)); - /* if unaligned on 32-bit boundary, do one to align */ - if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) { - ice_vtx1(txdp, *pkt, flags); - nb_pkts--, txdp++, pkt++; - } - - /* do two at a time while possible, in bursts */ for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) { uint64_t hi_qw3 = hi_qw_tmpl | @@ -802,20 +897,17 @@ ice_vtx(volatile struct ice_tx_desc *txdp, ((uint64_t)pkt[0]->data_len << ICE_TXD_QW1_TX_BUF_SZ_S); - __m256i desc2_3 = - _mm256_set_epi64x + __m512i desc0_3 = + _mm512_set_epi64 (hi_qw3, - pkt[3]->buf_physaddr + pkt[3]->data_off, + pkt[3]->buf_iova + pkt[3]->data_off, hi_qw2, - pkt[2]->buf_physaddr + pkt[2]->data_off); - __m256i desc0_1 = - _mm256_set_epi64x - (hi_qw1, - pkt[1]->buf_physaddr + pkt[1]->data_off, + pkt[2]->buf_iova + pkt[2]->data_off, + hi_qw1, + pkt[1]->buf_iova + pkt[1]->data_off, hi_qw0, - pkt[0]->buf_physaddr + pkt[0]->data_off); - _mm256_store_si256((void *)(txdp + 2), desc2_3); - _mm256_store_si256((void *)txdp, desc0_1); + pkt[0]->buf_iova + pkt[0]->data_off); + _mm512_storeu_si512((void *)txdp, desc0_3); } /* do any last ones */ @@ -825,13 +917,23 @@ ice_vtx(volatile struct ice_tx_desc *txdp, } } +static __rte_always_inline void +ice_tx_backlog_entry_avx512(struct ice_vec_tx_entry *txep, + struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + int i; + + for (i = 
0; i < (int)nb_pkts; ++i) + txep[i].mbuf = tx_pkts[i]; +} + static inline uint16_t ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue; volatile struct ice_tx_desc *txdp; - struct ice_tx_entry *txep; + struct ice_vec_tx_entry *txep; uint16_t n, nb_commit, tx_id; uint64_t flags = ICE_TD_CMD; uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD; @@ -840,7 +942,7 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh); if (txq->nb_tx_free < txq->tx_free_thresh) - ice_tx_free_bufs(txq); + ice_tx_free_bufs_avx512(txq); nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); if (unlikely(nb_pkts == 0)) @@ -848,13 +950,14 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, tx_id = txq->tx_tail; txdp = &txq->tx_ring[tx_id]; - txep = &txq->sw_ring[tx_id]; + txep = (void *)txq->sw_ring; + txep += tx_id; txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts); n = (uint16_t)(txq->nb_tx_desc - tx_id); if (nb_commit >= n) { - ice_tx_backlog_entry(txep, tx_pkts, n); + ice_tx_backlog_entry_avx512(txep, tx_pkts, n); ice_vtx(txdp, tx_pkts, n - 1, flags); tx_pkts += (n - 1); @@ -868,11 +971,11 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); /* avoid reach the end of ring */ - txdp = &txq->tx_ring[tx_id]; - txep = &txq->sw_ring[tx_id]; + txdp = txq->tx_ring; + txep = (void *)txq->sw_ring; } - ice_tx_backlog_entry(txep, tx_pkts, nb_commit); + ice_tx_backlog_entry_avx512(txep, tx_pkts, nb_commit); ice_vtx(txdp, tx_pkts, nb_commit, flags); diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h index e2019c8d6a..ae2ac29f2a 100644 --- a/drivers/net/ice/ice_rxtx_vec_common.h +++ b/drivers/net/ice/ice_rxtx_vec_common.h @@ -189,16 +189,38 @@ _ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq) * so need to free remains more carefully. */ i = txq->tx_next_dd - txq->tx_rs_thresh + 1; - if (txq->tx_tail < i) { - for (; i < txq->nb_tx_desc; i++) { + +#ifdef CC_AVX512_SUPPORT + struct rte_eth_dev *dev = txq->vsi->adapter->eth_dev; + + if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512) { + struct ice_vec_tx_entry *swr = (void *)txq->sw_ring; + + if (txq->tx_tail < i) { + for (; i < txq->nb_tx_desc; i++) { + rte_pktmbuf_free_seg(swr[i].mbuf); + swr[i].mbuf = NULL; + } + i = 0; + } + for (; i < txq->tx_tail; i++) { + rte_pktmbuf_free_seg(swr[i].mbuf); + swr[i].mbuf = NULL; + } + } else +#endif + { + if (txq->tx_tail < i) { + for (; i < txq->nb_tx_desc; i++) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } + i = 0; + } + for (; i < txq->tx_tail; i++) { rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); txq->sw_ring[i].mbuf = NULL; } - i = 0; - } - for (; i < txq->tx_tail; i++) { - rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); - txq->sw_ring[i].mbuf = NULL; } } -- 2.17.1 ^ permalink raw reply [flat|nested] 25+ messages in thread
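Two things make ice_tx_free_bufs_avx512() above fast. First, with DEV_TX_OFFLOAD_MBUF_FAST_FREE every completed mbuf is known to have refcount 1 and to come from a single pool, so the per-segment prefree checks disappear. Second, the pointer move into the per-lcore mempool cache is done 32 pointers at a time with four 64-byte vector stores, and only the overflow past the flush threshold goes back to the slower shared ring. A plain-C model of the cache bookkeeping, where cache_t and ring_enqueue() are illustrative stand-ins for the rte_mempool internals and memcpy models the vector copy:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define CACHE_SIZE   512     /* analogue of cache->size */
#define FLUSH_THRESH 768     /* analogue of cache->flushthresh */

typedef struct {
    unsigned len;
    void *objs[FLUSH_THRESH];
} cache_t;

static void ring_enqueue(void **objs, unsigned n) /* stand-in for mempool ops */
{
    (void)objs;
    printf("flushed %u objects to shared ring\n", n);
}

static void fast_free(cache_t *c, void **txep, unsigned n)
{
    /* the AVX512 version does this copy 32 pointers per iteration with
     * four 64-byte vector loads/stores; memcpy models the same move
     */
    memcpy(&c->objs[c->len], txep, n * sizeof(void *));
    c->len += n;

    if (c->len >= FLUSH_THRESH) {  /* spill everything above 'size' */
        ring_enqueue(&c->objs[CACHE_SIZE], c->len - CACHE_SIZE);
        c->len = CACHE_SIZE;
    }
}

int main(void)
{
    static cache_t cache;
    void *bufs[64] = {0};
    for (int i = 0; i < 16; i++)
        fast_free(&cache, bufs, 64);   /* 1024 frees trigger two flushes */
    printf("cache len: %u\n", cache.len);
    return 0;
}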
* [dpdk-dev] [PATCH v4 0/3] AVX512 vPMD on ice 2020-09-10 6:55 [dpdk-dev] [PATCH v1 0/2] AVX512 vPMD on ice Leyi Rong ` (3 preceding siblings ...) 2020-10-20 10:51 ` [dpdk-dev] [PATCH v3 0/3] AVX512 vPMD on ice Leyi Rong @ 2020-10-23 4:14 ` Leyi Rong 2020-10-23 4:14 ` [dpdk-dev] [PATCH v4 1/3] net/ice: add AVX512 vector path Leyi Rong ` (3 more replies) 4 siblings, 4 replies; 25+ messages in thread From: Leyi Rong @ 2020-10-23 4:14 UTC (permalink / raw) To: qi.z.zhang, wenzhuo.lu, bruce.richardson; +Cc: dev, Leyi Rong This patchset aims to support AVX512 vPMD on ice. --- v4: - Replace buf_physaddr with buf_iova in the 1st patch to eliminate a build error. v3: - Code rebased. v2: - No internal judgement when RTE_MACHINE_CPUFLAG_AVX512F is set in meson.build. - Add RSS hash parsing as the default RXDID is set to #22. - Use buf_iova instead of buf_physaddr, as buf_physaddr will be removed. Leyi Rong (3): net/ice: add AVX512 vector path net/ice: add RSS hash parsing in AVX512 path net/ice: optimize Tx path on AVX512 vPMD drivers/net/ice/ice_rxtx.c | 90 ++- drivers/net/ice/ice_rxtx.h | 11 + drivers/net/ice/ice_rxtx_vec_avx512.c | 1018 +++++++++++++++++++++++++ drivers/net/ice/ice_rxtx_vec_common.h | 36 +- drivers/net/ice/meson.build | 11 + 5 files changed, 1141 insertions(+), 25 deletions(-) create mode 100644 drivers/net/ice/ice_rxtx_vec_avx512.c -- 2.17.1 ^ permalink raw reply [flat|nested] 25+ messages in thread
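The buf_physaddr item in the v4 changelog deserves a line of context: rte_mbuf's buf_physaddr field was being removed in favour of buf_iova (same slot, IOVA rather than raw physical-address semantics), and every address handed to the ice descriptor ring is that base plus the data offset. A trivial sketch, using a stub struct rather than the real rte_mbuf:

#include <stdint.h>
#include <stdio.h>

/* mbuf_stub is not the real rte_mbuf; it just carries the two fields
 * the descriptor needs: the buffer's bus address and the headroom offset.
 */
struct mbuf_stub { uint64_t buf_iova; uint16_t data_off; };

static uint64_t tx_dma_addr(const struct mbuf_stub *m)
{
    return m->buf_iova + m->data_off;   /* address written into the desc */
}

int main(void)
{
    struct mbuf_stub m = { .buf_iova = 0x100000, .data_off = 128 };
    printf("desc addr = 0x%llx\n", (unsigned long long)tx_dma_addr(&m));
    return 0;
}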
* [dpdk-dev] [PATCH v4 1/3] net/ice: add AVX512 vector path 2020-10-23 4:14 ` [dpdk-dev] [PATCH v4 0/3] AVX512 vPMD on ice Leyi Rong @ 2020-10-23 4:14 ` Leyi Rong 2020-10-25 16:23 ` David Marchand 2020-10-27 8:32 ` Ali Alnubani 2020-10-23 4:14 ` [dpdk-dev] [PATCH v4 2/3] net/ice: add RSS hash parsing in AVX512 path Leyi Rong ` (2 subsequent siblings) 3 siblings, 2 replies; 25+ messages in thread From: Leyi Rong @ 2020-10-23 4:14 UTC (permalink / raw) To: qi.z.zhang, wenzhuo.lu, bruce.richardson; +Cc: dev, Leyi Rong, Bruce Richardson Add AVX512 support for the ice PMD. This patch adds ice_rxtx_vec_avx512.c to enable the ice AVX512 vPMD. The main changes focus on the Rx path compared with the AVX2 vPMD. Signed-off-by: Leyi Rong <leyi.rong@intel.com> Signed-off-by: Bruce Richardson <bruce.richardson@intel.com> --- drivers/net/ice/ice_rxtx.c | 90 ++- drivers/net/ice/ice_rxtx.h | 7 + drivers/net/ice/ice_rxtx_vec_avx512.c | 824 ++++++++++++++++++++++++++ drivers/net/ice/meson.build | 11 + 4 files changed, 914 insertions(+), 18 deletions(-) create mode 100644 drivers/net/ice/ice_rxtx_vec_avx512.c diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c index ee576c362a..f6291894cd 100644 --- a/drivers/net/ice/ice_rxtx.c +++ b/drivers/net/ice/ice_rxtx.c @@ -1930,6 +1930,10 @@ ice_dev_supported_ptypes_get(struct rte_eth_dev *dev) #ifdef RTE_ARCH_X86 if (dev->rx_pkt_burst == ice_recv_pkts_vec || dev->rx_pkt_burst == ice_recv_scattered_pkts_vec || +#ifdef CC_AVX512_SUPPORT + dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 || + dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 || +#endif dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 || dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2) return ptypes; @@ -2987,6 +2991,7 @@ ice_set_rx_function(struct rte_eth_dev *dev) #ifdef RTE_ARCH_X86 struct ice_rx_queue *rxq; int i; + bool use_avx512 = false; bool use_avx2 = false; if (rte_eal_process_type() == RTE_PROC_PRIMARY) { @@ -3001,9 +3006,18 @@ ice_set_rx_function(struct rte_eth_dev *dev) } } - if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 || - rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) && - rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256) + if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 && + rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) +#ifdef CC_AVX512_SUPPORT + use_avx512 = true; +#else + PMD_DRV_LOG(NOTICE, + "AVX512 is not supported in build env"); +#endif + if (!use_avx512 && + (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 || + rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) && + rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256) use_avx2 = true; } else { @@ -3013,20 +3027,37 @@ ice_set_rx_function(struct rte_eth_dev *dev) if (ad->rx_vec_allowed) { if (dev->data->scattered_rx) { - PMD_DRV_LOG(DEBUG, + if (use_avx512) { + PMD_DRV_LOG(NOTICE, + "Using AVX512 Vector Scattered Rx (port %d).", + dev->data->port_id); + dev->rx_pkt_burst = + ice_recv_scattered_pkts_vec_avx512; + } else { + PMD_DRV_LOG(DEBUG, "Using %sVector Scattered Rx (port %d).", use_avx2 ? "avx2 " : "", dev->data->port_id); - dev->rx_pkt_burst = use_avx2 ? + dev->rx_pkt_burst = use_avx2 ?
ice_recv_scattered_pkts_vec_avx2 : ice_recv_scattered_pkts_vec; + } } else { - PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).", + if (use_avx512) { + PMD_DRV_LOG(NOTICE, + "Using AVX512 Vector Rx (port %d).", + dev->data->port_id); + dev->rx_pkt_burst = + ice_recv_pkts_vec_avx512; + } else { + PMD_DRV_LOG(DEBUG, + "Using %sVector Rx (port %d).", use_avx2 ? "avx2 " : "", dev->data->port_id); - dev->rx_pkt_burst = use_avx2 ? - ice_recv_pkts_vec_avx2 : - ice_recv_pkts_vec; + dev->rx_pkt_burst = use_avx2 ? + ice_recv_pkts_vec_avx2 : + ice_recv_pkts_vec; + } } return; } @@ -3063,6 +3094,10 @@ static const struct { { ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" }, { ice_recv_pkts, "Scalar" }, #ifdef RTE_ARCH_X86 +#ifdef CC_AVX512_SUPPORT + { ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" }, + { ice_recv_pkts_vec_avx512, "Vector AVX512" }, +#endif { ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" }, { ice_recv_pkts_vec_avx2, "Vector AVX2" }, { ice_recv_scattered_pkts_vec, "Vector SSE Scattered" }, @@ -3167,6 +3202,7 @@ ice_set_tx_function(struct rte_eth_dev *dev) #ifdef RTE_ARCH_X86 struct ice_tx_queue *txq; int i; + bool use_avx512 = false; bool use_avx2 = false; if (rte_eal_process_type() == RTE_PROC_PRIMARY) { @@ -3181,9 +3217,18 @@ ice_set_tx_function(struct rte_eth_dev *dev) } } - if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 || - rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) && - rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256) + if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 && + rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) +#ifdef CC_AVX512_SUPPORT + use_avx512 = true; +#else + PMD_DRV_LOG(NOTICE, + "AVX512 is not supported in build env"); +#endif + if (!use_avx512 && + (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 || + rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) && + rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256) use_avx2 = true; } else { @@ -3192,12 +3237,18 @@ ice_set_tx_function(struct rte_eth_dev *dev) } if (ad->tx_vec_allowed) { - PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).", - use_avx2 ? "avx2 " : "", - dev->data->port_id); - dev->tx_pkt_burst = use_avx2 ? - ice_xmit_pkts_vec_avx2 : - ice_xmit_pkts_vec; + if (use_avx512) { + PMD_DRV_LOG(NOTICE, "Using AVX512 Vector Tx (port %d).", + dev->data->port_id); + dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512; + } else { + PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).", + use_avx2 ? "avx2 " : "", + dev->data->port_id); + dev->tx_pkt_burst = use_avx2 ? 
+ ice_xmit_pkts_vec_avx2 : + ice_xmit_pkts_vec; + } dev->tx_pkt_prepare = NULL; return; @@ -3222,6 +3273,9 @@ static const struct { { ice_xmit_pkts_simple, "Scalar Simple" }, { ice_xmit_pkts, "Scalar" }, #ifdef RTE_ARCH_X86 +#ifdef CC_AVX512_SUPPORT + { ice_xmit_pkts_vec_avx512, "Vector AVX512" }, +#endif { ice_xmit_pkts_vec_avx2, "Vector AVX2" }, { ice_xmit_pkts_vec, "Vector SSE" }, #endif diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h index 1c23c7541e..7cebbc2916 100644 --- a/drivers/net/ice/ice_rxtx.h +++ b/drivers/net/ice/ice_rxtx.h @@ -248,6 +248,13 @@ uint16_t ice_recv_scattered_pkts_vec_avx2(void *rx_queue, uint16_t nb_pkts); uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); +uint16_t ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t ice_recv_scattered_pkts_vec_avx512(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc); int ice_tx_done_cleanup(void *txq, uint32_t free_cnt); diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c new file mode 100644 index 0000000000..73c4ffce99 --- /dev/null +++ b/drivers/net/ice/ice_rxtx_vec_avx512.c @@ -0,0 +1,824 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Intel Corporation + */ + +#include "ice_rxtx_vec_common.h" + +#include <x86intrin.h> + +#ifndef __INTEL_COMPILER +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +#define ICE_DESCS_PER_LOOP_AVX 8 + +static inline void +ice_rxq_rearm(struct ice_rx_queue *rxq) +{ + int i; + uint16_t rx_id; + volatile union ice_rx_flex_desc *rxdp; + struct ice_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start]; + struct rte_mempool_cache *cache = rte_mempool_default_cache(rxq->mp, + rte_lcore_id()); + + rxdp = rxq->rx_ring + rxq->rxrearm_start; + + /* We need to pull 'n' more MBUFs into the software ring */ + if (cache->len < ICE_RXQ_REARM_THRESH) { + uint32_t req = ICE_RXQ_REARM_THRESH + (cache->size - + cache->len); + + int ret = rte_mempool_ops_dequeue_bulk(rxq->mp, + &cache->objs[cache->len], req); + if (ret == 0) { + cache->len += req; + } else { + if (rxq->rxrearm_nb + ICE_RXQ_REARM_THRESH >= + rxq->nb_rx_desc) { + __m128i dma_addr0; + + dma_addr0 = _mm_setzero_si128(); + for (i = 0; i < ICE_DESCS_PER_LOOP; i++) { + rxep[i].mbuf = &rxq->fake_mbuf; + _mm_store_si128 + ((__m128i *)&rxdp[i].read, + dma_addr0); + } + } + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += + ICE_RXQ_REARM_THRESH; + return; + } + } + + const __m512i iova_offsets = _mm512_set1_epi64 + (offsetof(struct rte_mbuf, buf_iova)); + const __m512i headroom = _mm512_set1_epi64(RTE_PKTMBUF_HEADROOM); + +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC + /* shuffle the iova into correct slots. Values 4-7 will contain + * zeros, so use 7 for a zero-value. 
+ */ + const __m512i permute_idx = _mm512_set_epi64(7, 7, 3, 1, 7, 7, 2, 0); +#else + const __m512i permute_idx = _mm512_set_epi64(7, 3, 6, 2, 5, 1, 4, 0); +#endif + + /* fill up the rxd in vector, process 8 mbufs in one loop */ + for (i = 0; i < ICE_RXQ_REARM_THRESH / 8; i++) { + const __m512i mbuf_ptrs = _mm512_loadu_si512 + (&cache->objs[cache->len - 8]); + _mm512_store_si512(rxep, mbuf_ptrs); + + /* gather iova of mbuf0-7 into one zmm reg */ + const __m512i iova_base_addrs = _mm512_i64gather_epi64 + (_mm512_add_epi64(mbuf_ptrs, iova_offsets), + 0, /* base */ + 1 /* scale */); + const __m512i iova_addrs = _mm512_add_epi64(iova_base_addrs, + headroom); +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC + const __m512i iovas0 = _mm512_castsi256_si512 + (_mm512_extracti64x4_epi64(iova_addrs, 0)); + const __m512i iovas1 = _mm512_castsi256_si512 + (_mm512_extracti64x4_epi64(iova_addrs, 1)); + + /* permute leaves iova 2-3 in hdr_addr of desc 0-1 + * but these are ignored by driver since header split not + * enabled. Similarly for desc 4 & 5. + */ + const __m512i desc0_1 = _mm512_permutexvar_epi64 + (permute_idx, iovas0); + const __m512i desc2_3 = _mm512_bsrli_epi128(desc0_1, 8); + + const __m512i desc4_5 = _mm512_permutexvar_epi64 + (permute_idx, iovas1); + const __m512i desc6_7 = _mm512_bsrli_epi128(desc4_5, 8); + + _mm512_store_si512((void *)rxdp, desc0_1); + _mm512_store_si512((void *)(rxdp + 2), desc2_3); + _mm512_store_si512((void *)(rxdp + 4), desc4_5); + _mm512_store_si512((void *)(rxdp + 6), desc6_7); +#else + /* permute leaves iova 4-7 in hdr_addr of desc 0-3 + * but these are ignored by driver since header split not + * enabled. + */ + const __m512i desc0_3 = _mm512_permutexvar_epi64 + (permute_idx, iova_addrs); + const __m512i desc4_7 = _mm512_bsrli_epi128(desc0_3, 8); + + _mm512_store_si512((void *)rxdp, desc0_3); + _mm512_store_si512((void *)(rxdp + 4), desc4_7); +#endif + rxep += 8, rxdp += 8, cache->len -= 8; + } + + rxq->rxrearm_start += ICE_RXQ_REARM_THRESH; + if (rxq->rxrearm_start >= rxq->nb_rx_desc) + rxq->rxrearm_start = 0; + + rxq->rxrearm_nb -= ICE_RXQ_REARM_THRESH; + + rx_id = (uint16_t)((rxq->rxrearm_start == 0) ? 
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1)); + + /* Update the tail pointer on the NIC */ + ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id); +} + +static inline uint16_t +_ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts, uint8_t *split_packet) +{ + const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; + const __m256i mbuf_init = _mm256_set_epi64x(0, 0, + 0, rxq->mbuf_initializer); + struct ice_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail]; + volatile union ice_rx_flex_desc *rxdp = rxq->rx_ring + rxq->rx_tail; + + rte_prefetch0(rxdp); + + /* nb_pkts has to be floor-aligned to ICE_DESCS_PER_LOOP_AVX */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, ICE_DESCS_PER_LOOP_AVX); + + /* See if we need to rearm the RX queue - gives the prefetch a bit + * of time to act + */ + if (rxq->rxrearm_nb > ICE_RXQ_REARM_THRESH) + ice_rxq_rearm(rxq); + + /* Before we start moving massive data around, check to see if + * there is actually a packet available + */ + if (!(rxdp->wb.status_error0 & + rte_cpu_to_le_32(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S))) + return 0; + + /* constants used in processing loop */ + const __m512i crc_adjust = + _mm512_set4_epi32 + (0, /* ignore non-length fields */ + -rxq->crc_len, /* sub crc on data_len */ + -rxq->crc_len, /* sub crc on pkt_len */ + 0 /* ignore non-length fields */ + ); + + /* 8 packets DD mask, LSB in each 32-bit value */ + const __m256i dd_check = _mm256_set1_epi32(1); + + /* 8 packets EOP mask, second-LSB in each 32-bit value */ + const __m256i eop_check = _mm256_slli_epi32(dd_check, + ICE_RX_DESC_STATUS_EOF_S); + + /* mask to shuffle from desc. to mbuf (4 descriptors)*/ + const __m512i shuf_msk = + _mm512_set4_epi32 + (/* octet 12~15, 32 bits rss */ + 15 << 24 | 14 << 16 | 13 << 8 | 12, + /* octet 10~11, 16 bits vlan_macip */ + /* octet 4~5, 16 bits data_len */ + 11 << 24 | 10 << 16 | 5 << 8 | 4, + /* skip hi 16 bits pkt_len, zero out */ + /* octet 4~5, 16 bits pkt_len */ + 0xFFFF << 16 | 5 << 8 | 4, + /* pkt_type set as unknown */ + 0xFFFFFFFF + ); + + /** + * compile-time check the above crc and shuffle layout is correct. + * NOTE: the first field (lowest address) is given last in set_epi + * calls above. + */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12); + + /* Status/Error flag masks */ + /** + * mask everything except Checksum Reports, RSS indication + * and VLAN indication. + * bit6:4 for IP/L4 checksum errors. + * bit12 is for RSS indication. + * bit13 is for VLAN indication. + */ + const __m256i flags_mask = + _mm256_set1_epi32((7 << 4) | (1 << 12) | (1 << 13)); + /** + * data to be shuffled by the result of the flags mask shifted by 4 + * bits. This gives use the l3_l4 flags. 
+ */ + const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, + /* shift right 1 bit to make sure it not exceed 255 */ + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, + /* 2nd 128-bits */ + 0, 0, 0, 0, 0, 0, 0, 0, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1); + const __m256i cksum_mask = + _mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_EIP_CKSUM_BAD); + /** + * data to be shuffled by result of flag mask, shifted down 12. + * If RSS(bit12)/VLAN(bit13) are set, + * shuffle moves appropriate flags in place. + */ + const __m256i rss_vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_RSS_HASH, 0, + /* 2nd 128-bits */ + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, + PKT_RX_RSS_HASH, 0); + + uint16_t i, received; + + for (i = 0, received = 0; i < nb_pkts; + i += ICE_DESCS_PER_LOOP_AVX, + rxdp += ICE_DESCS_PER_LOOP_AVX) { + /* step 1, copy over 8 mbuf pointers to rx_pkts array */ + _mm256_storeu_si256((void *)&rx_pkts[i], + _mm256_loadu_si256((void *)&sw_ring[i])); +#ifdef RTE_ARCH_X86_64 + _mm256_storeu_si256 + ((void *)&rx_pkts[i + 4], + _mm256_loadu_si256((void *)&sw_ring[i + 4])); +#endif + + __m512i raw_desc0_3, raw_desc4_7; + __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7; + + /* load in descriptors, in reverse order */ + const __m128i raw_desc7 = + _mm_load_si128((void *)(rxdp + 7)); + rte_compiler_barrier(); + const __m128i raw_desc6 = + _mm_load_si128((void *)(rxdp + 6)); + rte_compiler_barrier(); + const __m128i raw_desc5 = + _mm_load_si128((void *)(rxdp + 5)); + rte_compiler_barrier(); + const __m128i raw_desc4 = + _mm_load_si128((void *)(rxdp + 4)); + rte_compiler_barrier(); + const __m128i raw_desc3 = + _mm_load_si128((void *)(rxdp + 3)); + rte_compiler_barrier(); + const __m128i raw_desc2 = + _mm_load_si128((void *)(rxdp + 2)); + rte_compiler_barrier(); + const __m128i raw_desc1 = + _mm_load_si128((void *)(rxdp + 1)); + rte_compiler_barrier(); + const __m128i raw_desc0 = + _mm_load_si128((void *)(rxdp + 0)); + + raw_desc6_7 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc6), + raw_desc7, 1); + raw_desc4_5 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc4), + raw_desc5, 1); + raw_desc2_3 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc2), + raw_desc3, 1); + raw_desc0_1 = + _mm256_inserti128_si256 
+ (_mm256_castsi128_si256(raw_desc0), + raw_desc1, 1); + + raw_desc4_7 = + _mm512_inserti64x4 + (_mm512_castsi256_si512(raw_desc4_5), + raw_desc6_7, 1); + raw_desc0_3 = + _mm512_inserti64x4 + (_mm512_castsi256_si512(raw_desc0_1), + raw_desc2_3, 1); + + if (split_packet) { + int j; + + for (j = 0; j < ICE_DESCS_PER_LOOP_AVX; j++) + rte_mbuf_prefetch_part2(rx_pkts[i + j]); + } + + /** + * convert descriptors 0-7 into mbufs, re-arrange fields. + * Then write into the mbuf. + */ + __m512i mb4_7 = _mm512_shuffle_epi8(raw_desc4_7, shuf_msk); + __m512i mb0_3 = _mm512_shuffle_epi8(raw_desc0_3, shuf_msk); + + mb4_7 = _mm512_add_epi32(mb4_7, crc_adjust); + mb0_3 = _mm512_add_epi32(mb0_3, crc_adjust); + + /** + * to get packet types, ptype is located in bit16-25 + * of each 128bits + */ + const __m512i ptype_mask = + _mm512_set1_epi16(ICE_RX_FLEX_DESC_PTYPE_M); + + /** + * to get packet types, ptype is located in bit16-25 + * of each 128bits + */ + const __m512i ptypes4_7 = + _mm512_and_si512(raw_desc4_7, ptype_mask); + const __m512i ptypes0_3 = + _mm512_and_si512(raw_desc0_3, ptype_mask); + + const __m256i ptypes6_7 = + _mm512_extracti64x4_epi64(ptypes4_7, 1); + const __m256i ptypes4_5 = + _mm512_extracti64x4_epi64(ptypes4_7, 0); + const __m256i ptypes2_3 = + _mm512_extracti64x4_epi64(ptypes0_3, 1); + const __m256i ptypes0_1 = + _mm512_extracti64x4_epi64(ptypes0_3, 0); + const uint16_t ptype7 = _mm256_extract_epi16(ptypes6_7, 9); + const uint16_t ptype6 = _mm256_extract_epi16(ptypes6_7, 1); + const uint16_t ptype5 = _mm256_extract_epi16(ptypes4_5, 9); + const uint16_t ptype4 = _mm256_extract_epi16(ptypes4_5, 1); + const uint16_t ptype3 = _mm256_extract_epi16(ptypes2_3, 9); + const uint16_t ptype2 = _mm256_extract_epi16(ptypes2_3, 1); + const uint16_t ptype1 = _mm256_extract_epi16(ptypes0_1, 9); + const uint16_t ptype0 = _mm256_extract_epi16(ptypes0_1, 1); + + const __m512i ptype4_7 = _mm512_set_epi32 + (0, 0, 0, ptype_tbl[ptype7], + 0, 0, 0, ptype_tbl[ptype6], + 0, 0, 0, ptype_tbl[ptype5], + 0, 0, 0, ptype_tbl[ptype4]); + const __m512i ptype0_3 = _mm512_set_epi32 + (0, 0, 0, ptype_tbl[ptype3], + 0, 0, 0, ptype_tbl[ptype2], + 0, 0, 0, ptype_tbl[ptype1], + 0, 0, 0, ptype_tbl[ptype0]); + + mb4_7 = _mm512_mask_blend_epi32(0x1111, mb4_7, ptype4_7); + mb0_3 = _mm512_mask_blend_epi32(0x1111, mb0_3, ptype0_3); + + /** + * use permute/extract to get status content + * After the operations, the packets status flags are in the + * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6] + */ + /* merge the status bits into one register */ + const __m512i status_permute_msk = _mm512_set_epi32 + (0, 0, 0, 0, + 0, 0, 0, 0, + 22, 30, 6, 14, + 18, 26, 2, 10); + const __m512i raw_status0_7 = _mm512_permutex2var_epi32 + (raw_desc4_7, status_permute_msk, raw_desc0_3); + __m256i status0_7 = _mm512_extracti64x4_epi64 + (raw_status0_7, 0); + + /* now do flag manipulation */ + + /* get only flag/error bits we want */ + const __m256i flag_bits = + _mm256_and_si256(status0_7, flags_mask); + /** + * l3_l4_error flags, shuffle, then shift to correct adjustment + * of flags in flags_shuf, and finally mask out extra bits + */ + __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf, + _mm256_srli_epi32(flag_bits, 4)); + l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1); + l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask); + /* set rss and vlan flags */ + const __m256i rss_vlan_flag_bits = + _mm256_srli_epi32(flag_bits, 12); + const __m256i rss_vlan_flags = + _mm256_shuffle_epi8(rss_vlan_flags_shuf, + rss_vlan_flag_bits); + + /* merge 
flags */
+        const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
+                rss_vlan_flags);
+
+        /**
+         * At this point, we have the 8 sets of flags in the low 16 bits
+         * of each 32-bit value in vlan0.
+         * We want to extract these, and merge them with the mbuf init
+         * data so we can do a single write to the mbuf to set the flags
+         * and all the other initialization fields. Extracting the
+         * appropriate flags means that we have to do a shift and blend
+         * for each mbuf before we do the write. However, we can also
+         * add in the previously computed rx_descriptor fields to
+         * make a single 256-bit write per mbuf.
+         */
+        /* check the structure matches expectations */
+        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+                offsetof(struct rte_mbuf, rearm_data) + 8);
+        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
+                RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
+        /* build up data and do writes */
+        __m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
+                rearm6, rearm7;
+
+        rearm6 = _mm256_blend_epi32(mbuf_init,
+                _mm256_slli_si256(mbuf_flags, 8), 0x04);
+        rearm4 = _mm256_blend_epi32(mbuf_init,
+                _mm256_slli_si256(mbuf_flags, 4), 0x04);
+        rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04);
+        rearm0 = _mm256_blend_epi32(mbuf_init,
+                _mm256_srli_si256(mbuf_flags, 4), 0x04);
+
+        const __m256i mb4_5 = _mm512_extracti64x4_epi64(mb4_7, 0);
+        const __m256i mb6_7 = _mm512_extracti64x4_epi64(mb4_7, 1);
+        const __m256i mb0_1 = _mm512_extracti64x4_epi64(mb0_3, 0);
+        const __m256i mb2_3 = _mm512_extracti64x4_epi64(mb0_3, 1);
+
+        /* permute to add in the rx_descriptor e.g. rss fields */
+        rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
+        rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
+        rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
+        rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);
+
+        /* write to mbuf */
+        _mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data, rearm6);
+        _mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data, rearm4);
+        _mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data, rearm2);
+        _mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data, rearm0);
+
+        /* repeat for the odd mbufs */
+        const __m256i odd_flags =
+                _mm256_castsi128_si256
+                        (_mm256_extracti128_si256(mbuf_flags, 1));
+        rearm7 = _mm256_blend_epi32(mbuf_init,
+                _mm256_slli_si256(odd_flags, 8), 0x04);
+        rearm5 = _mm256_blend_epi32(mbuf_init,
+                _mm256_slli_si256(odd_flags, 4), 0x04);
+        rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04);
+        rearm1 = _mm256_blend_epi32(mbuf_init,
+                _mm256_srli_si256(odd_flags, 4), 0x04);
+
+        /* since odd mbufs are already in the hi 128 bits, use blend */
+        rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0);
+        rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0);
+        rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
+        rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
+        /* again write to mbufs */
+        _mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data, rearm7);
+        _mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data, rearm5);
+        _mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data, rearm3);
+        _mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data, rearm1);
+
+        /* extract and record EOP bit */
+        if (split_packet) {
+            const __m128i eop_mask =
+                    _mm_set1_epi16(1 << ICE_RX_DESC_STATUS_EOF_S);
+            const __m256i eop_bits256 = _mm256_and_si256(status0_7,
+                    eop_check);
+            /* pack status bits into a single 128-bit register */
+            const __m128i eop_bits =
+                    _mm_packus_epi32(_mm256_castsi256_si128(eop_bits256),
+                            _mm256_extractf128_si256(eop_bits256, 1));
+            /**
+             * flip bits, and mask out the EOP bit, which is now
+             * a split-packet bit, i.e. !EOP rather than EOP.
+             */
+            __m128i split_bits = _mm_andnot_si128(eop_bits, eop_mask);
+            /**
+             * eop bits are out of order, so we need to shuffle them
+             * back into order again. In doing so, only use low 8
+             * bits, which acts like another pack instruction.
+             * The original order is (hi->lo): 1,3,5,7,0,2,4,6
+             * [Since we use epi8, the 16-bit positions are
+             * multiplied by 2 in the eop_shuffle value.]
+             */
+            __m128i eop_shuffle =
+                    _mm_set_epi8(/* zero hi 64b */
+                            0xFF, 0xFF, 0xFF, 0xFF,
+                            0xFF, 0xFF, 0xFF, 0xFF,
+                            /* move values to lo 64b */
+                            8, 0, 10, 2,
+                            12, 4, 14, 6);
+            split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
+            *(uint64_t *)split_packet = _mm_cvtsi128_si64(split_bits);
+            split_packet += ICE_DESCS_PER_LOOP_AVX;
+        }
+
+        /* perform dd_check */
+        status0_7 = _mm256_and_si256(status0_7, dd_check);
+        status0_7 = _mm256_packs_epi32(status0_7,
+                _mm256_setzero_si256());
+
+        uint64_t burst = __builtin_popcountll(_mm_cvtsi128_si64
+                (_mm256_extracti128_si256(status0_7, 1)));
+        burst += __builtin_popcountll(_mm_cvtsi128_si64
+                (_mm256_castsi256_si128(status0_7)));
+        received += burst;
+        if (burst != ICE_DESCS_PER_LOOP_AVX)
+            break;
+    }
+
+    /* update tail pointers */
+    rxq->rx_tail += received;
+    rxq->rx_tail &= (rxq->nb_rx_desc - 1);
+    if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep AVX2 aligned */
+        rxq->rx_tail--;
+        received--;
+    }
+    rxq->rxrearm_nb += received;
+    return received;
+}
+
+/**
+ * Notice:
+ * - if nb_pkts < ICE_DESCS_PER_LOOP, just return no packets
+ */
+uint16_t
+ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
+             uint16_t nb_pkts)
+{
+    return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+/**
+ * vPMD receive routine that reassembles a single burst of 32 scattered packets
+ * Notice:
+ * - if nb_pkts < ICE_DESCS_PER_LOOP, just return no packets
+ */
+static uint16_t
+ice_recv_scattered_burst_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
+                    uint16_t nb_pkts)
+{
+    struct ice_rx_queue *rxq = rx_queue;
+    uint8_t split_flags[ICE_VPMD_RX_BURST] = {0};
+
+    /* get some new buffers */
+    uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx512(rxq, rx_pkts, nb_pkts,
+            split_flags);
+    if (nb_bufs == 0)
+        return 0;
+
+    /* happy day case, full burst + no packets to be joined */
+    const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+    if (!rxq->pkt_first_seg &&
+        split_fl64[0] == 0 && split_fl64[1] == 0 &&
+        split_fl64[2] == 0 && split_fl64[3] == 0)
+        return nb_bufs;
+
+    /* reassemble any packets that need reassembly */
+    unsigned int i = 0;
+
+    if (!rxq->pkt_first_seg) {
+        /* find the first split flag, and only reassemble from there */
+        while (i < nb_bufs && !split_flags[i])
+            i++;
+        if (i == nb_bufs)
+            return nb_bufs;
+        rxq->pkt_first_seg = rx_pkts[i];
+    }
+    return i + ice_rx_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+            &split_flags[i]);
+}
+
+/**
+ * vPMD receive routine that reassembles scattered packets.
+ * Main receive routine that can handle arbitrary burst sizes
+ * Notice:
+ * - if nb_pkts < ICE_DESCS_PER_LOOP, just return no packets
+ */
+uint16_t
+ice_recv_scattered_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
+                   uint16_t nb_pkts)
+{
+    uint16_t retval = 0;
+
+    while (nb_pkts > ICE_VPMD_RX_BURST) {
+        uint16_t burst = ice_recv_scattered_burst_vec_avx512(rx_queue,
+                rx_pkts + retval, ICE_VPMD_RX_BURST);
+        retval += burst;
+        nb_pkts -= burst;
+        if (burst < ICE_VPMD_RX_BURST)
+            return retval;
+    }
+    return retval + ice_recv_scattered_burst_vec_avx512(rx_queue,
+            rx_pkts + retval, nb_pkts);
+}
+
+static inline void
+ice_vtx1(volatile struct ice_tx_desc *txdp,
+     struct rte_mbuf *pkt, uint64_t flags)
+{
+    uint64_t high_qw =
+        (ICE_TX_DESC_DTYPE_DATA |
+         ((uint64_t)flags << ICE_TXD_QW1_CMD_S) |
+         ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S));
+
+    __m128i descriptor = _mm_set_epi64x(high_qw,
+            pkt->buf_iova + pkt->data_off);
+    _mm_store_si128((__m128i *)txdp, descriptor);
+}
+
+static inline void
+ice_vtx(volatile struct ice_tx_desc *txdp,
+    struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+    const uint64_t hi_qw_tmpl = (ICE_TX_DESC_DTYPE_DATA |
+            ((uint64_t)flags << ICE_TXD_QW1_CMD_S));
+
+    /* if unaligned on a 32-byte boundary, do one to align */
+    if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) {
+        ice_vtx1(txdp, *pkt, flags);
+        nb_pkts--, txdp++, pkt++;
+    }
+
+    /* do four at a time while possible, in bursts */
+    for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) {
+        uint64_t hi_qw3 = hi_qw_tmpl |
+                ((uint64_t)pkt[3]->data_len << ICE_TXD_QW1_TX_BUF_SZ_S);
+        uint64_t hi_qw2 = hi_qw_tmpl |
+                ((uint64_t)pkt[2]->data_len << ICE_TXD_QW1_TX_BUF_SZ_S);
+        uint64_t hi_qw1 = hi_qw_tmpl |
+                ((uint64_t)pkt[1]->data_len << ICE_TXD_QW1_TX_BUF_SZ_S);
+        uint64_t hi_qw0 = hi_qw_tmpl |
+                ((uint64_t)pkt[0]->data_len << ICE_TXD_QW1_TX_BUF_SZ_S);
+
+        __m256i desc2_3 =
+            _mm256_set_epi64x
+                (hi_qw3,
+                 pkt[3]->buf_iova + pkt[3]->data_off,
+                 hi_qw2,
+                 pkt[2]->buf_iova + pkt[2]->data_off);
+        __m256i desc0_1 =
+            _mm256_set_epi64x
+                (hi_qw1,
+                 pkt[1]->buf_iova + pkt[1]->data_off,
+                 hi_qw0,
+                 pkt[0]->buf_iova + pkt[0]->data_off);
+        _mm256_store_si256((void *)(txdp + 2), desc2_3);
+        _mm256_store_si256((void *)txdp, desc0_1);
+    }
+
+    /* do any last ones */
+    while (nb_pkts) {
+        ice_vtx1(txdp, *pkt, flags);
+        txdp++, pkt++, nb_pkts--;
+    }
+}
+
+static inline uint16_t
+ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
+                uint16_t nb_pkts)
+{
+    struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
+    volatile struct ice_tx_desc *txdp;
+    struct ice_tx_entry *txep;
+    uint16_t n, nb_commit, tx_id;
+    uint64_t flags = ICE_TD_CMD;
+    uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD;
+
+    /* crossing the rs_thresh boundary is not allowed */
+    nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+    if (txq->nb_tx_free < txq->tx_free_thresh)
+        ice_tx_free_bufs(txq);
+
+    nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+    if (unlikely(nb_pkts == 0))
+        return 0;
+
+    tx_id = txq->tx_tail;
+    txdp = &txq->tx_ring[tx_id];
+    txep = &txq->sw_ring[tx_id];
+
+    txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+    n = (uint16_t)(txq->nb_tx_desc - tx_id);
+    if (nb_commit >= n) {
+        ice_tx_backlog_entry(txep, tx_pkts, n);
+
+        ice_vtx(txdp, tx_pkts, n - 1, flags);
+        tx_pkts += (n - 1);
+        txdp += (n - 1);
+
+        ice_vtx1(txdp, *tx_pkts++, rs);
+
+        nb_commit = (uint16_t)(nb_commit - n);
+
+        tx_id = 0;
+        txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+        /* avoid reaching the end of the ring */
+        txdp = &txq->tx_ring[tx_id];
+        txep = &txq->sw_ring[tx_id];
+    }
+
+    ice_tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+    ice_vtx(txdp, tx_pkts, nb_commit, flags);
+
+    tx_id = (uint16_t)(tx_id + nb_commit);
+    if (tx_id > txq->tx_next_rs) {
+        txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+            rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
+                    ICE_TXD_QW1_CMD_S);
+        txq->tx_next_rs =
+            (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
+    }
+
+    txq->tx_tail = tx_id;
+
+    ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+    return nb_pkts;
+}
+
+uint16_t
+ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
+             uint16_t nb_pkts)
+{
+    uint16_t nb_tx = 0;
+    struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
+
+    while (nb_pkts) {
+        uint16_t ret, num;
+
+        num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+        ret = ice_xmit_fixed_burst_vec_avx512(tx_queue,
+                &tx_pkts[nb_tx], num);
+        nb_tx += ret;
+        nb_pkts -= ret;
+        if (ret < num)
+            break;
+    }
+
+    return nb_tx;
+}
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 254595af85..85e8baf912 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -34,6 +34,17 @@ if arch_subdir == 'x86'
 			c_args: [cflags, '-mavx2'])
 		objs += ice_avx2_lib.extract_objects('ice_rxtx_vec_avx2.c')
 	endif
+
+	if dpdk_conf.has('RTE_MACHINE_CPUFLAG_AVX512F') or (not machine_args.contains('-mno-avx512f') and cc.has_argument('-mavx512f'))
+		cflags += ['-DCC_AVX512_SUPPORT']
+		ice_avx512_lib = static_library('ice_avx512_lib',
+				'ice_rxtx_vec_avx512.c',
+				dependencies: [static_rte_ethdev,
+					static_rte_kvargs, static_rte_hash],
+				include_directories: includes,
+				c_args: [cflags, '-march=skylake-avx512', '-mavx512f'])
+		objs += ice_avx512_lib.extract_objects('ice_rxtx_vec_avx512.c')
+	endif
 endif
 
 sources += files('ice_dcf.c',
-- 
2.17.1

^ permalink raw reply	[flat|nested] 25+ messages in thread
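For readers tracing how this path is chosen at runtime: the driver only installs the AVX512 burst functions when the build produced them, the CPU reports AVX512F, and the EAL allows 512-bit SIMD. The sketch below mirrors the selection logic from ice_set_rx_function() above; the helper itself is ours for illustration, not a driver API, and it assumes the dependency patchset that provides rte_get_max_simd_bitwidth() and RTE_MAX_512_SIMD.

    /* Sketch (ours) of the runtime gate applied before picking the
     * AVX512 Rx/Tx burst functions. CC_AVX512_SUPPORT is the build-time
     * half of the condition; the two rte_* checks are the runtime half.
     */
    #include <stdbool.h>
    #include <rte_cpuflags.h>

    static bool
    avx512_path_eligible(void)
    {
    #ifdef CC_AVX512_SUPPORT
        /* CPU must report AVX512F and EAL must allow 512-bit SIMD */
        return rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
               rte_get_max_simd_bitwidth() >= RTE_MAX_512_SIMD;
    #else
        /* AVX512 is not supported in the build environment */
        return false;
    #endif
    }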
* Re: [dpdk-dev] [PATCH v4 1/3] net/ice: add AVX512 vector path
  2020-10-23  4:14 ` [dpdk-dev] [PATCH v4 1/3] net/ice: add AVX512 vector path Leyi Rong
@ 2020-10-25 16:23   ` David Marchand
  2020-10-26  7:12     ` Rong, Leyi
  2020-10-27  8:32     ` Ali Alnubani
  1 sibling, 1 reply; 25+ messages in thread
From: David Marchand @ 2020-10-25 16:23 UTC (permalink / raw)
To: Leyi Rong, Qi Zhang, Yigit, Ferruh
Cc: Wenzhuo Lu, dev, Bruce Richardson, Thomas Monjalon

Hello Leyi, Qi, Ferruh,

On Fri, Oct 23, 2020 at 6:37 AM Leyi Rong <leyi.rong@intel.com> wrote:
> diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
> index 254595af85..85e8baf912 100644
> --- a/drivers/net/ice/meson.build
> +++ b/drivers/net/ice/meson.build
> @@ -34,6 +34,17 @@ if arch_subdir == 'x86'
>  			c_args: [cflags, '-mavx2'])
>  		objs += ice_avx2_lib.extract_objects('ice_rxtx_vec_avx2.c')
>  	endif
> +
> +	if dpdk_conf.has('RTE_MACHINE_CPUFLAG_AVX512F') or (not machine_args.contains('-mno-avx512f') and cc.has_argument('-mavx512f'))
> +		cflags += ['-DCC_AVX512_SUPPORT']
> +		ice_avx512_lib = static_library('ice_avx512_lib',
> +				'ice_rxtx_vec_avx512.c',
> +				dependencies: [static_rte_ethdev,
> +					static_rte_kvargs, static_rte_hash],
> +				include_directories: includes,
> +				c_args: [cflags, '-march=skylake-avx512', '-mavx512f'])
> +		objs += ice_avx512_lib.extract_objects('ice_rxtx_vec_avx512.c')
> +	endif
>  endif
>
>  sources += files('ice_dcf.c',
> --
> 2.17.1
>

RTE_MACHINE_CPUFLAG_AVX512F can be removed.
Worth fixing before hitting the main tree.

Thanks.

-- 
David Marchand

^ permalink raw reply	[flat|nested] 25+ messages in thread
* Re: [dpdk-dev] [PATCH v4 1/3] net/ice: add AVX512 vector path
  2020-10-25 16:23   ` David Marchand
@ 2020-10-26  7:12     ` Rong, Leyi
  2020-10-26  8:09       ` David Marchand
  2020-10-27 10:22       ` Ferruh Yigit
  0 siblings, 2 replies; 25+ messages in thread
From: Rong, Leyi @ 2020-10-26 7:12 UTC (permalink / raw)
To: David Marchand, Zhang, Qi Z, Yigit, Ferruh
Cc: Lu, Wenzhuo, dev, Richardson, Bruce, Thomas Monjalon

> -----Original Message-----
> From: David Marchand <david.marchand@redhat.com>
> Sent: Monday, October 26, 2020 12:24 AM
> To: Rong, Leyi <leyi.rong@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Yigit,
> Ferruh <ferruh.yigit@intel.com>
> Cc: Lu, Wenzhuo <wenzhuo.lu@intel.com>; dev <dev@dpdk.org>; Richardson,
> Bruce <bruce.richardson@intel.com>; Thomas Monjalon
> <thomas@monjalon.net>
> Subject: Re: [dpdk-dev] [PATCH v4 1/3] net/ice: add AVX512 vector path
>
> Hello Leyi, Qi, Ferruh,
>
> On Fri, Oct 23, 2020 at 6:37 AM Leyi Rong <leyi.rong@intel.com> wrote:
> > diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
> > index 254595af85..85e8baf912 100644
> > --- a/drivers/net/ice/meson.build
> > +++ b/drivers/net/ice/meson.build
> > @@ -34,6 +34,17 @@ if arch_subdir == 'x86'
> >  			c_args: [cflags, '-mavx2'])
> >  		objs += ice_avx2_lib.extract_objects('ice_rxtx_vec_avx2.c')
> >  	endif
> > +
> > +	if dpdk_conf.has('RTE_MACHINE_CPUFLAG_AVX512F') or (not
> machine_args.contains('-mno-avx512f') and cc.has_argument('-mavx512f'))
> > +		cflags += ['-DCC_AVX512_SUPPORT']
> > +		ice_avx512_lib = static_library('ice_avx512_lib',
> > +				'ice_rxtx_vec_avx512.c',
> > +				dependencies: [static_rte_ethdev,
> > +					static_rte_kvargs, static_rte_hash],
> > +				include_directories: includes,
> > +				c_args: [cflags, '-march=skylake-avx512', '-mavx512f'])
> > +		objs += ice_avx512_lib.extract_objects('ice_rxtx_vec_avx512.c')
> > +	endif
> >  endif
> >
> >  sources += files('ice_dcf.c',
> > --
> > 2.17.1
> >
>
> RTE_MACHINE_CPUFLAG_AVX512F can be removed.
> Worth fixing before hitting the main tree.
>
> Thanks.
>
> --
> David Marchand

Hello David,

Would prefer using __AVX512F__ instead of RTE_MACHINE_CPUFLAG_AVX512F here rather than remove the RTE_MACHINE_CPUFLAG_ macro directly to check the CPU capability.
So the judgment statement will be

if cc.get_define('__AVX512F__', args: machine_args) != '' or (not machine_args.contains('-mno-avx512f') and cc.has_argument('-mavx512f'))

what do you think?


Hello Ferruh,

As the patchset is already merged into dpdk-next-net, I'm going to make another patch for this if it's accepted?

^ permalink raw reply	[flat|nested] 25+ messages in thread
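For context on what the proposed check probes: __AVX512F__ is a macro the compiler predefines whenever the active machine flags already enable AVX512F, so cc.get_define('__AVX512F__', args: machine_args) returning a non-empty string means the default build has the ISA without extra flags. On the C side, the same signal is an ordinary preprocessor guard; a minimal illustration (the guard layout below is ours, not driver code):

    /* A translation unit built with -march=skylake-avx512 or -mavx512f
     * sees __AVX512F__ predefined and can use the intrinsics directly.
     * Otherwise the driver compiles its AVX512 file separately and
     * gates call sites on CC_AVX512_SUPPORT instead.
     */
    #include <immintrin.h>

    #ifdef __AVX512F__
    static inline __m512i
    zero512(void)
    {
        return _mm512_setzero_si512();
    }
    #endif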
* Re: [dpdk-dev] [PATCH v4 1/3] net/ice: add AVX512 vector path
  2020-10-26  7:12     ` Rong, Leyi
@ 2020-10-26  8:09       ` David Marchand
  2020-10-27 10:19         ` Bruce Richardson
  0 siblings, 1 reply; 25+ messages in thread
From: David Marchand @ 2020-10-26 8:09 UTC (permalink / raw)
To: Rong, Leyi, Richardson, Bruce
Cc: Zhang, Qi Z, Yigit, Ferruh, Lu, Wenzhuo, dev, Thomas Monjalon

On Mon, Oct 26, 2020 at 8:13 AM Rong, Leyi <leyi.rong@intel.com> wrote:
> Would prefer using __AVX512F__ instead of RTE_MACHINE_CPUFLAG_AVX512F here rather than remove the RTE_MACHINE_CPUFLAG_ macro directly to check the CPU capability.
> So the judgment statement will be
> if cc.get_define('__AVX512F__', args: machine_args) != '' or (not machine_args.contains('-mno-avx512f') and cc.has_argument('-mavx512f'))
>
> what do you think?

No opinion as I have yet to understand the subtleties to control
enablement of avx stuff.
Bruce?

-- 
David Marchand

^ permalink raw reply	[flat|nested] 25+ messages in thread
* Re: [dpdk-dev] [PATCH v4 1/3] net/ice: add AVX512 vector path
  2020-10-26  8:09       ` David Marchand
@ 2020-10-27 10:19         ` Bruce Richardson
  0 siblings, 0 replies; 25+ messages in thread
From: Bruce Richardson @ 2020-10-27 10:19 UTC (permalink / raw)
To: David Marchand
Cc: Rong, Leyi, Zhang, Qi Z, Yigit, Ferruh, Lu, Wenzhuo, dev, Thomas Monjalon

On Mon, Oct 26, 2020 at 09:09:31AM +0100, David Marchand wrote:
> On Mon, Oct 26, 2020 at 8:13 AM Rong, Leyi <leyi.rong@intel.com> wrote:
> > Would prefer using __AVX512F__ instead of RTE_MACHINE_CPUFLAG_AVX512F here rather than remove the RTE_MACHINE_CPUFLAG_ macro directly to check the CPU capability.
> > So the judgment statement will be
> > if cc.get_define('__AVX512F__', args: machine_args) != '' or (not machine_args.contains('-mno-avx512f') and cc.has_argument('-mavx512f'))
> >
> > what do you think?
>
> No opinion as I have yet to understand the subtleties to control
> enablement of avx stuff.
> Bruce?
>
The above proposal is correct, in that we now check the compiler flags
directly rather than using our own custom flags.

/Bruce

^ permalink raw reply	[flat|nested] 25+ messages in thread
* Re: [dpdk-dev] [PATCH v4 1/3] net/ice: add AVX512 vector path
  2020-10-26  7:12     ` Rong, Leyi
  2020-10-26  8:09       ` David Marchand
@ 2020-10-27 10:22       ` Ferruh Yigit
  1 sibling, 0 replies; 25+ messages in thread
From: Ferruh Yigit @ 2020-10-27 10:22 UTC (permalink / raw)
To: Rong, Leyi, David Marchand, Zhang, Qi Z
Cc: Lu, Wenzhuo, dev, Richardson, Bruce, Thomas Monjalon

On 10/26/2020 7:12 AM, Rong, Leyi wrote:
> 
>> -----Original Message-----
>> From: David Marchand <david.marchand@redhat.com>
>> Sent: Monday, October 26, 2020 12:24 AM
>> To: Rong, Leyi <leyi.rong@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Yigit,
>> Ferruh <ferruh.yigit@intel.com>
>> Cc: Lu, Wenzhuo <wenzhuo.lu@intel.com>; dev <dev@dpdk.org>; Richardson,
>> Bruce <bruce.richardson@intel.com>; Thomas Monjalon
>> <thomas@monjalon.net>
>> Subject: Re: [dpdk-dev] [PATCH v4 1/3] net/ice: add AVX512 vector path
>>
>> Hello Leyi, Qi, Ferruh,
>>
>> On Fri, Oct 23, 2020 at 6:37 AM Leyi Rong <leyi.rong@intel.com> wrote:
>>> diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
>>> index 254595af85..85e8baf912 100644
>>> --- a/drivers/net/ice/meson.build
>>> +++ b/drivers/net/ice/meson.build
>>> @@ -34,6 +34,17 @@ if arch_subdir == 'x86'
>>>  			c_args: [cflags, '-mavx2'])
>>>  		objs += ice_avx2_lib.extract_objects('ice_rxtx_vec_avx2.c')
>>>  	endif
>>> +
>>> +	if dpdk_conf.has('RTE_MACHINE_CPUFLAG_AVX512F') or (not machine_args.contains('-mno-avx512f') and cc.has_argument('-mavx512f'))
>>> +		cflags += ['-DCC_AVX512_SUPPORT']
>>> +		ice_avx512_lib = static_library('ice_avx512_lib',
>>> +				'ice_rxtx_vec_avx512.c',
>>> +				dependencies: [static_rte_ethdev,
>>> +					static_rte_kvargs, static_rte_hash],
>>> +				include_directories: includes,
>>> +				c_args: [cflags, '-march=skylake-avx512', '-mavx512f'])
>>> +		objs += ice_avx512_lib.extract_objects('ice_rxtx_vec_avx512.c')
>>> +	endif
>>>  endif
>>>
>>>  sources += files('ice_dcf.c',
>>> --
>>> 2.17.1
>>>
>>
>> RTE_MACHINE_CPUFLAG_AVX512F can be removed.
>> Worth fixing before hitting the main tree.
>>
>> Thanks.
>>
>> --
>> David Marchand
> 
> Hello David,
> 
> Would prefer using __AVX512F__ instead of RTE_MACHINE_CPUFLAG_AVX512F here rather than remove the RTE_MACHINE_CPUFLAG_ macro directly to check the CPU capability.
> So the judgment statement will be
> if cc.get_define('__AVX512F__', args: machine_args) != '' or (not machine_args.contains('-mno-avx512f') and cc.has_argument('-mavx512f'))
> 
> what do you think?
> 
> 
> Hello Ferruh,
> 
> As the patchset is already merged into dpdk-next-net, I'm going to make another patch for this if it's accepted?
> 

Hi Leyi,

Please make another patch, I can squash it in the next-net.

Thanks,
ferruh

^ permalink raw reply	[flat|nested] 25+ messages in thread
* Re: [dpdk-dev] [PATCH v4 1/3] net/ice: add AVX512 vector path
  2020-10-23  4:14 ` [dpdk-dev] [PATCH v4 1/3] net/ice: add AVX512 vector path Leyi Rong
  2020-10-25 16:23   ` David Marchand
@ 2020-10-27  8:32   ` Ali Alnubani
  2020-10-27  8:42     ` Ali Alnubani
  1 sibling, 1 reply; 25+ messages in thread
From: Ali Alnubani @ 2020-10-27 8:32 UTC (permalink / raw)
To: Leyi Rong, qi.z.zhang, wenzhuo.lu, burce.richardson
Cc: dev, Bruce Richardson

Hi,

> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Leyi Rong
> Sent: Friday, October 23, 2020 7:14 AM
> To: qi.z.zhang@intel.com; wenzhuo.lu@intel.com;
> burce.richardson@intel.com
> Cc: dev@dpdk.org; Leyi Rong <leyi.rong@intel.com>; Bruce Richardson
> <bruce.richardson@intel.com>
> Subject: [dpdk-dev] [PATCH v4 1/3] net/ice: add AVX512 vector path
>
> Add AVX512 support for ice PMD. This patch adds ice_rxtx_vec_avx512.c to
> support ice AVX512 vPMD.
>
> This patch aims to enable AVX512 on ice vPMD. Main changes are focus on Rx
> path compared with AVX2 vPMD.
>
> Signed-off-by: Leyi Rong <leyi.rong@intel.com>
> Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
> ---

This patch is causing the following build failure on CentOS 7 with gcc 4.8.5:

"""
drivers/librte_net_ice.a(net_ice_ice_rxtx.c.o): In function `ice_set_rx_function':
drivers/net/ice/ice_rxtx.c:3034: undefined reference to `ice_recv_scattered_pkts_vec_avx512'
drivers/net/ice/ice_rxtx.c:3050: undefined reference to `ice_recv_pkts_vec_avx512'
drivers/librte_net_ice.a(net_ice_ice_rxtx.c.o): In function `ice_set_tx_function':
drivers/net/ice/ice_rxtx.c:3243: undefined reference to `ice_xmit_pkts_vec_avx512'
"""

It reproduces when building with "meson --werror --buildtype=debug build && ninja -C build".

Regards,
Ali

^ permalink raw reply	[flat|nested] 25+ messages in thread
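A note on this failure class: "undefined reference" to the AVX512 entry points means ice_rxtx.c was compiled with CC_AVX512_SUPPORT defined while the object built from ice_rxtx_vec_avx512.c never made it into the link, i.e. the two halves of the meson condition disagreed in that environment. A stripped-down illustration of the pattern (file layout and names here are ours, not the driver's):

    /* caller.c -- sketch of the guard whose two halves must agree:
     * the CC_AVX512_SUPPORT macro seen here, and the build system's
     * decision to compile/link the AVX512 object, both come from the
     * same condition. If only the macro side is set, the link fails
     * with exactly the "undefined reference" errors quoted above.
     */
    #include <stdint.h>

    uint16_t ice_recv_pkts_vec_avx512(void *rxq, void **pkts, uint16_t n);

    uint16_t
    recv_burst(void *rxq, void **pkts, uint16_t n)
    {
    #ifdef CC_AVX512_SUPPORT
        return ice_recv_pkts_vec_avx512(rxq, pkts, n); /* needs the object */
    #else
        return 0; /* scalar/AVX2 fallback would go here */
    #endif
    }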
* Re: [dpdk-dev] [PATCH v4 1/3] net/ice: add AVX512 vector path
  2020-10-27  8:32   ` Ali Alnubani
@ 2020-10-27  8:42     ` Ali Alnubani
  0 siblings, 0 replies; 25+ messages in thread
From: Ali Alnubani @ 2020-10-27 8:42 UTC (permalink / raw)
To: Leyi Rong, qi.z.zhang, wenzhuo.lu, burce.richardson
Cc: dev, Bruce Richardson

> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Ali Alnubani
> Sent: Tuesday, October 27, 2020 10:33 AM
> To: Leyi Rong <leyi.rong@intel.com>; qi.z.zhang@intel.com;
> wenzhuo.lu@intel.com; burce.richardson@intel.com
> Cc: dev@dpdk.org; Bruce Richardson <bruce.richardson@intel.com>
> Subject: Re: [dpdk-dev] [PATCH v4 1/3] net/ice: add AVX512 vector path
>
> Hi,
>
> > -----Original Message-----
> > From: dev <dev-bounces@dpdk.org> On Behalf Of Leyi Rong
> > Sent: Friday, October 23, 2020 7:14 AM
> > To: qi.z.zhang@intel.com; wenzhuo.lu@intel.com;
> > burce.richardson@intel.com
> > Cc: dev@dpdk.org; Leyi Rong <leyi.rong@intel.com>; Bruce Richardson
> > <bruce.richardson@intel.com>
> > Subject: [dpdk-dev] [PATCH v4 1/3] net/ice: add AVX512 vector path
> >
> > Add AVX512 support for ice PMD. This patch adds ice_rxtx_vec_avx512.c
> > to support ice AVX512 vPMD.
> >
> > This patch aims to enable AVX512 on ice vPMD. Main changes are focus
> > on Rx path compared with AVX2 vPMD.
> >
> > Signed-off-by: Leyi Rong <leyi.rong@intel.com>
> > Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
> > ---
>
> This patch is causing the following build failure on CentOS 7 with gcc 4.8.5:
>
> """
> drivers/librte_net_ice.a(net_ice_ice_rxtx.c.o): In function `ice_set_rx_function':
> drivers/net/ice/ice_rxtx.c:3034: undefined reference to `ice_recv_scattered_pkts_vec_avx512'
> drivers/net/ice/ice_rxtx.c:3050: undefined reference to `ice_recv_pkts_vec_avx512'
> drivers/librte_net_ice.a(net_ice_ice_rxtx.c.o): In function `ice_set_tx_function':
> drivers/net/ice/ice_rxtx.c:3243: undefined reference to `ice_xmit_pkts_vec_avx512'
> """
>
> It reproduces when building with "meson --werror --buildtype=debug build
> && ninja -C build".
>

It's also causing the following build failure on Ubuntu 18.04 (gcc 7.5.0) and on OpenSUSE Leap 15.2 (gcc 7.5.0):

"""
drivers/net/ice/ice_rxtx_vec_avx512.c: In function '_ice_recv_raw_pkts_vec_avx512':
/usr/lib/gcc/x86_64-linux-gnu/7/include/avx512bwintrin.h:1831:1: error: inlining failed in call to always_inline '_mm512_shuffle_epi8': target specific option mismatch
 _mm512_shuffle_epi8 (__m512i __A, __m512i __B)
 ^~~~~~~~~~~~~~~~~~~
drivers/net/ice/ice_rxtx_vec_avx512.c:350:11: note: called from here
   __m512i mb0_3 = _mm512_shuffle_epi8(raw_desc0_3, shuf_msk);
           ^~~~~
"""

Regards,
Ali

^ permalink raw reply	[flat|nested] 25+ messages in thread
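For reference, the second diagnostic is GCC refusing to inline an always_inline intrinsic into a function compiled for a weaker target: _mm512_shuffle_epi8() is an AVX512BW intrinsic, so any translation unit that reaches it with AVX512F but not AVX512BW enabled fails exactly this way. A minimal reproducer of the error class (our illustration; whether this is the precise cause in Ali's environments is a build-flag question):

    /* repro.c -- building this with only -mavx512f (no -mavx512bw,
     * no -march=skylake-avx512) makes GCC fail with "inlining failed
     * in call to always_inline '_mm512_shuffle_epi8': target specific
     * option mismatch", because the always_inline wrapper in
     * avx512bwintrin.h cannot be inlined into a weaker-target caller.
     */
    #include <immintrin.h>

    __m512i
    shuffle_bytes(__m512i data, __m512i mask)
    {
        return _mm512_shuffle_epi8(data, mask); /* requires AVX512BW */
    }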
* [dpdk-dev] [PATCH v4 2/3] net/ice: add RSS hash parsing in AVX512 path
  2020-10-23  4:14 ` [dpdk-dev] [PATCH v4 0/3] AVX512 vPMD on ice Leyi Rong
  2020-10-23  4:14 ` [dpdk-dev] [PATCH v4 1/3] net/ice: add AVX512 vector path Leyi Rong
@ 2020-10-23  4:14 ` Leyi Rong
  2020-10-23  4:14 ` [dpdk-dev] [PATCH v4 3/3] net/ice: optimize Tx path on AVX512 vPMD Leyi Rong
  2020-10-23  9:39 ` [dpdk-dev] [PATCH v4 0/3] AVX512 vPMD on ice Zhang, Qi Z
  3 siblings, 0 replies; 25+ messages in thread
From: Leyi Rong @ 2020-10-23 4:14 UTC (permalink / raw)
To: qi.z.zhang, wenzhuo.lu, burce.richardson
Cc: dev, Leyi Rong

Support RSS hash parsing in AVX512 data path as the default RXDID is
set to #22, which means the RSS hash field is located in the 2nd 16B of
each Flex Rx descriptor.

Signed-off-by: Leyi Rong <leyi.rong@intel.com>
---
 drivers/net/ice/ice_rxtx_vec_avx512.c | 105 ++++++++++++++++++++++++--
 1 file changed, 98 insertions(+), 7 deletions(-)

diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index 73c4ffce99..bcef7fc800 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c
@@ -176,8 +176,8 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
 	/* mask to shuffle from desc. to mbuf (4 descriptors) */
 	const __m512i shuf_msk =
 		_mm512_set4_epi32
-			(/* octet 12~15, 32 bits rss */
-			 15 << 24 | 14 << 16 | 13 << 8 | 12,
+			(/* rss hash parsed separately */
+			 0xFFFFFFFF,
 			 /* octet 10~11, 16 bits vlan_macip */
 			 /* octet 4~5, 16 bits data_len */
 			 11 << 24 | 10 << 16 | 5 << 8 | 4,
@@ -399,6 +399,11 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
 		mb4_7 = _mm512_mask_blend_epi32(0x1111, mb4_7, ptype4_7);
 		mb0_3 = _mm512_mask_blend_epi32(0x1111, mb0_3, ptype0_3);
 
+		__m256i mb4_5 = _mm512_extracti64x4_epi64(mb4_7, 0);
+		__m256i mb6_7 = _mm512_extracti64x4_epi64(mb4_7, 1);
+		__m256i mb0_1 = _mm512_extracti64x4_epi64(mb0_3, 0);
+		__m256i mb2_3 = _mm512_extracti64x4_epi64(mb0_3, 1);
+
 		/**
 		 * use permute/extract to get status content
 		 * After the operations, the packets status flags are in the
@@ -438,6 +443,97 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
 		/* merge flags */
 		const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
 				rss_vlan_flags);
+
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+		/**
+		 * needs to load the 2nd 16B of each desc for RSS hash parsing,
+		 * which will cause a performance drop when entering this path.
+		 */
+		if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
+		    DEV_RX_OFFLOAD_RSS_HASH) {
+			/* load bottom half of every 32B desc */
+			const __m128i raw_desc_bh7 =
+				_mm_load_si128
+					((void *)(&rxdp[7].wb.status_error1));
+			rte_compiler_barrier();
+			const __m128i raw_desc_bh6 =
+				_mm_load_si128
+					((void *)(&rxdp[6].wb.status_error1));
+			rte_compiler_barrier();
+			const __m128i raw_desc_bh5 =
+				_mm_load_si128
+					((void *)(&rxdp[5].wb.status_error1));
+			rte_compiler_barrier();
+			const __m128i raw_desc_bh4 =
+				_mm_load_si128
+					((void *)(&rxdp[4].wb.status_error1));
+			rte_compiler_barrier();
+			const __m128i raw_desc_bh3 =
+				_mm_load_si128
+					((void *)(&rxdp[3].wb.status_error1));
+			rte_compiler_barrier();
+			const __m128i raw_desc_bh2 =
+				_mm_load_si128
+					((void *)(&rxdp[2].wb.status_error1));
+			rte_compiler_barrier();
+			const __m128i raw_desc_bh1 =
+				_mm_load_si128
+					((void *)(&rxdp[1].wb.status_error1));
+			rte_compiler_barrier();
+			const __m128i raw_desc_bh0 =
+				_mm_load_si128
+					((void *)(&rxdp[0].wb.status_error1));
+
+			__m256i raw_desc_bh6_7 =
+				_mm256_inserti128_si256
+					(_mm256_castsi128_si256(raw_desc_bh6),
+					 raw_desc_bh7, 1);
+			__m256i raw_desc_bh4_5 =
+				_mm256_inserti128_si256
+					(_mm256_castsi128_si256(raw_desc_bh4),
+					 raw_desc_bh5, 1);
+			__m256i raw_desc_bh2_3 =
+				_mm256_inserti128_si256
+					(_mm256_castsi128_si256(raw_desc_bh2),
+					 raw_desc_bh3, 1);
+			__m256i raw_desc_bh0_1 =
+				_mm256_inserti128_si256
+					(_mm256_castsi128_si256(raw_desc_bh0),
+					 raw_desc_bh1, 1);
+
+			/**
+			 * to shift the 32b RSS hash value to the
+			 * highest 32b of each 128b before mask
+			 */
+			__m256i rss_hash6_7 =
+				_mm256_slli_epi64(raw_desc_bh6_7, 32);
+			__m256i rss_hash4_5 =
+				_mm256_slli_epi64(raw_desc_bh4_5, 32);
+			__m256i rss_hash2_3 =
+				_mm256_slli_epi64(raw_desc_bh2_3, 32);
+			__m256i rss_hash0_1 =
+				_mm256_slli_epi64(raw_desc_bh0_1, 32);
+
+			__m256i rss_hash_msk =
+				_mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
+						 0xFFFFFFFF, 0, 0, 0);
+
+			rss_hash6_7 = _mm256_and_si256
+					(rss_hash6_7, rss_hash_msk);
+			rss_hash4_5 = _mm256_and_si256
+					(rss_hash4_5, rss_hash_msk);
+			rss_hash2_3 = _mm256_and_si256
+					(rss_hash2_3, rss_hash_msk);
+			rss_hash0_1 = _mm256_and_si256
+					(rss_hash0_1, rss_hash_msk);
+
+			mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
+			mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
+			mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
+			mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
+		} /* if() on RSS hash parsing */
+#endif
+
 		/**
 		 * At this point, we have the 8 sets of flags in the low 16-bits
 		 * of each 32-bit value in vlan0.
@@ -471,11 +567,6 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
 				_mm256_srli_si256(mbuf_flags, 4),
 				0x04);
 
-		const __m256i mb4_5 = _mm512_extracti64x4_epi64(mb4_7, 0);
-		const __m256i mb6_7 = _mm512_extracti64x4_epi64(mb4_7, 1);
-		const __m256i mb0_1 = _mm512_extracti64x4_epi64(mb0_3, 0);
-		const __m256i mb2_3 = _mm512_extracti64x4_epi64(mb0_3, 1);
-
 		/* permute to add in the rx_descriptor e.g. rss fields */
 		rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
 		rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
-- 
2.17.1

^ permalink raw reply	[flat|nested] 25+ messages in thread
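As a scalar cross-check of the shift-and-mask sequence above: the slli-by-32 within each 64-bit lane followed by the high-dword mask implies the 32-bit RSS hash sits at byte offset 8 of the descriptor's second 16 bytes, and it is OR-ed into the dword that maps to the mbuf's hash.rss field in the later rearm write. The per-descriptor scalar equivalent would be roughly the following (helper name is ours; the offset is inferred from the shift/mask constants, not stated in the patch):

    #include <stdint.h>
    #include <string.h>
    #include <rte_mbuf.h>

    /* Illustrative scalar equivalent of the vector RSS-hash merge:
     * copy the 32-bit hash out of the second 16B of the flex descriptor
     * (dword at byte offset 8) into the mbuf. A readability aid only,
     * not driver code.
     */
    static inline void
    merge_rss_hash(struct rte_mbuf *mb, const uint8_t *desc_second_16b)
    {
        uint32_t hash;

        memcpy(&hash, desc_second_16b + 8, sizeof(hash));
        mb->hash.rss = hash;
    }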
* [dpdk-dev] [PATCH v4 3/3] net/ice: optimize Tx path on AVX512 vPMD
  2020-10-23  4:14 ` [dpdk-dev] [PATCH v4 0/3] AVX512 vPMD on ice Leyi Rong
  2020-10-23  4:14 ` [dpdk-dev] [PATCH v4 1/3] net/ice: add AVX512 vector path Leyi Rong
  2020-10-23  4:14 ` [dpdk-dev] [PATCH v4 2/3] net/ice: add RSS hash parsing in AVX512 path Leyi Rong
@ 2020-10-23  4:14 ` Leyi Rong
  2020-10-23  9:39 ` [dpdk-dev] [PATCH v4 0/3] AVX512 vPMD on ice Zhang, Qi Z
  3 siblings, 0 replies; 25+ messages in thread
From: Leyi Rong @ 2020-10-23 4:14 UTC (permalink / raw)
To: qi.z.zhang, wenzhuo.lu, burce.richardson
Cc: dev, Leyi Rong, Bruce Richardson

Optimize the Tx path by using AVX512 instructions and vectorize the
Tx free-bufs processing.

Signed-off-by: Leyi Rong <leyi.rong@intel.com>
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
 drivers/net/ice/ice_rxtx.h            |   4 +
 drivers/net/ice/ice_rxtx_vec_avx512.c | 147 ++++++++++++++++++++++----
 drivers/net/ice/ice_rxtx_vec_common.h |  36 +++++--
 3 files changed, 158 insertions(+), 29 deletions(-)

diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 7cebbc2916..23409d479a 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -96,6 +96,10 @@ struct ice_tx_entry {
 	uint16_t last_id;
 };
 
+struct ice_vec_tx_entry {
+	struct rte_mbuf *mbuf;
+};
+
 struct ice_tx_queue {
 	uint16_t nb_tx_desc; /* number of TX descriptors */
 	rte_iova_t tx_ring_dma; /* TX ring DMA address */
diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index bcef7fc800..e5e7cc1482 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c
@@ -756,6 +756,108 @@ ice_recv_scattered_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
 			rx_pkts + retval, nb_pkts);
 }
 
+static __rte_always_inline int
+ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
+{
+	struct ice_vec_tx_entry *txep;
+	uint32_t n;
+	uint32_t i;
+	int nb_free = 0;
+	struct rte_mbuf *m, *free[ICE_TX_MAX_FREE_BUF_SZ];
+
+	/* check DD bits on threshold descriptor */
+	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+	     rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) !=
+	    rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE))
+		return 0;
+
+	n = txq->tx_rs_thresh;
+
+	/* first buffer to free from S/W ring is at index
+	 * tx_next_dd - (tx_rs_thresh - 1)
+	 */
+	txep = (void *)txq->sw_ring;
+	txep += txq->tx_next_dd - (n - 1);
+
+	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
+		struct rte_mempool *mp = txep[0].mbuf->pool;
+		struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
+				rte_lcore_id());
+		void **cache_objs = &cache->objs[cache->len];
+
+		if (n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
+			rte_mempool_ops_enqueue_bulk(mp, (void *)txep, n);
+			goto done;
+		}
+
+		/* The cache follows the following algorithm
+		 * 1. Add the objects to the cache
+		 * 2. Anything greater than the cache min value (if it
+		 * crosses the cache flush threshold) is flushed to the ring.
+		 */
+		/* Add elements back into the cache */
+		uint32_t copied = 0;
+		/* n is a multiple of 32 */
+		while (copied < n) {
+			const __m512i a = _mm512_loadu_si512(&txep[copied]);
+			const __m512i b = _mm512_loadu_si512(&txep[copied + 8]);
+			const __m512i c = _mm512_loadu_si512(&txep[copied + 16]);
+			const __m512i d = _mm512_loadu_si512(&txep[copied + 24]);
+
+			_mm512_storeu_si512(&cache_objs[copied], a);
+			_mm512_storeu_si512(&cache_objs[copied + 8], b);
+			_mm512_storeu_si512(&cache_objs[copied + 16], c);
+			_mm512_storeu_si512(&cache_objs[copied + 24], d);
+			copied += 32;
+		}
+		cache->len += n;
+
+		if (cache->len >= cache->flushthresh) {
+			rte_mempool_ops_enqueue_bulk
+				(mp, &cache->objs[cache->size],
+				 cache->len - cache->size);
+			cache->len = cache->size;
+		}
+		goto done;
+	}
+
+	m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
+	if (likely(m)) {
+		free[0] = m;
+		nb_free = 1;
+		for (i = 1; i < n; i++) {
+			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+			if (likely(m)) {
+				if (likely(m->pool == free[0]->pool)) {
+					free[nb_free++] = m;
+				} else {
+					rte_mempool_put_bulk(free[0]->pool,
+							     (void *)free,
+							     nb_free);
+					free[0] = m;
+					nb_free = 1;
+				}
+			}
+		}
+		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+	} else {
+		for (i = 1; i < n; i++) {
+			m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
+			if (m)
+				rte_mempool_put(m->pool, m);
+		}
+	}
+
+done:
+	/* buffers were freed, update counters */
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+	if (txq->tx_next_dd >= txq->nb_tx_desc)
+		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+	return txq->tx_rs_thresh;
+}
+
 static inline void
 ice_vtx1(volatile struct ice_tx_desc *txdp,
 	 struct rte_mbuf *pkt, uint64_t flags)
@@ -777,13 +879,6 @@ ice_vtx(volatile struct ice_tx_desc *txdp,
 	const uint64_t hi_qw_tmpl = (ICE_TX_DESC_DTYPE_DATA |
 			((uint64_t)flags << ICE_TXD_QW1_CMD_S));
 
-	/* if unaligned on a 32-byte boundary, do one to align */
-	if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) {
-		ice_vtx1(txdp, *pkt, flags);
-		nb_pkts--, txdp++, pkt++;
-	}
-
-	/* do four at a time while possible, in bursts */
 	for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) {
 		uint64_t hi_qw3 =
 			hi_qw_tmpl |
@@ -802,20 +897,17 @@ ice_vtx(volatile struct ice_tx_desc *txdp,
 			((uint64_t)pkt[0]->data_len <<
 			 ICE_TXD_QW1_TX_BUF_SZ_S);
 
-		__m256i desc2_3 =
-			_mm256_set_epi64x
+		__m512i desc0_3 =
+			_mm512_set_epi64
 				(hi_qw3,
 				 pkt[3]->buf_iova + pkt[3]->data_off,
 				 hi_qw2,
-				 pkt[2]->buf_iova + pkt[2]->data_off);
-		__m256i desc0_1 =
-			_mm256_set_epi64x
-				(hi_qw1,
+				 pkt[2]->buf_iova + pkt[2]->data_off,
+				 hi_qw1,
 				 pkt[1]->buf_iova + pkt[1]->data_off,
 				 hi_qw0,
 				 pkt[0]->buf_iova + pkt[0]->data_off);
-		_mm256_store_si256((void *)(txdp + 2), desc2_3);
-		_mm256_store_si256((void *)txdp, desc0_1);
+		_mm512_storeu_si512((void *)txdp, desc0_3);
 	}
 
 	/* do any last ones */
@@ -825,13 +917,23 @@ ice_vtx(volatile struct ice_tx_desc *txdp,
 	}
 }
 
+static __rte_always_inline void
+ice_tx_backlog_entry_avx512(struct ice_vec_tx_entry *txep,
+			    struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	int i;
+
+	for (i = 0; i < (int)nb_pkts; ++i)
+		txep[i].mbuf = tx_pkts[i];
+}
+
 static inline uint16_t
 ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 				uint16_t nb_pkts)
 {
 	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
 	volatile struct ice_tx_desc *txdp;
-	struct ice_tx_entry *txep;
+	struct ice_vec_tx_entry *txep;
 	uint16_t n, nb_commit, tx_id;
 	uint64_t flags = ICE_TD_CMD;
 	uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD;
@@ -840,7 +942,7 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
 
 	if (txq->nb_tx_free < txq->tx_free_thresh)
-		ice_tx_free_bufs(txq);
+		ice_tx_free_bufs_avx512(txq);
 
 	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
 	if (unlikely(nb_pkts == 0))
@@ -848,13 +950,14 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	tx_id = txq->tx_tail;
 	txdp = &txq->tx_ring[tx_id];
-	txep = &txq->sw_ring[tx_id];
+	txep = (void *)txq->sw_ring;
+	txep += tx_id;
 
 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
 
 	n = (uint16_t)(txq->nb_tx_desc - tx_id);
 	if (nb_commit >= n) {
-		ice_tx_backlog_entry(txep, tx_pkts, n);
+		ice_tx_backlog_entry_avx512(txep, tx_pkts, n);
 
 		ice_vtx(txdp, tx_pkts, n - 1, flags);
 		tx_pkts += (n - 1);
@@ -868,11 +971,11 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
 
 		/* avoid reaching the end of the ring */
-		txdp = &txq->tx_ring[tx_id];
-		txep = &txq->sw_ring[tx_id];
+		txdp = txq->tx_ring;
+		txep = (void *)txq->sw_ring;
 	}
 
-	ice_tx_backlog_entry(txep, tx_pkts, nb_commit);
+	ice_tx_backlog_entry_avx512(txep, tx_pkts, nb_commit);
 
 	ice_vtx(txdp, tx_pkts, nb_commit, flags);
 
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index e2019c8d6a..ae2ac29f2a 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -189,16 +189,38 @@ _ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq)
 	 * so need to free remains more carefully.
 	 */
 	i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
-	if (txq->tx_tail < i) {
-		for (; i < txq->nb_tx_desc; i++) {
+
+#ifdef CC_AVX512_SUPPORT
+	struct rte_eth_dev *dev = txq->vsi->adapter->eth_dev;
+
+	if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512) {
+		struct ice_vec_tx_entry *swr = (void *)txq->sw_ring;
+
+		if (txq->tx_tail < i) {
+			for (; i < txq->nb_tx_desc; i++) {
+				rte_pktmbuf_free_seg(swr[i].mbuf);
+				swr[i].mbuf = NULL;
+			}
+			i = 0;
+		}
+		for (; i < txq->tx_tail; i++) {
+			rte_pktmbuf_free_seg(swr[i].mbuf);
+			swr[i].mbuf = NULL;
+		}
+	} else
+#endif
+	{
+		if (txq->tx_tail < i) {
+			for (; i < txq->nb_tx_desc; i++) {
+				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+				txq->sw_ring[i].mbuf = NULL;
+			}
+			i = 0;
+		}
+		for (; i < txq->tx_tail; i++) {
 			rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
 			txq->sw_ring[i].mbuf = NULL;
 		}
-		i = 0;
-	}
-	for (; i < txq->tx_tail; i++) {
-		rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
-		txq->sw_ring[i].mbuf = NULL;
 	}
 }
-- 
2.17.1

^ permalink raw reply	[flat|nested] 25+ messages in thread
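The fast-free path in ice_tx_free_bufs_avx512() above bypasses rte_mempool_put_bulk() and writes mbuf pointers straight into the lcore's default mempool cache, flushing the excess to the backing ring only when the flush threshold is crossed. A condensed scalar sketch of that pattern, using the same public mempool API as the patch (the function name is ours, and unlike the patch we add a NULL-cache fallback):

    #include <string.h>
    #include <rte_mempool.h>
    #include <rte_lcore.h>

    /* Bulk-return n objects to the mempool's per-lcore cache. Assumes
     * all n objects come from the same mempool, which the patch
     * guarantees via DEV_TX_OFFLOAD_MBUF_FAST_FREE.
     */
    static inline void
    bulk_return_to_cache(struct rte_mempool *mp, void **objs, unsigned int n)
    {
        struct rte_mempool_cache *cache =
            rte_mempool_default_cache(mp, rte_lcore_id());

        if (cache == NULL || n > RTE_MEMPOOL_CACHE_MAX_SIZE) {
            /* no usable cache: enqueue directly to the ring/stack */
            rte_mempool_ops_enqueue_bulk(mp, objs, n);
            return;
        }
        /* append to the cache (the patch does this with 512-bit copies) */
        memcpy(&cache->objs[cache->len], objs, n * sizeof(void *));
        cache->len += n;
        /* flush anything above the nominal cache size once the
         * flush threshold is crossed, as in the patch */
        if (cache->len >= cache->flushthresh) {
            rte_mempool_ops_enqueue_bulk(mp, &cache->objs[cache->size],
                                         cache->len - cache->size);
            cache->len = cache->size;
        }
    }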
* Re: [dpdk-dev] [PATCH v4 0/3] AVX512 vPMD on ice
  2020-10-23  4:14 ` [dpdk-dev] [PATCH v4 0/3] AVX512 vPMD on ice Leyi Rong
                    ` (2 preceding siblings ...)
  2020-10-23  4:14 ` [dpdk-dev] [PATCH v4 3/3] net/ice: optimize Tx path on AVX512 vPMD Leyi Rong
@ 2020-10-23  9:39 ` Zhang, Qi Z
  3 siblings, 0 replies; 25+ messages in thread
From: Zhang, Qi Z @ 2020-10-23 9:39 UTC (permalink / raw)
To: Rong, Leyi, Lu, Wenzhuo, burce.richardson
Cc: dev

> -----Original Message-----
> From: Rong, Leyi <leyi.rong@intel.com>
> Sent: Friday, October 23, 2020 12:14 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>; Lu, Wenzhuo
> <wenzhuo.lu@intel.com>; burce.richardson@intel.com
> Cc: dev@dpdk.org; Rong, Leyi <leyi.rong@intel.com>
> Subject: [PATCH v4 0/3] AVX512 vPMD on ice
>
> This patchset aims to support AVX512 vPMD on ice.
>
> ---
> v4:
> - Replace buf_physaddr by buf_iova in the 1st patch to eliminate build
>   error.
>
> v3:
> - Code rebased.
>
> v2:
> - No internal judgement when RTE_MACHINE_CPUFLAG_AVX512F is set in
>   meson.build.
> - Add RSS hash parsing as default RXDID is set to #22.
> - Use buf_iova instead of buf_physaddr, as buf_physaddr will be removed.
>
> Leyi Rong (3):
>   net/ice: add AVX512 vector path
>   net/ice: add RSS hash parsing in AVX512 path
>   net/ice: optimize Tx path on AVX512 vPMD
>
>  drivers/net/ice/ice_rxtx.c            |   90 ++-
>  drivers/net/ice/ice_rxtx.h            |   11 +
>  drivers/net/ice/ice_rxtx_vec_avx512.c | 1018 +++++++++++++++++++++++++
>  drivers/net/ice/ice_rxtx_vec_common.h |   36 +-
>  drivers/net/ice/meson.build           |   11 +
>  5 files changed, 1141 insertions(+), 25 deletions(-)
>  create mode 100644 drivers/net/ice/ice_rxtx_vec_avx512.c
>
> --
> 2.17.1

Acked-by: Qi Zhang <qi.z.zhang@intel.com>

Applied to dpdk-next-net-intel.

Thanks
Qi

^ permalink raw reply	[flat|nested] 25+ messages in thread
end of thread, other threads:[~2020-10-27 10:22 UTC | newest]

Thread overview: 25+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-09-10  6:55 [dpdk-dev] [PATCH v1 0/2] AVX512 vPMD on ice Leyi Rong
2020-09-10  6:55 ` [dpdk-dev] [PATCH v1 1/2] net/ice: add AVX512 vector path Leyi Rong
2020-09-10  9:32   ` Bruce Richardson
2020-09-10  6:55 ` [dpdk-dev] [PATCH v1 2/2] net/ice: optimize Tx path on AVX512 vPMD Leyi Rong
2020-09-15  1:17   ` Wang, Haiyue
2020-09-18  3:35 ` [dpdk-dev] [PATCH v2 0/3] AVX512 vPMD on ice Leyi Rong
2020-09-18  3:35   ` [dpdk-dev] [PATCH v2 1/3] net/ice: add AVX512 vector path Leyi Rong
2020-09-18  3:35   ` [dpdk-dev] [PATCH v2 2/3] net/ice: add RSS hash parsing in AVX512 path Leyi Rong
2020-09-18  3:35   ` [dpdk-dev] [PATCH v2 3/3] net/ice: optimize Tx path on AVX512 vPMD Leyi Rong
2020-10-20 10:51 ` [dpdk-dev] [PATCH v3 0/3] AVX512 vPMD on ice Leyi Rong
2020-10-20 10:51   ` [dpdk-dev] [PATCH v3 1/3] net/ice: add AVX512 vector path Leyi Rong
2020-10-20 10:51   ` [dpdk-dev] [PATCH v3 2/3] net/ice: add RSS hash parsing in AVX512 path Leyi Rong
2020-10-20 10:51   ` [dpdk-dev] [PATCH v3 3/3] net/ice: optimize Tx path on AVX512 vPMD Leyi Rong
2020-10-23  4:14 ` [dpdk-dev] [PATCH v4 0/3] AVX512 vPMD on ice Leyi Rong
2020-10-23  4:14   ` [dpdk-dev] [PATCH v4 1/3] net/ice: add AVX512 vector path Leyi Rong
2020-10-25 16:23     ` David Marchand
2020-10-26  7:12       ` Rong, Leyi
2020-10-26  8:09         ` David Marchand
2020-10-27 10:19           ` Bruce Richardson
2020-10-27 10:22           ` Ferruh Yigit
2020-10-27  8:32     ` Ali Alnubani
2020-10-27  8:42       ` Ali Alnubani
2020-10-23  4:14   ` [dpdk-dev] [PATCH v4 2/3] net/ice: add RSS hash parsing in AVX512 path Leyi Rong
2020-10-23  4:14   ` [dpdk-dev] [PATCH v4 3/3] net/ice: optimize Tx path on AVX512 vPMD Leyi Rong
2020-10-23  9:39   ` [dpdk-dev] [PATCH v4 0/3] AVX512 vPMD on ice Zhang, Qi Z