From: Leyi Rong <leyi.rong@intel.com>
To: qi.z.zhang@intel.com, wenzhuo.lu@intel.com
Cc: dev@dpdk.org, Leyi Rong <leyi.rong@intel.com>
Date: Thu, 15 Apr 2021 16:58:10 +0800
Message-Id: <20210415085811.56429-2-leyi.rong@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20210415085811.56429-1-leyi.rong@intel.com>
References: <20210317091409.11725-1-leyi.rong@intel.com>
 <20210415085811.56429-1-leyi.rong@intel.com>
Subject: [dpdk-dev] [PATCH v5 1/2] net/ice: add Tx AVX512 offload path

Add an alternative Tx data path for AVX512 which supports a subset of
the Tx offload features, namely Tx checksum offload and VLAN/QinQ
insertion offload.
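For context, the offload path is only selected when every Tx queue is
configured with offloads from the supported set. A minimal sketch of an
application-side setup that would steer the ice PMD onto the new path
(the function names, queue counts, and header layout here are
illustrative only, not part of this patch):

#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>

/* Request only offloads the AVX512 offload path can handle
 * (checksum + VLAN/QinQ insertion). Requesting e.g.
 * DEV_TX_OFFLOAD_TCP_TSO or DEV_TX_OFFLOAD_MULTI_SEGS instead
 * would disable the vector Tx path entirely.
 */
static int
configure_port_for_offload_path(uint16_t port_id)
{
	struct rte_eth_conf conf = {0};

	conf.txmode.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
			       DEV_TX_OFFLOAD_TCP_CKSUM |
			       DEV_TX_OFFLOAD_VLAN_INSERT;

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}

/* Per packet, mark an IPv4/TCP mbuf so that the descriptor
 * command/offset fields get filled by the offload helper. */
static void
request_tx_cksum(struct rte_mbuf *m)
{
	m->l2_len = sizeof(struct rte_ether_hdr);
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
}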
Signed-off-by: Leyi Rong <leyi.rong@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ice/ice_rxtx.c            |  28 +++++--
 drivers/net/ice/ice_rxtx.h            |   3 +
 drivers/net/ice/ice_rxtx_vec_avx2.c   |   2 +-
 drivers/net/ice/ice_rxtx_vec_avx512.c |  58 +++++++++++---
 drivers/net/ice/ice_rxtx_vec_common.h | 106 ++++++++++++++++++++++----
 drivers/net/ice/ice_rxtx_vec_sse.c    |   2 +-
 6 files changed, 164 insertions(+), 35 deletions(-)

diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 0827db9c9e..75326c76ab 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -8,6 +8,7 @@
 
 #include "rte_pmd_ice.h"
 #include "ice_rxtx.h"
+#include "ice_rxtx_vec_common.h"
 
 #define ICE_TX_CKSUM_OFFLOAD_MASK (	 \
 		PKT_TX_IP_CKSUM |	 \
@@ -3267,12 +3268,14 @@ ice_set_tx_function(struct rte_eth_dev *dev)
 #ifdef RTE_ARCH_X86
 	struct ice_tx_queue *txq;
 	int i;
+	int tx_check_ret;
 	bool use_avx512 = false;
 	bool use_avx2 = false;
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		if (!ice_tx_vec_dev_check(dev) &&
-				rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
+		tx_check_ret = ice_tx_vec_dev_check(dev);
+		if (tx_check_ret >= 0 &&
+				rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
 			ad->tx_vec_allowed = true;
 			for (i = 0; i < dev->data->nb_tx_queues; i++) {
 				txq = dev->data->tx_queues[i];
@@ -3291,12 +3294,15 @@ ice_set_tx_function(struct rte_eth_dev *dev)
 				PMD_DRV_LOG(NOTICE,
 					"AVX512 is not supported in build env");
 #endif
-			if (!use_avx512 &&
+			if (!use_avx512 && tx_check_ret == ICE_VECTOR_PATH &&
 					(rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
 					rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
 					rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
 				use_avx2 = true;
 
+			if (!use_avx512 && tx_check_ret == ICE_VECTOR_OFFLOAD_PATH)
+				ad->tx_vec_allowed = false;
+
 		} else {
 			ad->tx_vec_allowed = false;
 		}
@@ -3305,9 +3311,18 @@ ice_set_tx_function(struct rte_eth_dev *dev)
 	if (ad->tx_vec_allowed) {
 		if (use_avx512) {
 #ifdef CC_AVX512_SUPPORT
-			PMD_DRV_LOG(NOTICE, "Using AVX512 Vector Tx (port %d).",
-				    dev->data->port_id);
-			dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
+			if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+				PMD_DRV_LOG(NOTICE,
+					"Using AVX512 OFFLOAD Vector Tx (port %d).",
+					dev->data->port_id);
+				dev->tx_pkt_burst =
+					ice_xmit_pkts_vec_avx512_offload;
+			} else {
+				PMD_DRV_LOG(NOTICE,
+					"Using AVX512 Vector Tx (port %d).",
+					dev->data->port_id);
+				dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
+			}
 #endif
 		} else {
 			PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
@@ -3343,6 +3358,7 @@ static const struct {
 #ifdef RTE_ARCH_X86
 #ifdef CC_AVX512_SUPPORT
 	{ ice_xmit_pkts_vec_avx512, "Vector AVX512" },
+	{ ice_xmit_pkts_vec_avx512_offload, "Offload Vector AVX512" },
 #endif
 	{ ice_xmit_pkts_vec_avx2, "Vector AVX2" },
 	{ ice_xmit_pkts_vec, "Vector SSE" },
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 99096e4c21..f72fad0255 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -261,6 +261,9 @@ uint16_t ice_recv_scattered_pkts_vec_avx512(void *rx_queue,
 					    uint16_t nb_pkts);
 uint16_t ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 				  uint16_t nb_pkts);
+uint16_t ice_xmit_pkts_vec_avx512_offload(void *tx_queue,
+					  struct rte_mbuf **tx_pkts,
+					  uint16_t nb_pkts);
 int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc);
 int ice_tx_done_cleanup(void *txq, uint32_t free_cnt);
 int ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c
index 83dcdf15d4..8d4bd6df1b 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx2.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx2.c
@@ -853,7 +853,7 @@ ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
 
 	if (txq->nb_tx_free < txq->tx_free_thresh)
-		ice_tx_free_bufs(txq);
+		ice_tx_free_bufs_vec(txq);
 
 	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
 	if (unlikely(nb_pkts == 0))
diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index a668b82232..1c4a59a170 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c
@@ -982,23 +982,26 @@ ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
 	return txq->tx_rs_thresh;
 }
 
-static inline void
+static __rte_always_inline void
 ice_vtx1(volatile struct ice_tx_desc *txdp,
-	 struct rte_mbuf *pkt, uint64_t flags)
+	 struct rte_mbuf *pkt, uint64_t flags, bool do_offload)
 {
 	uint64_t high_qw =
 		(ICE_TX_DESC_DTYPE_DATA |
 		 ((uint64_t)flags << ICE_TXD_QW1_CMD_S) |
 		 ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S));
 
+	if (do_offload)
+		ice_txd_enable_offload(pkt, &high_qw);
+
 	__m128i descriptor = _mm_set_epi64x(high_qw,
 				pkt->buf_iova + pkt->data_off);
 	_mm_store_si128((__m128i *)txdp, descriptor);
 }
 
-static inline void
-ice_vtx(volatile struct ice_tx_desc *txdp,
-	struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+static __rte_always_inline void
+ice_vtx(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkt,
+	uint16_t nb_pkts, uint64_t flags, bool do_offload)
 {
 	const uint64_t hi_qw_tmpl = (ICE_TX_DESC_DTYPE_DATA |
 			((uint64_t)flags << ICE_TXD_QW1_CMD_S));
@@ -1008,18 +1011,26 @@ ice_vtx(volatile struct ice_tx_desc *txdp,
 			hi_qw_tmpl |
 			((uint64_t)pkt[3]->data_len <<
 			 ICE_TXD_QW1_TX_BUF_SZ_S);
+		if (do_offload)
+			ice_txd_enable_offload(pkt[3], &hi_qw3);
 		uint64_t hi_qw2 =
 			hi_qw_tmpl |
 			((uint64_t)pkt[2]->data_len <<
 			 ICE_TXD_QW1_TX_BUF_SZ_S);
+		if (do_offload)
+			ice_txd_enable_offload(pkt[2], &hi_qw2);
 		uint64_t hi_qw1 =
 			hi_qw_tmpl |
 			((uint64_t)pkt[1]->data_len <<
 			 ICE_TXD_QW1_TX_BUF_SZ_S);
+		if (do_offload)
+			ice_txd_enable_offload(pkt[1], &hi_qw1);
 		uint64_t hi_qw0 =
 			hi_qw_tmpl |
 			((uint64_t)pkt[0]->data_len <<
 			 ICE_TXD_QW1_TX_BUF_SZ_S);
+		if (do_offload)
+			ice_txd_enable_offload(pkt[0], &hi_qw0);
 
 		__m512i desc0_3 =
 			_mm512_set_epi64
@@ -1036,7 +1047,7 @@ ice_vtx(volatile struct ice_tx_desc *txdp,
 
 	/* do any last ones */
 	while (nb_pkts) {
-		ice_vtx1(txdp, *pkt, flags);
+		ice_vtx1(txdp, *pkt, flags, do_offload);
 		txdp++, pkt++, nb_pkts--;
 	}
 }
@@ -1051,9 +1062,9 @@ ice_tx_backlog_entry_avx512(struct ice_vec_tx_entry *txep,
 		txep[i].mbuf = tx_pkts[i];
 }
 
-static inline uint16_t
+static __rte_always_inline uint16_t
 ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
-				uint16_t nb_pkts)
+				uint16_t nb_pkts, bool do_offload)
 {
 	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
 	volatile struct ice_tx_desc *txdp;
@@ -1083,11 +1094,11 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 	if (nb_commit >= n) {
 		ice_tx_backlog_entry_avx512(txep, tx_pkts, n);
 
-		ice_vtx(txdp, tx_pkts, n - 1, flags);
+		ice_vtx(txdp, tx_pkts, n - 1, flags, do_offload);
 		tx_pkts += (n - 1);
 		txdp += (n - 1);
 
-		ice_vtx1(txdp, *tx_pkts++, rs);
+		ice_vtx1(txdp, *tx_pkts++, rs, do_offload);
 
 		nb_commit = (uint16_t)(nb_commit - n);
 
@@ -1101,7 +1112,7 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 	ice_tx_backlog_entry_avx512(txep, tx_pkts, nb_commit);
 
-	ice_vtx(txdp, tx_pkts, nb_commit, flags);
+	ice_vtx(txdp, tx_pkts, nb_commit, flags, do_offload);
 
 	tx_id = (uint16_t)(tx_id + nb_commit);
 	if (tx_id > txq->tx_next_rs) {
@@ -1131,7 +1142,30 @@ ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
 		ret = ice_xmit_fixed_burst_vec_avx512(tx_queue,
-						      &tx_pkts[nb_tx], num);
+						      &tx_pkts[nb_tx], num, false);
+		nb_tx += ret;
+		nb_pkts -= ret;
+		if (ret < num)
+			break;
+	}
+
+	return nb_tx;
+}
+
+uint16_t
+ice_xmit_pkts_vec_avx512_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
+				 uint16_t nb_pkts)
+{
+	uint16_t nb_tx = 0;
+	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
+
+	while (nb_pkts) {
+		uint16_t ret, num;
+
+		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+		ret = ice_xmit_fixed_burst_vec_avx512(tx_queue,
+						      &tx_pkts[nb_tx], num, true);
+		nb_tx += ret;
 		nb_pkts -= ret;
 		if (ret < num)
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index a5d76a2936..942647f4e9 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -77,7 +77,7 @@ ice_rx_reassemble_packets(struct ice_rx_queue *rxq, struct rte_mbuf **rx_bufs,
 }
 
 static __rte_always_inline int
-ice_tx_free_bufs(struct ice_tx_queue *txq)
+ice_tx_free_bufs_vec(struct ice_tx_queue *txq)
 {
 	struct ice_tx_entry *txep;
 	uint32_t n;
@@ -197,7 +197,8 @@ _ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq)
 #ifdef CC_AVX512_SUPPORT
 	struct rte_eth_dev *dev = txq->vsi->adapter->eth_dev;
 
-	if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512) {
+	if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512 ||
+	    dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512_offload) {
 		struct ice_vec_tx_entry *swr = (void *)txq->sw_ring;
 
 		if (txq->tx_tail < i) {
@@ -267,29 +268,39 @@ ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
 	return 0;
 }
 
-#define ICE_NO_VECTOR_FLAGS (			 \
-		DEV_TX_OFFLOAD_MULTI_SEGS |	 \
-		DEV_TX_OFFLOAD_VLAN_INSERT |	 \
-		DEV_TX_OFFLOAD_IPV4_CKSUM |	 \
-		DEV_TX_OFFLOAD_SCTP_CKSUM |	 \
-		DEV_TX_OFFLOAD_UDP_CKSUM |	 \
-		DEV_TX_OFFLOAD_TCP_TSO |	 \
+#define ICE_TX_NO_VECTOR_FLAGS (		 \
+		DEV_TX_OFFLOAD_MULTI_SEGS |	 \
+		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+		DEV_TX_OFFLOAD_TCP_TSO)
+
+#define ICE_TX_VECTOR_OFFLOAD (			 \
+		DEV_TX_OFFLOAD_VLAN_INSERT |	 \
+		DEV_TX_OFFLOAD_QINQ_INSERT |	 \
+		DEV_TX_OFFLOAD_IPV4_CKSUM |	 \
+		DEV_TX_OFFLOAD_SCTP_CKSUM |	 \
+		DEV_TX_OFFLOAD_UDP_CKSUM |	 \
 		DEV_TX_OFFLOAD_TCP_CKSUM)
 
+#define ICE_VECTOR_PATH		0
+#define ICE_VECTOR_OFFLOAD_PATH	1
+
 static inline int
 ice_tx_vec_queue_default(struct ice_tx_queue *txq)
 {
 	if (!txq)
 		return -1;
 
-	if (txq->offloads & ICE_NO_VECTOR_FLAGS)
-		return -1;
-
 	if (txq->tx_rs_thresh < ICE_VPMD_TX_BURST ||
 	    txq->tx_rs_thresh > ICE_TX_MAX_FREE_BUF_SZ)
 		return -1;
 
-	return 0;
+	if (txq->offloads & ICE_TX_NO_VECTOR_FLAGS)
+		return -1;
+
+	if (txq->offloads & ICE_TX_VECTOR_OFFLOAD)
+		return ICE_VECTOR_OFFLOAD_PATH;
+
+	return ICE_VECTOR_PATH;
 }
 
 static inline int
@@ -312,14 +323,19 @@ ice_tx_vec_dev_check_default(struct rte_eth_dev *dev)
 {
 	int i;
 	struct ice_tx_queue *txq;
+	int ret = 0;
+	int result = 0;
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		txq = dev->data->tx_queues[i];
-		if (ice_tx_vec_queue_default(txq))
+		ret = ice_tx_vec_queue_default(txq);
+		if (ret < 0)
 			return -1;
+		if (ret == ICE_VECTOR_OFFLOAD_PATH)
+			result = ret;
 	}
 
-	return 0;
+	return result;
 }
 
 #ifdef CC_AVX2_SUPPORT
@@ -521,4 +537,64 @@ ice_rxq_rearm_common(struct ice_rx_queue *rxq, __rte_unused bool avx512)
 }
 #endif
 
+static inline void
+ice_txd_enable_offload(struct rte_mbuf *tx_pkt,
+		       uint64_t *txd_hi)
+{
+	uint64_t ol_flags = tx_pkt->ol_flags;
+	uint32_t td_cmd = 0;
+	uint32_t td_offset = 0;
+
+	/* Tx Checksum Offload */
+	/* SET MACLEN */
+	td_offset |= (tx_pkt->l2_len >> 1) <<
+			ICE_TX_DESC_LEN_MACLEN_S;
+
+	/* Enable L3 checksum offload */
+	if (ol_flags & PKT_TX_IP_CKSUM) {
+		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
+		td_offset |= (tx_pkt->l3_len >> 2) <<
+				ICE_TX_DESC_LEN_IPLEN_S;
+	} else if (ol_flags & PKT_TX_IPV4) {
+		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
+		td_offset |= (tx_pkt->l3_len >> 2) <<
+				ICE_TX_DESC_LEN_IPLEN_S;
+	} else if (ol_flags & PKT_TX_IPV6) {
+		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
+		td_offset |= (tx_pkt->l3_len >> 2) <<
+				ICE_TX_DESC_LEN_IPLEN_S;
+	}
+
+	/* Enable L4 checksum offloads */
+	switch (ol_flags & PKT_TX_L4_MASK) {
+	case PKT_TX_TCP_CKSUM:
+		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
+		td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
+				ICE_TX_DESC_LEN_L4_LEN_S;
+		break;
+	case PKT_TX_SCTP_CKSUM:
+		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
+		td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
+				ICE_TX_DESC_LEN_L4_LEN_S;
+		break;
+	case PKT_TX_UDP_CKSUM:
+		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
+		td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
+				ICE_TX_DESC_LEN_L4_LEN_S;
+		break;
+	default:
+		break;
+	}
+
+	*txd_hi |= ((uint64_t)td_offset) << ICE_TXD_QW1_OFFSET_S;
+
+	/* Tx VLAN/QINQ insertion Offload */
+	if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
+		td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
+		*txd_hi |= ((uint64_t)tx_pkt->vlan_tci <<
+				ICE_TXD_QW1_L2TAG1_S);
+	}
+
+	*txd_hi |= ((uint64_t)td_cmd) << ICE_TXD_QW1_CMD_S;
+}
 #endif
diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c
index 3e467c48f1..6029cc2d99 100644
--- a/drivers/net/ice/ice_rxtx_vec_sse.c
+++ b/drivers/net/ice/ice_rxtx_vec_sse.c
@@ -702,7 +702,7 @@ ice_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
 
 	if (txq->nb_tx_free < txq->tx_free_thresh)
-		ice_tx_free_bufs(txq);
+		ice_tx_free_bufs_vec(txq);
 
 	nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
 	nb_commit = nb_pkts;
-- 
2.17.1
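
Since the patch registers the new burst function in the burst-mode info
table ("Offload Vector AVX512"), an application can confirm at runtime
which Tx path a queue ended up on via the generic ethdev burst-mode
query. A small sketch, assuming an already-configured and started port
(the helper name is illustrative):

#include <stdio.h>
#include <rte_ethdev.h>

/* Print the Tx burst mode the PMD selected for a queue; on a queue
 * eligible for the new path this reports "Offload Vector AVX512". */
static void
show_tx_burst_mode(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_burst_mode mode;

	if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
		printf("port %u txq %u: %s\n", port_id, queue_id, mode.info);
}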