DPDK patches and discussions
* [dpdk-dev] [PATCH 0/2] add alternative AVX512 offload path
@ 2021-03-17  9:14 Leyi Rong
  2021-03-17  9:14 ` [dpdk-dev] [PATCH 1/2] net/ice: add Tx " Leyi Rong
                   ` (4 more replies)
  0 siblings, 5 replies; 15+ messages in thread
From: Leyi Rong @ 2021-03-17  9:14 UTC (permalink / raw)
  To: qi.z.zhang, wenzhuo.lu; +Cc: dev, Leyi Rong

Add an alternative Rx/Tx offload path for AVX512 that supports Rx/Tx
offload features such as checksum, VLAN, RSS and QinQ offload.
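
For reference, a minimal sketch (not part of this series) of how an
application opts into these offloads at configure time; configure_port(),
the port id and the single Rx/Tx queue pair are placeholders, and RSS
hash key/function setup is elided:

#include <rte_ethdev.h>

static int
configure_port(uint16_t port_id)
{
	struct rte_eth_conf conf = { 0 };

	conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM |
			       DEV_RX_OFFLOAD_VLAN |
			       DEV_RX_OFFLOAD_RSS_HASH;
	conf.txmode.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
			       DEV_TX_OFFLOAD_TCP_CKSUM |
			       DEV_TX_OFFLOAD_VLAN_INSERT |
			       DEV_TX_OFFLOAD_QINQ_INSERT;

	/* 1 Rx + 1 Tx queue for brevity */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}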

Leyi Rong (2):
  net/ice: add Tx AVX512 offload path
  net/ice: add Rx AVX512 offload path

 drivers/net/ice/ice_rxtx.c            |  73 ++-
 drivers/net/ice/ice_rxtx.h            |  10 +
 drivers/net/ice/ice_rxtx_vec_avx2.c   |   2 +-
 drivers/net/ice/ice_rxtx_vec_avx512.c | 653 ++++++++++++++++++++++++++
 drivers/net/ice/ice_rxtx_vec_common.h | 127 ++++-
 drivers/net/ice/ice_rxtx_vec_sse.c    |   2 +-
 6 files changed, 825 insertions(+), 42 deletions(-)

-- 
2.17.1



* [dpdk-dev] [PATCH 1/2] net/ice: add Tx AVX512 offload path
  2021-03-17  9:14 [dpdk-dev] [PATCH 0/2] add alternative AVX512 offload path Leyi Rong
@ 2021-03-17  9:14 ` Leyi Rong
  2021-03-17  9:14 ` [dpdk-dev] [PATCH 2/2] net/ice: add Rx " Leyi Rong
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 15+ messages in thread
From: Leyi Rong @ 2021-03-17  9:14 UTC (permalink / raw)
  To: qi.z.zhang, wenzhuo.lu; +Cc: dev, Leyi Rong

Add an alternative Tx data path for AVX512 that supports a subset of
Tx offload features, including Tx checksum offload and VLAN/QinQ
insertion offload.

Signed-off-by: Leyi Rong <leyi.rong@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ice/ice_rxtx.c            |  27 ++++-
 drivers/net/ice/ice_rxtx.h            |   3 +
 drivers/net/ice/ice_rxtx_vec_avx2.c   |   2 +-
 drivers/net/ice/ice_rxtx_vec_avx512.c | 157 ++++++++++++++++++++++++++
 drivers/net/ice/ice_rxtx_vec_common.h | 104 ++++++++++++++---
 drivers/net/ice/ice_rxtx_vec_sse.c    |   2 +-
 6 files changed, 271 insertions(+), 24 deletions(-)

diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 530c206f4f..73f4342187 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -8,6 +8,7 @@
 
 #include "rte_pmd_ice.h"
 #include "ice_rxtx.h"
+#include "ice_rxtx_vec_common.h"
 
 #define ICE_TX_CKSUM_OFFLOAD_MASK (		 \
 		PKT_TX_IP_CKSUM |		 \
@@ -3267,12 +3268,14 @@ ice_set_tx_function(struct rte_eth_dev *dev)
 #ifdef RTE_ARCH_X86
 	struct ice_tx_queue *txq;
 	int i;
+	int tx_check_ret;
 	bool use_avx512 = false;
 	bool use_avx2 = false;
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		if (!ice_tx_vec_dev_check(dev) &&
-				rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
+		tx_check_ret = ice_tx_vec_dev_check(dev);
+		if (tx_check_ret >= 0 &&
+		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
 			ad->tx_vec_allowed = true;
 			for (i = 0; i < dev->data->nb_tx_queues; i++) {
 				txq = dev->data->tx_queues[i];
@@ -3291,12 +3294,15 @@ ice_set_tx_function(struct rte_eth_dev *dev)
 			PMD_DRV_LOG(NOTICE,
 				"AVX512 is not supported in build env");
 #endif
-			if (!use_avx512 &&
+			if (!use_avx512 && tx_check_ret == ICE_VECTOR_PATH &&
 			(rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
 			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
 			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
 				use_avx2 = true;
 
+			if (!use_avx512 && tx_check_ret == ICE_VECTOR_OFFLOAD_PATH)
+				ad->tx_vec_allowed = false;
+
 		} else {
 			ad->tx_vec_allowed = false;
 		}
@@ -3305,9 +3311,18 @@ ice_set_tx_function(struct rte_eth_dev *dev)
 	if (ad->tx_vec_allowed) {
 		if (use_avx512) {
 #ifdef CC_AVX512_SUPPORT
-			PMD_DRV_LOG(NOTICE, "Using AVX512 Vector Tx (port %d).",
-				    dev->data->port_id);
-			dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
+			if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+				PMD_DRV_LOG(NOTICE,
+					    "Using AVX512 OFFLOAD Vector Tx (port %d).",
+					    dev->data->port_id);
+				dev->tx_pkt_burst =
+					ice_xmit_pkts_vec_avx512_offload;
+			} else {
+				PMD_DRV_LOG(NOTICE,
+					    "Using AVX512 Vector Tx (port %d).",
+					    dev->data->port_id);
+				dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
+			}
 #endif
 		} else {
 			PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 99096e4c21..f72fad0255 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -261,6 +261,9 @@ uint16_t ice_recv_scattered_pkts_vec_avx512(void *rx_queue,
 					    uint16_t nb_pkts);
 uint16_t ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 				  uint16_t nb_pkts);
+uint16_t ice_xmit_pkts_vec_avx512_offload(void *tx_queue,
+					  struct rte_mbuf **tx_pkts,
+					  uint16_t nb_pkts);
 int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc);
 int ice_tx_done_cleanup(void *txq, uint32_t free_cnt);
 int ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c
index 1cc54903c6..e3b650edcc 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx2.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx2.c
@@ -969,7 +969,7 @@ ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
 
 	if (txq->nb_tx_free < txq->tx_free_thresh)
-		ice_tx_free_bufs(txq);
+		ice_tx_free_bufs_vec(txq);
 
 	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
 	if (unlikely(nb_pkts == 0))
diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index 0e5a676e68..a112d7838e 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c
@@ -1137,3 +1137,160 @@ ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	return nb_tx;
 }
+
+static inline void
+ice_vtx1_offload(volatile struct ice_tx_desc *txdp,
+		 struct rte_mbuf *pkt, uint64_t flags)
+{
+	uint64_t high_qw =
+		(ICE_TX_DESC_DTYPE_DATA |
+		 ((uint64_t)flags << ICE_TXD_QW1_CMD_S) |
+		 ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S));
+	ice_txd_enable_offload(pkt, &high_qw);
+
+	__m128i descriptor = _mm_set_epi64x(high_qw,
+					pkt->buf_iova + pkt->data_off);
+	_mm_storeu_si128((__m128i *)txdp, descriptor);
+}
+
+static inline void
+ice_vtx_offload(volatile struct ice_tx_desc *txdp,
+		struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+	const uint64_t hi_qw_tmpl = (ICE_TX_DESC_DTYPE_DATA |
+			((uint64_t)flags << ICE_TXD_QW1_CMD_S));
+
+	for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) {
+		uint64_t hi_qw3 =
+			hi_qw_tmpl |
+			((uint64_t)pkt[3]->data_len <<
+			 ICE_TXD_QW1_TX_BUF_SZ_S);
+		ice_txd_enable_offload(pkt[3], &hi_qw3);
+		uint64_t hi_qw2 =
+			hi_qw_tmpl |
+			((uint64_t)pkt[2]->data_len <<
+			 ICE_TXD_QW1_TX_BUF_SZ_S);
+		ice_txd_enable_offload(pkt[2], &hi_qw2);
+		uint64_t hi_qw1 =
+			hi_qw_tmpl |
+			((uint64_t)pkt[1]->data_len <<
+			 ICE_TXD_QW1_TX_BUF_SZ_S);
+		ice_txd_enable_offload(pkt[1], &hi_qw1);
+		uint64_t hi_qw0 =
+			hi_qw_tmpl |
+			((uint64_t)pkt[0]->data_len <<
+			 ICE_TXD_QW1_TX_BUF_SZ_S);
+		ice_txd_enable_offload(pkt[0], &hi_qw0);
+
+		__m512i desc0_3 =
+			_mm512_set_epi64
+			(hi_qw3,
+			 pkt[3]->buf_iova + pkt[3]->data_off,
+			 hi_qw2,
+			 pkt[2]->buf_iova + pkt[2]->data_off,
+			 hi_qw1,
+			 pkt[1]->buf_iova + pkt[1]->data_off,
+			 hi_qw0,
+			 pkt[0]->buf_iova + pkt[0]->data_off);
+		_mm512_storeu_si512((void *)txdp, desc0_3);
+	}
+
+	/* do any last ones */
+	while (nb_pkts) {
+		ice_vtx1_offload(txdp, *pkt, flags);
+		txdp++, pkt++, nb_pkts--;
+	}
+}
+
+static inline uint16_t
+ice_xmit_fixed_burst_vec_avx512_offload(void *tx_queue,
+					struct rte_mbuf **tx_pkts,
+					uint16_t nb_pkts)
+{
+	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
+
+	volatile struct ice_tx_desc *txdp;
+	struct ice_vec_tx_entry *txep;
+	uint16_t n, nb_commit, tx_id;
+	uint64_t flags = ICE_TD_CMD;
+	uint64_t rs = ICE_TX_DESC_CMD_RS | ICE_TD_CMD;
+
+	/* cross rx_thresh boundary is not allowed */
+	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+	if (txq->nb_tx_free < txq->tx_free_thresh)
+		ice_tx_free_bufs_avx512(txq);
+
+	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+	if (unlikely(nb_pkts == 0))
+		return 0;
+
+	tx_id = txq->tx_tail;
+	txdp = &txq->tx_ring[tx_id];
+	txep = (void *)txq->sw_ring;
+	txep += tx_id;
+
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+	n = (uint16_t)(txq->nb_tx_desc - tx_id);
+	if (nb_commit >= n) {
+		ice_tx_backlog_entry_avx512(txep, tx_pkts, n);
+
+		ice_vtx_offload(txdp, tx_pkts, n - 1, flags);
+		tx_pkts += (n - 1);
+		txdp += (n - 1);
+
+		ice_vtx1_offload(txdp, *tx_pkts++, rs);
+
+		nb_commit = (uint16_t)(nb_commit - n);
+
+		tx_id = 0;
+		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+		/* avoid reach the end of ring */
+		txdp = txq->tx_ring;
+		txep = (void *)txq->sw_ring;
+	}
+
+	ice_tx_backlog_entry_avx512(txep, tx_pkts, nb_commit);
+
+	ice_vtx_offload(txdp, tx_pkts, nb_commit, flags);
+
+	tx_id = (uint16_t)(tx_id + nb_commit);
+	if (tx_id > txq->tx_next_rs) {
+		txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+			rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
+					ICE_TXD_QW1_CMD_S);
+		txq->tx_next_rs =
+			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
+	}
+
+	txq->tx_tail = tx_id;
+
+	ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+	return nb_pkts;
+}
+
+uint16_t
+ice_xmit_pkts_vec_avx512_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
+				 uint16_t nb_pkts)
+{
+	uint16_t nb_tx = 0;
+	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
+
+	while (nb_pkts) {
+		uint16_t ret, num;
+
+		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+		ret = ice_xmit_fixed_burst_vec_avx512_offload(tx_queue,
+				&tx_pkts[nb_tx], num);
+
+		nb_tx += ret;
+		nb_pkts -= ret;
+		if (ret < num)
+			break;
+	}
+
+	return nb_tx;
+}
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index c09ac7f667..d5419e6ba4 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -73,7 +73,7 @@ ice_rx_reassemble_packets(struct ice_rx_queue *rxq, struct rte_mbuf **rx_bufs,
 }
 
 static __rte_always_inline int
-ice_tx_free_bufs(struct ice_tx_queue *txq)
+ice_tx_free_bufs_vec(struct ice_tx_queue *txq)
 {
 	struct ice_tx_entry *txep;
 	uint32_t n;
@@ -193,7 +193,8 @@ _ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq)
 #ifdef CC_AVX512_SUPPORT
 	struct rte_eth_dev *dev = txq->vsi->adapter->eth_dev;
 
-	if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512) {
+	if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512 ||
+	    dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512_offload) {
 		struct ice_vec_tx_entry *swr = (void *)txq->sw_ring;
 
 		if (txq->tx_tail < i) {
@@ -263,29 +264,38 @@ ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
 	return 0;
 }
 
-#define ICE_NO_VECTOR_FLAGS (				 \
-		DEV_TX_OFFLOAD_MULTI_SEGS |		 \
-		DEV_TX_OFFLOAD_VLAN_INSERT |		 \
-		DEV_TX_OFFLOAD_IPV4_CKSUM |		 \
-		DEV_TX_OFFLOAD_SCTP_CKSUM |		 \
-		DEV_TX_OFFLOAD_UDP_CKSUM |		 \
-		DEV_TX_OFFLOAD_TCP_TSO |		 \
+#define ICE_TX_NO_VECTOR_FLAGS (			\
+		DEV_TX_OFFLOAD_MULTI_SEGS |		\
+		DEV_TX_OFFLOAD_TCP_TSO)
+
+#define ICE_TX_VECTOR_OFFLOAD (				\
+		DEV_TX_OFFLOAD_VLAN_INSERT |		\
+		DEV_TX_OFFLOAD_QINQ_INSERT |		\
+		DEV_TX_OFFLOAD_IPV4_CKSUM |		\
+		DEV_TX_OFFLOAD_SCTP_CKSUM |		\
+		DEV_TX_OFFLOAD_UDP_CKSUM |		\
 		DEV_TX_OFFLOAD_TCP_CKSUM)
 
+#define ICE_VECTOR_PATH		0
+#define ICE_VECTOR_OFFLOAD_PATH	1
+
 static inline int
 ice_tx_vec_queue_default(struct ice_tx_queue *txq)
 {
 	if (!txq)
 		return -1;
 
-	if (txq->offloads & ICE_NO_VECTOR_FLAGS)
-		return -1;
-
 	if (txq->tx_rs_thresh < ICE_VPMD_TX_BURST ||
 	    txq->tx_rs_thresh > ICE_TX_MAX_FREE_BUF_SZ)
 		return -1;
 
-	return 0;
+	if (txq->offloads & ICE_TX_NO_VECTOR_FLAGS)
+		return -1;
+
+	if (txq->offloads & ICE_TX_VECTOR_OFFLOAD)
+		return ICE_VECTOR_OFFLOAD_PATH;
+
+	return ICE_VECTOR_PATH;
 }
 
 static inline int
@@ -308,14 +318,76 @@ ice_tx_vec_dev_check_default(struct rte_eth_dev *dev)
 {
 	int i;
 	struct ice_tx_queue *txq;
+	int ret = 0;
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		txq = dev->data->tx_queues[i];
-		if (ice_tx_vec_queue_default(txq))
-			return -1;
+		ret = ice_tx_vec_queue_default(txq);
+		if (ret < 0)
+			break;
 	}
 
-	return 0;
+	return ret;
 }
 
+static inline void
+ice_txd_enable_offload(struct rte_mbuf *tx_pkt,
+		       uint64_t *txd_hi)
+{
+	uint64_t ol_flags = tx_pkt->ol_flags;
+	uint32_t td_cmd = 0;
+	uint32_t td_offset = 0;
+
+	/* Tx Checksum Offload */
+	/* SET MACLEN */
+	td_offset |= (tx_pkt->l2_len >> 1) <<
+			ICE_TX_DESC_LEN_MACLEN_S;
+
+	/* Enable L3 checksum offload */
+	if (ol_flags & PKT_TX_IP_CKSUM) {
+		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
+		td_offset |= (tx_pkt->l3_len >> 2) <<
+			ICE_TX_DESC_LEN_IPLEN_S;
+	} else if (ol_flags & PKT_TX_IPV4) {
+		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
+		td_offset |= (tx_pkt->l3_len >> 2) <<
+			ICE_TX_DESC_LEN_IPLEN_S;
+	} else if (ol_flags & PKT_TX_IPV6) {
+		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
+		td_offset |= (tx_pkt->l3_len >> 2) <<
+			ICE_TX_DESC_LEN_IPLEN_S;
+	}
+
+	/* Enable L4 checksum offloads */
+	switch (ol_flags & PKT_TX_L4_MASK) {
+	case PKT_TX_TCP_CKSUM:
+		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
+		td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
+			ICE_TX_DESC_LEN_L4_LEN_S;
+		break;
+	case PKT_TX_SCTP_CKSUM:
+		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
+		td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
+			ICE_TX_DESC_LEN_L4_LEN_S;
+		break;
+	case PKT_TX_UDP_CKSUM:
+		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
+		td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
+			ICE_TX_DESC_LEN_L4_LEN_S;
+		break;
+	default:
+		break;
+	}
+
+	*txd_hi |= ((uint64_t)td_offset) << ICE_TXD_QW1_OFFSET_S;
+
+	/* Tx VLAN/QINQ insertion Offload */
+	if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
+		td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
+		*txd_hi |= ((uint64_t)tx_pkt->vlan_tci <<
+				ICE_TXD_QW1_L2TAG1_S);
+	}
+
+	*txd_hi |= ((uint64_t)td_cmd) << ICE_TXD_QW1_CMD_S;
+}
 #endif
diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c
index 3e467c48f1..6029cc2d99 100644
--- a/drivers/net/ice/ice_rxtx_vec_sse.c
+++ b/drivers/net/ice/ice_rxtx_vec_sse.c
@@ -702,7 +702,7 @@ ice_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
 
 	if (txq->nb_tx_free < txq->tx_free_thresh)
-		ice_tx_free_bufs(txq);
+		ice_tx_free_bufs_vec(txq);
 
 	nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
 	nb_commit = nb_pkts;
-- 
2.17.1



* [dpdk-dev] [PATCH 2/2] net/ice: add Rx AVX512 offload path
  2021-03-17  9:14 [dpdk-dev] [PATCH 0/2] add alternative AVX512 offload path Leyi Rong
  2021-03-17  9:14 ` [dpdk-dev] [PATCH 1/2] net/ice: add Tx " Leyi Rong
@ 2021-03-17  9:14 ` Leyi Rong
  2021-03-18 10:35   ` Van Haaren, Harry
  2021-04-12  4:03 ` [dpdk-dev] [PATCH v3 0/2] add alternative " Leyi Rong
                   ` (2 subsequent siblings)
  4 siblings, 1 reply; 15+ messages in thread
From: Leyi Rong @ 2021-03-17  9:14 UTC (permalink / raw)
  To: qi.z.zhang, wenzhuo.lu; +Cc: dev, Leyi Rong

Split the AVX512 Rx data path into two: one is the basic path, the
other supports additional Rx offload features, including Rx checksum
offload, Rx VLAN offload and RSS offload.

Signed-off-by: Leyi Rong <leyi.rong@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ice/ice_rxtx.c            |  46 ++-
 drivers/net/ice/ice_rxtx.h            |   7 +
 drivers/net/ice/ice_rxtx_vec_avx512.c | 496 ++++++++++++++++++++++++++
 drivers/net/ice/ice_rxtx_vec_common.h |  49 ++-
 4 files changed, 567 insertions(+), 31 deletions(-)

diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 73f4342187..16b374ac8d 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -1059,6 +1059,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 	uint32_t ring_size;
 	uint16_t len;
 	int use_def_burst_func = 1;
+	uint64_t offloads;
 
 	if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
 	    nb_desc > ICE_MAX_RING_DESC ||
@@ -1068,6 +1069,8 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
 	/* Free memory if needed */
 	if (dev->data->rx_queues[queue_idx]) {
 		ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
@@ -1088,6 +1091,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->nb_rx_desc = nb_desc;
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 	rxq->queue_id = queue_idx;
+	rxq->offloads = offloads;
 
 	rxq->reg_idx = vsi->base_queue + queue_idx;
 	rxq->port_id = dev->data->port_id;
@@ -3052,12 +3056,14 @@ ice_set_rx_function(struct rte_eth_dev *dev)
 #ifdef RTE_ARCH_X86
 	struct ice_rx_queue *rxq;
 	int i;
+	int rx_check_ret;
 	bool use_avx512 = false;
 	bool use_avx2 = false;
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed &&
-				rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
+		rx_check_ret = ice_rx_vec_dev_check(dev);
+		if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
+		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
 			ad->rx_vec_allowed = true;
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
@@ -3091,11 +3097,19 @@ ice_set_rx_function(struct rte_eth_dev *dev)
 		if (dev->data->scattered_rx) {
 			if (use_avx512) {
 #ifdef CC_AVX512_SUPPORT
-				PMD_DRV_LOG(NOTICE,
-					"Using AVX512 Vector Scattered Rx (port %d).",
-					dev->data->port_id);
-				dev->rx_pkt_burst =
-					ice_recv_scattered_pkts_vec_avx512;
+				if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+					PMD_DRV_LOG(NOTICE,
+						"Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
+						dev->data->port_id);
+					dev->rx_pkt_burst =
+						ice_recv_scattered_pkts_vec_avx512_offload;
+				} else {
+					PMD_DRV_LOG(NOTICE,
+						"Using AVX512 Vector Scattered Rx (port %d).",
+						dev->data->port_id);
+					dev->rx_pkt_burst =
+						ice_recv_scattered_pkts_vec_avx512;
+				}
 #endif
 			} else {
 				PMD_DRV_LOG(DEBUG,
@@ -3109,11 +3123,19 @@ ice_set_rx_function(struct rte_eth_dev *dev)
 		} else {
 			if (use_avx512) {
 #ifdef CC_AVX512_SUPPORT
-				PMD_DRV_LOG(NOTICE,
-					"Using AVX512 Vector Rx (port %d).",
-					dev->data->port_id);
-				dev->rx_pkt_burst =
-					ice_recv_pkts_vec_avx512;
+				if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+					PMD_DRV_LOG(NOTICE,
+						"Using AVX512 OFFLOAD Vector Rx (port %d).",
+						dev->data->port_id);
+					dev->rx_pkt_burst =
+						ice_recv_pkts_vec_avx512_offload;
+				} else {
+					PMD_DRV_LOG(NOTICE,
+						"Using AVX512 Vector Rx (port %d).",
+						dev->data->port_id);
+					dev->rx_pkt_burst =
+						ice_recv_pkts_vec_avx512;
+				}
 #endif
 			} else {
 				PMD_DRV_LOG(DEBUG,
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index f72fad0255..b29387ca0f 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -88,6 +88,7 @@ struct ice_rx_queue {
 	uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
 	ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
 	ice_rx_release_mbufs_t rx_rel_mbufs;
+	uint64_t offloads;
 };
 
 struct ice_tx_entry {
@@ -256,9 +257,15 @@ uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 				uint16_t nb_pkts);
 uint16_t ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
 				  uint16_t nb_pkts);
+uint16_t ice_recv_pkts_vec_avx512_offload(void *rx_queue,
+					  struct rte_mbuf **rx_pkts,
+					  uint16_t nb_pkts);
 uint16_t ice_recv_scattered_pkts_vec_avx512(void *rx_queue,
 					    struct rte_mbuf **rx_pkts,
 					    uint16_t nb_pkts);
+uint16_t ice_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
+						    struct rte_mbuf **rx_pkts,
+						    uint16_t nb_pkts);
 uint16_t ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 				  uint16_t nb_pkts);
 uint16_t ice_xmit_pkts_vec_avx512_offload(void *tx_queue,
diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index a112d7838e..bf0ac04be6 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c
@@ -221,6 +221,422 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
 			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
 
+	uint16_t i, received;
+
+	for (i = 0, received = 0; i < nb_pkts;
+	     i += ICE_DESCS_PER_LOOP_AVX,
+	     rxdp += ICE_DESCS_PER_LOOP_AVX) {
+		/* step 1, copy over 8 mbuf pointers to rx_pkts array */
+		_mm256_storeu_si256((void *)&rx_pkts[i],
+				    _mm256_loadu_si256((void *)&sw_ring[i]));
+#ifdef RTE_ARCH_X86_64
+		_mm256_storeu_si256
+			((void *)&rx_pkts[i + 4],
+			 _mm256_loadu_si256((void *)&sw_ring[i + 4]));
+#endif
+
+		__m512i raw_desc0_3, raw_desc4_7;
+		__m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7;
+
+		/* load in descriptors, in reverse order */
+		const __m128i raw_desc7 =
+			_mm_load_si128((void *)(rxdp + 7));
+		rte_compiler_barrier();
+		const __m128i raw_desc6 =
+			_mm_load_si128((void *)(rxdp + 6));
+		rte_compiler_barrier();
+		const __m128i raw_desc5 =
+			_mm_load_si128((void *)(rxdp + 5));
+		rte_compiler_barrier();
+		const __m128i raw_desc4 =
+			_mm_load_si128((void *)(rxdp + 4));
+		rte_compiler_barrier();
+		const __m128i raw_desc3 =
+			_mm_load_si128((void *)(rxdp + 3));
+		rte_compiler_barrier();
+		const __m128i raw_desc2 =
+			_mm_load_si128((void *)(rxdp + 2));
+		rte_compiler_barrier();
+		const __m128i raw_desc1 =
+			_mm_load_si128((void *)(rxdp + 1));
+		rte_compiler_barrier();
+		const __m128i raw_desc0 =
+			_mm_load_si128((void *)(rxdp + 0));
+
+		raw_desc6_7 =
+			_mm256_inserti128_si256
+				(_mm256_castsi128_si256(raw_desc6),
+				 raw_desc7, 1);
+		raw_desc4_5 =
+			_mm256_inserti128_si256
+				(_mm256_castsi128_si256(raw_desc4),
+				 raw_desc5, 1);
+		raw_desc2_3 =
+			_mm256_inserti128_si256
+				(_mm256_castsi128_si256(raw_desc2),
+				 raw_desc3, 1);
+		raw_desc0_1 =
+			_mm256_inserti128_si256
+				(_mm256_castsi128_si256(raw_desc0),
+				 raw_desc1, 1);
+
+		raw_desc4_7 =
+			_mm512_inserti64x4
+				(_mm512_castsi256_si512(raw_desc4_5),
+				 raw_desc6_7, 1);
+		raw_desc0_3 =
+			_mm512_inserti64x4
+				(_mm512_castsi256_si512(raw_desc0_1),
+				 raw_desc2_3, 1);
+
+		if (split_packet) {
+			int j;
+
+			for (j = 0; j < ICE_DESCS_PER_LOOP_AVX; j++)
+				rte_mbuf_prefetch_part2(rx_pkts[i + j]);
+		}
+
+		/**
+		 * convert descriptors 0-7 into mbufs, re-arrange fields.
+		 * Then write into the mbuf.
+		 */
+		__m512i mb4_7 = _mm512_shuffle_epi8(raw_desc4_7, shuf_msk);
+		__m512i mb0_3 = _mm512_shuffle_epi8(raw_desc0_3, shuf_msk);
+
+		mb4_7 = _mm512_add_epi32(mb4_7, crc_adjust);
+		mb0_3 = _mm512_add_epi32(mb0_3, crc_adjust);
+
+		/**
+		 * to get packet types, ptype is located in bit16-25
+		 * of each 128bits
+		 */
+		const __m512i ptype_mask =
+			_mm512_set1_epi16(ICE_RX_FLEX_DESC_PTYPE_M);
+
+		/**
+		 * to get packet types, ptype is located in bit16-25
+		 * of each 128bits
+		 */
+		const __m512i ptypes4_7 =
+			_mm512_and_si512(raw_desc4_7, ptype_mask);
+		const __m512i ptypes0_3 =
+			_mm512_and_si512(raw_desc0_3, ptype_mask);
+
+		const __m256i ptypes6_7 =
+			_mm512_extracti64x4_epi64(ptypes4_7, 1);
+		const __m256i ptypes4_5 =
+			_mm512_extracti64x4_epi64(ptypes4_7, 0);
+		const __m256i ptypes2_3 =
+			_mm512_extracti64x4_epi64(ptypes0_3, 1);
+		const __m256i ptypes0_1 =
+			_mm512_extracti64x4_epi64(ptypes0_3, 0);
+		const uint16_t ptype7 = _mm256_extract_epi16(ptypes6_7, 9);
+		const uint16_t ptype6 = _mm256_extract_epi16(ptypes6_7, 1);
+		const uint16_t ptype5 = _mm256_extract_epi16(ptypes4_5, 9);
+		const uint16_t ptype4 = _mm256_extract_epi16(ptypes4_5, 1);
+		const uint16_t ptype3 = _mm256_extract_epi16(ptypes2_3, 9);
+		const uint16_t ptype2 = _mm256_extract_epi16(ptypes2_3, 1);
+		const uint16_t ptype1 = _mm256_extract_epi16(ptypes0_1, 9);
+		const uint16_t ptype0 = _mm256_extract_epi16(ptypes0_1, 1);
+
+		const __m512i ptype4_7 = _mm512_set_epi32
+			(0, 0, 0, ptype_tbl[ptype7],
+			 0, 0, 0, ptype_tbl[ptype6],
+			 0, 0, 0, ptype_tbl[ptype5],
+			 0, 0, 0, ptype_tbl[ptype4]);
+		const __m512i ptype0_3 = _mm512_set_epi32
+			(0, 0, 0, ptype_tbl[ptype3],
+			 0, 0, 0, ptype_tbl[ptype2],
+			 0, 0, 0, ptype_tbl[ptype1],
+			 0, 0, 0, ptype_tbl[ptype0]);
+
+		mb4_7 = _mm512_mask_blend_epi32(0x1111, mb4_7, ptype4_7);
+		mb0_3 = _mm512_mask_blend_epi32(0x1111, mb0_3, ptype0_3);
+
+		__m256i mb4_5 = _mm512_extracti64x4_epi64(mb4_7, 0);
+		__m256i mb6_7 = _mm512_extracti64x4_epi64(mb4_7, 1);
+		__m256i mb0_1 = _mm512_extracti64x4_epi64(mb0_3, 0);
+		__m256i mb2_3 = _mm512_extracti64x4_epi64(mb0_3, 1);
+
+		/**
+		 * use permute/extract to get status content
+		 * After the operations, the packets status flags are in the
+		 * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6]
+		 */
+		/* merge the status bits into one register */
+		const __m512i status_permute_msk = _mm512_set_epi32
+			(0, 0, 0, 0,
+			 0, 0, 0, 0,
+			 22, 30, 6, 14,
+			 18, 26, 2, 10);
+		const __m512i raw_status0_7 = _mm512_permutex2var_epi32
+			(raw_desc4_7, status_permute_msk, raw_desc0_3);
+		__m256i status0_7 = _mm512_extracti64x4_epi64
+			(raw_status0_7, 0);
+
+		__m256i mbuf_flags = _mm256_set1_epi32(0);
+
+		if (rxq->fdir_enabled) {
+			const __m256i fdir_id4_7 =
+				_mm256_unpackhi_epi32(raw_desc6_7, raw_desc4_5);
+
+			const __m256i fdir_id0_3 =
+				_mm256_unpackhi_epi32(raw_desc2_3, raw_desc0_1);
+
+			const __m256i fdir_id0_7 =
+				_mm256_unpackhi_epi64(fdir_id4_7, fdir_id0_3);
+
+			mbuf_flags =
+				ice_flex_rxd_to_fdir_flags_vec_avx512
+					(fdir_id0_7);
+
+			/* write to mbuf: have to use scalar store here */
+			rx_pkts[i + 0]->hash.fdir.hi =
+				_mm256_extract_epi32(fdir_id0_7, 3);
+
+			rx_pkts[i + 1]->hash.fdir.hi =
+				_mm256_extract_epi32(fdir_id0_7, 7);
+
+			rx_pkts[i + 2]->hash.fdir.hi =
+				_mm256_extract_epi32(fdir_id0_7, 2);
+
+			rx_pkts[i + 3]->hash.fdir.hi =
+				_mm256_extract_epi32(fdir_id0_7, 6);
+
+			rx_pkts[i + 4]->hash.fdir.hi =
+				_mm256_extract_epi32(fdir_id0_7, 1);
+
+			rx_pkts[i + 5]->hash.fdir.hi =
+				_mm256_extract_epi32(fdir_id0_7, 5);
+
+			rx_pkts[i + 6]->hash.fdir.hi =
+				_mm256_extract_epi32(fdir_id0_7, 0);
+
+			rx_pkts[i + 7]->hash.fdir.hi =
+				_mm256_extract_epi32(fdir_id0_7, 4);
+		} /* if() on fdir_enabled */
+
+		/**
+		 * At this point, we have the 8 sets of flags in the low 16-bits
+		 * of each 32-bit value in vlan0.
+		 * We want to extract these, and merge them with the mbuf init
+		 * data so we can do a single write to the mbuf to set the flags
+		 * and all the other initialization fields. Extracting the
+		 * appropriate flags means that we have to do a shift and blend
+		 * for each mbuf before we do the write. However, we can also
+		 * add in the previously computed rx_descriptor fields to
+		 * make a single 256-bit write per mbuf
+		 */
+		/* check the structure matches expectations */
+		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+				 offsetof(struct rte_mbuf, rearm_data) + 8);
+		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
+				 RTE_ALIGN(offsetof(struct rte_mbuf,
+						    rearm_data),
+					   16));
+		/* build up data and do writes */
+		__m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5,
+			rearm6, rearm7;
+
+		rearm6 = _mm256_blend_epi32(mbuf_init,
+					    _mm256_slli_si256(mbuf_flags, 8),
+					    0x04);
+		rearm4 = _mm256_blend_epi32(mbuf_init,
+					    _mm256_slli_si256(mbuf_flags, 4),
+					    0x04);
+		rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04);
+		rearm0 = _mm256_blend_epi32(mbuf_init,
+					    _mm256_srli_si256(mbuf_flags, 4),
+					    0x04);
+
+		/* permute to add in the rx_descriptor e.g. rss fields */
+		rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20);
+		rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20);
+		rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
+		rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);
+
+		/* write to mbuf */
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data,
+				    rearm6);
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data,
+				    rearm4);
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data,
+				    rearm2);
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data,
+				    rearm0);
+
+		/* repeat for the odd mbufs */
+		const __m256i odd_flags =
+			_mm256_castsi128_si256
+				(_mm256_extracti128_si256(mbuf_flags, 1));
+		rearm7 = _mm256_blend_epi32(mbuf_init,
+					    _mm256_slli_si256(odd_flags, 8),
+					    0x04);
+		rearm5 = _mm256_blend_epi32(mbuf_init,
+					    _mm256_slli_si256(odd_flags, 4),
+					    0x04);
+		rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04);
+		rearm1 = _mm256_blend_epi32(mbuf_init,
+					    _mm256_srli_si256(odd_flags, 4),
+					    0x04);
+
+		/* since odd mbufs are already in hi 128-bits use blend */
+		rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0);
+		rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0);
+		rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
+		rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
+		/* again write to mbufs */
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data,
+				    rearm7);
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data,
+				    rearm5);
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data,
+				    rearm3);
+		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data,
+				    rearm1);
+
+		/* extract and record EOP bit */
+		if (split_packet) {
+			const __m128i eop_mask =
+				_mm_set1_epi16(1 << ICE_RX_DESC_STATUS_EOF_S);
+			const __m256i eop_bits256 = _mm256_and_si256(status0_7,
+								     eop_check);
+			/* pack status bits into a single 128-bit register */
+			const __m128i eop_bits =
+				_mm_packus_epi32
+					(_mm256_castsi256_si128(eop_bits256),
+					 _mm256_extractf128_si256(eop_bits256,
+								  1));
+			/**
+			 * flip bits, and mask out the EOP bit, which is now
+			 * a split-packet bit i.e. !EOP, rather than EOP one.
+			 */
+			__m128i split_bits = _mm_andnot_si128(eop_bits,
+					eop_mask);
+			/**
+			 * eop bits are out of order, so we need to shuffle them
+			 * back into order again. In doing so, only use low 8
+			 * bits, which acts like another pack instruction
+			 * The original order is (hi->lo): 1,3,5,7,0,2,4,6
+			 * [Since we use epi8, the 16-bit positions are
+			 * multiplied by 2 in the eop_shuffle value.]
+			 */
+			__m128i eop_shuffle =
+				_mm_set_epi8(/* zero hi 64b */
+					     0xFF, 0xFF, 0xFF, 0xFF,
+					     0xFF, 0xFF, 0xFF, 0xFF,
+					     /* move values to lo 64b */
+					     8, 0, 10, 2,
+					     12, 4, 14, 6);
+			split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle);
+			*(uint64_t *)split_packet =
+				_mm_cvtsi128_si64(split_bits);
+			split_packet += ICE_DESCS_PER_LOOP_AVX;
+		}
+
+		/* perform dd_check */
+		status0_7 = _mm256_and_si256(status0_7, dd_check);
+		status0_7 = _mm256_packs_epi32(status0_7,
+					       _mm256_setzero_si256());
+
+		uint64_t burst = __builtin_popcountll
+					(_mm_cvtsi128_si64
+						(_mm256_extracti128_si256
+							(status0_7, 1)));
+		burst += __builtin_popcountll
+				(_mm_cvtsi128_si64
+					(_mm256_castsi256_si128(status0_7)));
+		received += burst;
+		if (burst != ICE_DESCS_PER_LOOP_AVX)
+			break;
+	}
+
+	/* update tail pointers */
+	rxq->rx_tail += received;
+	rxq->rx_tail &= (rxq->nb_rx_desc - 1);
+	if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */
+		rxq->rx_tail--;
+		received--;
+	}
+	rxq->rxrearm_nb += received;
+	return received;
+}
+
+static inline uint16_t
+_ice_recv_raw_pkts_vec_avx512_offload(struct ice_rx_queue *rxq,
+				      struct rte_mbuf **rx_pkts,
+				      uint16_t nb_pkts, uint8_t *split_packet)
+{
+	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+	const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
+			0, rxq->mbuf_initializer);
+	struct ice_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail];
+	volatile union ice_rx_flex_desc *rxdp = rxq->rx_ring + rxq->rx_tail;
+
+	rte_prefetch0(rxdp);
+
+	/* nb_pkts has to be floor-aligned to ICE_DESCS_PER_LOOP_AVX */
+	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, ICE_DESCS_PER_LOOP_AVX);
+
+	/* See if we need to rearm the RX queue - gives the prefetch a bit
+	 * of time to act
+	 */
+	if (rxq->rxrearm_nb > ICE_RXQ_REARM_THRESH)
+		ice_rxq_rearm(rxq);
+
+	/* Before we start moving massive data around, check to see if
+	 * there is actually a packet available
+	 */
+	if (!(rxdp->wb.status_error0 &
+			rte_cpu_to_le_32(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
+		return 0;
+
+	/* constants used in processing loop */
+	const __m512i crc_adjust =
+		_mm512_set4_epi32
+			(0,             /* ignore non-length fields */
+			 -rxq->crc_len, /* sub crc on data_len */
+			 -rxq->crc_len, /* sub crc on pkt_len */
+			 0              /* ignore non-length fields */
+			);
+
+	/* 8 packets DD mask, LSB in each 32-bit value */
+	const __m256i dd_check = _mm256_set1_epi32(1);
+
+	/* 8 packets EOP mask, second-LSB in each 32-bit value */
+	const __m256i eop_check = _mm256_slli_epi32(dd_check,
+			ICE_RX_DESC_STATUS_EOF_S);
+
+	/* mask to shuffle from desc. to mbuf (4 descriptors)*/
+	const __m512i shuf_msk =
+		_mm512_set4_epi32
+			(/* rss hash parsed separately */
+			 0xFFFFFFFF,
+			 /* octet 10~11, 16 bits vlan_macip */
+			 /* octet 4~5, 16 bits data_len */
+			 11 << 24 | 10 << 16 | 5 << 8 | 4,
+			 /* skip hi 16 bits pkt_len, zero out */
+			 /* octet 4~5, 16 bits pkt_len */
+			 0xFFFF << 16 | 5 << 8 | 4,
+			 /* pkt_type set as unknown */
+			 0xFFFFFFFF
+			);
+
+	/**
+	 * compile-time check the above crc and shuffle layout is correct.
+	 * NOTE: the first field (lowest address) is given last in set_epi
+	 * calls above.
+	 */
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
+			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
+			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
+	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
+			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
+
+	/* following code block is for Rx Checksum Offload */
 	/* Status/Error flag masks */
 	/**
 	 * mask everything except Checksum Reports, RSS indication
@@ -806,6 +1222,18 @@ ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts, NULL);
 }
 
+/**
+ * Notice:
+ * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
+ */
+uint16_t
+ice_recv_pkts_vec_avx512_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+				 uint16_t nb_pkts)
+{
+	return _ice_recv_raw_pkts_vec_avx512_offload(rx_queue, rx_pkts,
+						     nb_pkts, NULL);
+}
+
 /**
  * vPMD receive routine that reassembles single burst of 32 scattered packets
  * Notice:
@@ -847,6 +1275,48 @@ ice_recv_scattered_burst_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
 					     &split_flags[i]);
 }
 
+/**
+ * vPMD receive routine that reassembles single burst of 32 scattered packets
+ * Notice:
+ * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
+ */
+static uint16_t
+ice_recv_scattered_burst_vec_avx512_offload(void *rx_queue,
+					    struct rte_mbuf **rx_pkts,
+					    uint16_t nb_pkts)
+{
+	struct ice_rx_queue *rxq = rx_queue;
+	uint8_t split_flags[ICE_VPMD_RX_BURST] = {0};
+
+	/* get some new buffers */
+	uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx512_offload(rxq,
+				rx_pkts, nb_pkts, split_flags);
+	if (nb_bufs == 0)
+		return 0;
+
+	/* happy day case, full burst + no packets to be joined */
+	const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+	if (!rxq->pkt_first_seg &&
+	    split_fl64[0] == 0 && split_fl64[1] == 0 &&
+	    split_fl64[2] == 0 && split_fl64[3] == 0)
+		return nb_bufs;
+
+	/* reassemble any packets that need reassembly */
+	unsigned int i = 0;
+
+	if (!rxq->pkt_first_seg) {
+		/* find the first split flag, and only reassemble then */
+		while (i < nb_bufs && !split_flags[i])
+			i++;
+		if (i == nb_bufs)
+			return nb_bufs;
+		rxq->pkt_first_seg = rx_pkts[i];
+	}
+	return i + ice_rx_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+					     &split_flags[i]);
+}
+
 /**
  * vPMD receive routine that reassembles scattered packets.
  * Main receive routine that can handle arbitrary burst sizes
@@ -871,6 +1341,32 @@ ice_recv_scattered_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
 				rx_pkts + retval, nb_pkts);
 }
 
+/**
+ * vPMD receive routine that reassembles scattered packets.
+ * Main receive routine that can handle arbitrary burst sizes
+ * Notice:
+ * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
+ */
+uint16_t
+ice_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
+					   struct rte_mbuf **rx_pkts,
+					   uint16_t nb_pkts)
+{
+	uint16_t retval = 0;
+
+	while (nb_pkts > ICE_VPMD_RX_BURST) {
+		uint16_t burst =
+			ice_recv_scattered_burst_vec_avx512_offload(rx_queue,
+				rx_pkts + retval, ICE_VPMD_RX_BURST);
+		retval += burst;
+		nb_pkts -= burst;
+		if (burst < ICE_VPMD_RX_BURST)
+			return retval;
+	}
+	return retval + ice_recv_scattered_burst_vec_avx512_offload(rx_queue,
+				rx_pkts + retval, nb_pkts);
+}
+
 static __rte_always_inline int
 ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
 {
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index d5419e6ba4..e555f797e1 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -243,6 +243,27 @@ ice_rxq_vec_setup_default(struct ice_rx_queue *rxq)
 	return 0;
 }
 
+#define ICE_TX_NO_VECTOR_FLAGS (			\
+		DEV_TX_OFFLOAD_MULTI_SEGS |		\
+		DEV_TX_OFFLOAD_TCP_TSO)
+
+#define ICE_TX_VECTOR_OFFLOAD (				\
+		DEV_TX_OFFLOAD_VLAN_INSERT |		\
+		DEV_TX_OFFLOAD_QINQ_INSERT |		\
+		DEV_TX_OFFLOAD_IPV4_CKSUM |		\
+		DEV_TX_OFFLOAD_SCTP_CKSUM |		\
+		DEV_TX_OFFLOAD_UDP_CKSUM |		\
+		DEV_TX_OFFLOAD_TCP_CKSUM)
+
+#define ICE_RX_VECTOR_OFFLOAD (				\
+		DEV_RX_OFFLOAD_CHECKSUM |		\
+		DEV_RX_OFFLOAD_SCTP_CKSUM |		\
+		DEV_RX_OFFLOAD_VLAN |			\
+		DEV_RX_OFFLOAD_RSS_HASH)
+
+#define ICE_VECTOR_PATH		0
+#define ICE_VECTOR_OFFLOAD_PATH	1
+
 static inline int
 ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
 {
@@ -261,23 +282,11 @@ ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
 	if (rxq->proto_xtr != PROTO_XTR_NONE)
 		return -1;
 
-	return 0;
-}
-
-#define ICE_TX_NO_VECTOR_FLAGS (			\
-		DEV_TX_OFFLOAD_MULTI_SEGS |		\
-		DEV_TX_OFFLOAD_TCP_TSO)
-
-#define ICE_TX_VECTOR_OFFLOAD (				\
-		DEV_TX_OFFLOAD_VLAN_INSERT |		\
-		DEV_TX_OFFLOAD_QINQ_INSERT |		\
-		DEV_TX_OFFLOAD_IPV4_CKSUM |		\
-		DEV_TX_OFFLOAD_SCTP_CKSUM |		\
-		DEV_TX_OFFLOAD_UDP_CKSUM |		\
-		DEV_TX_OFFLOAD_TCP_CKSUM)
+	if (rxq->offloads & ICE_RX_VECTOR_OFFLOAD)
+		return ICE_VECTOR_OFFLOAD_PATH;
 
-#define ICE_VECTOR_PATH		0
-#define ICE_VECTOR_OFFLOAD_PATH	1
+	return ICE_VECTOR_PATH;
+}
 
 static inline int
 ice_tx_vec_queue_default(struct ice_tx_queue *txq)
@@ -303,14 +312,16 @@ ice_rx_vec_dev_check_default(struct rte_eth_dev *dev)
 {
 	int i;
 	struct ice_rx_queue *rxq;
+	int ret = 0;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
-		if (ice_rx_vec_queue_default(rxq))
-			return -1;
+		ret = (ice_rx_vec_queue_default(rxq));
+		if (ret < 0)
+			break;
 	}
 
-	return 0;
+	return ret;
 }
 
 static inline int
-- 
2.17.1



* Re: [dpdk-dev] [PATCH 2/2] net/ice: add Rx AVX512 offload path
  2021-03-17  9:14 ` [dpdk-dev] [PATCH 2/2] net/ice: add Rx " Leyi Rong
@ 2021-03-18 10:35   ` Van Haaren, Harry
  0 siblings, 0 replies; 15+ messages in thread
From: Van Haaren, Harry @ 2021-03-18 10:35 UTC (permalink / raw)
  To: Rong, Leyi, Zhang, Qi Z, Lu, Wenzhuo; +Cc: dpdk-dev

> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Leyi Rong
> Sent: Wednesday, March 17, 2021 9:14 AM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>; Lu, Wenzhuo <wenzhuo.lu@intel.com>
> Cc: dev@dpdk.org; Rong, Leyi <leyi.rong@intel.com>
> Subject: [dpdk-dev] [PATCH 2/2] net/ice: add Rx AVX512 offload path
> 
> Split the AVX512 Rx data path into two: one is the basic path, the
> other supports additional Rx offload features, including Rx checksum
> offload, Rx VLAN offload and RSS offload.
> 
> Signed-off-by: Leyi Rong <leyi.rong@intel.com>
> Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>


Hi Leyi and Wenzhuo,

I'm a bit concerned about the code duplication in the Rx datapath in this
patch, as it duplicates the core desc-to-mbuf Rx loop.

I loaded the following functions and compared them side-by-side in "meld",
and it should be possible to "specialize" away the differences:
_ice_recv_raw_pkts_vec_avx512() /* original */
_ice_recv_raw_pkts_vec_avx512_offload() /* with offload */

The idea is to specialize the implementation by adding a "do_offload"
parameter to _ice_recv_raw_pkts_vec_avx512(), and to branch on it with an
if (do_offload) wherever the offload and non-offload paths behave differently.

Because "do_offload" is a compile-time constant at each call site, the
compiler will remove the branches when inlining that function, leaving you
with only one version of the code to maintain and no performance penalty.
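
For example, a minimal sketch of the pattern (placeholder names such as
demo_rxq and demo_recv_raw_pkts(), not the actual ice code):

#include <stdbool.h>
#include <rte_common.h>
#include <rte_mbuf.h>

/* Placeholder queue type; the real driver uses struct ice_rx_queue. */
struct demo_rxq;

static __rte_always_inline uint16_t
demo_recv_raw_pkts(struct demo_rxq *rxq, struct rte_mbuf **rx_pkts,
		   uint16_t nb_pkts, bool do_offload)
{
	uint16_t received;

	RTE_SET_USED(rxq);
	RTE_SET_USED(rx_pkts);

	for (received = 0; received < nb_pkts; received++) {
		/* shared descriptor-to-mbuf conversion goes here */
		if (do_offload) {
			/* parse checksum/VLAN/RSS words into ol_flags;
			 * do_offload is a literal at each call site, so
			 * the compiler deletes this branch entirely in
			 * the basic path.
			 */
		}
	}
	return received;
}

uint16_t
demo_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	return demo_recv_raw_pkts(rxq, rx_pkts, nb_pkts, false);
}

uint16_t
demo_recv_pkts_offload(void *rxq, struct rte_mbuf **rx_pkts,
		       uint16_t nb_pkts)
{
	return demo_recv_raw_pkts(rxq, rx_pkts, nb_pkts, true);
}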

If my suggestion around parameterizing, specializing and inlining isn't clear, please ask and I can
try to explain better.

Regards, -Harry

<snip patch contents>


* [dpdk-dev] [PATCH v3 0/2] add alternative AVX512 offload path
  2021-03-17  9:14 [dpdk-dev] [PATCH 0/2] add alternative AVX512 offload path Leyi Rong
  2021-03-17  9:14 ` [dpdk-dev] [PATCH 1/2] net/ice: add Tx " Leyi Rong
  2021-03-17  9:14 ` [dpdk-dev] [PATCH 2/2] net/ice: add Rx " Leyi Rong
@ 2021-04-12  4:03 ` Leyi Rong
  2021-04-12  4:03   ` [dpdk-dev] [PATCH v3 1/2] net/ice: add Tx " Leyi Rong
  2021-04-12  4:03   ` [dpdk-dev] [PATCH v3 2/2] net/ice: add Rx " Leyi Rong
  2021-04-15  1:13 ` [dpdk-dev] [PATCH v4 0/2] add alternative " Leyi Rong
  2021-04-15  8:58 ` [dpdk-dev] [PATCH v5 0/2] add alternative " Leyi Rong
  4 siblings, 2 replies; 15+ messages in thread
From: Leyi Rong @ 2021-04-12  4:03 UTC (permalink / raw)
  To: qi.z.zhang, wenzhuo.lu; +Cc: dev, Leyi Rong

Add an alternative Rx/Tx offload path for AVX512 that supports Rx/Tx
offload features such as checksum, VLAN, RSS and QinQ offload.

---
v3:
- update ice_dev_supported_ptypes_get() to cover the newly added offload
  functions.
- add Tx/Rx burst mode info for the newly added offload functions.

v2:
- add a "do_offload" parameter to the relevant functions to reduce code
  duplication.

Leyi Rong (2):
  net/ice: add Tx AVX512 offload path
  net/ice: add Rx AVX512 offload path

 drivers/net/ice/ice_rxtx.c            |  78 +++--
 drivers/net/ice/ice_rxtx.h            |  10 +
 drivers/net/ice/ice_rxtx_vec_avx2.c   |   2 +-
 drivers/net/ice/ice_rxtx_vec_avx512.c | 403 +++++++++++++++++---------
 drivers/net/ice/ice_rxtx_vec_common.h | 129 +++++++--
 drivers/net/ice/ice_rxtx_vec_sse.c    |   2 +-
 6 files changed, 448 insertions(+), 176 deletions(-)

-- 
2.17.1



* [dpdk-dev] [PATCH v3 1/2] net/ice: add Tx AVX512 offload path
  2021-04-12  4:03 ` [dpdk-dev] [PATCH v3 0/2] add alternative " Leyi Rong
@ 2021-04-12  4:03   ` Leyi Rong
  2021-04-12  4:03   ` [dpdk-dev] [PATCH v3 2/2] net/ice: add Rx " Leyi Rong
  1 sibling, 0 replies; 15+ messages in thread
From: Leyi Rong @ 2021-04-12  4:03 UTC (permalink / raw)
  To: qi.z.zhang, wenzhuo.lu; +Cc: dev, Leyi Rong

Add an alternative Tx data path for AVX512 that supports a subset of
Tx offload features, including Tx checksum offload and VLAN/QinQ
insertion offload.

Signed-off-by: Leyi Rong <leyi.rong@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ice/ice_rxtx.c            |  28 +++++--
 drivers/net/ice/ice_rxtx.h            |   3 +
 drivers/net/ice/ice_rxtx_vec_avx2.c   |   2 +-
 drivers/net/ice/ice_rxtx_vec_avx512.c |  58 +++++++++++---
 drivers/net/ice/ice_rxtx_vec_common.h | 105 ++++++++++++++++++++++----
 drivers/net/ice/ice_rxtx_vec_sse.c    |   2 +-
 6 files changed, 163 insertions(+), 35 deletions(-)

diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 530c206f4f..d7c1e2164b 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -8,6 +8,7 @@
 
 #include "rte_pmd_ice.h"
 #include "ice_rxtx.h"
+#include "ice_rxtx_vec_common.h"
 
 #define ICE_TX_CKSUM_OFFLOAD_MASK (		 \
 		PKT_TX_IP_CKSUM |		 \
@@ -3267,12 +3268,14 @@ ice_set_tx_function(struct rte_eth_dev *dev)
 #ifdef RTE_ARCH_X86
 	struct ice_tx_queue *txq;
 	int i;
+	int tx_check_ret;
 	bool use_avx512 = false;
 	bool use_avx2 = false;
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		if (!ice_tx_vec_dev_check(dev) &&
-				rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
+		tx_check_ret = ice_tx_vec_dev_check(dev);
+		if (tx_check_ret >= 0 &&
+		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
 			ad->tx_vec_allowed = true;
 			for (i = 0; i < dev->data->nb_tx_queues; i++) {
 				txq = dev->data->tx_queues[i];
@@ -3291,12 +3294,15 @@ ice_set_tx_function(struct rte_eth_dev *dev)
 			PMD_DRV_LOG(NOTICE,
 				"AVX512 is not supported in build env");
 #endif
-			if (!use_avx512 &&
+			if (!use_avx512 && tx_check_ret == ICE_VECTOR_PATH &&
 			(rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
 			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
 			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
 				use_avx2 = true;
 
+			if (!use_avx512 && tx_check_ret == ICE_VECTOR_OFFLOAD_PATH)
+				ad->tx_vec_allowed = false;
+
 		} else {
 			ad->tx_vec_allowed = false;
 		}
@@ -3305,9 +3311,18 @@ ice_set_tx_function(struct rte_eth_dev *dev)
 	if (ad->tx_vec_allowed) {
 		if (use_avx512) {
 #ifdef CC_AVX512_SUPPORT
-			PMD_DRV_LOG(NOTICE, "Using AVX512 Vector Tx (port %d).",
-				    dev->data->port_id);
-			dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
+			if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+				PMD_DRV_LOG(NOTICE,
+					    "Using AVX512 OFFLOAD Vector Tx (port %d).",
+					    dev->data->port_id);
+				dev->tx_pkt_burst =
+					ice_xmit_pkts_vec_avx512_offload;
+			} else {
+				PMD_DRV_LOG(NOTICE,
+					    "Using AVX512 Vector Tx (port %d).",
+					    dev->data->port_id);
+				dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
+			}
 #endif
 		} else {
 			PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
@@ -3343,6 +3358,7 @@ static const struct {
 #ifdef RTE_ARCH_X86
 #ifdef CC_AVX512_SUPPORT
 	{ ice_xmit_pkts_vec_avx512, "Vector AVX512" },
+	{ ice_xmit_pkts_vec_avx512_offload, "Offload Vector AVX512" },
 #endif
 	{ ice_xmit_pkts_vec_avx2, "Vector AVX2" },
 	{ ice_xmit_pkts_vec,      "Vector SSE" },
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 99096e4c21..f72fad0255 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -261,6 +261,9 @@ uint16_t ice_recv_scattered_pkts_vec_avx512(void *rx_queue,
 					    uint16_t nb_pkts);
 uint16_t ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 				  uint16_t nb_pkts);
+uint16_t ice_xmit_pkts_vec_avx512_offload(void *tx_queue,
+					  struct rte_mbuf **tx_pkts,
+					  uint16_t nb_pkts);
 int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc);
 int ice_tx_done_cleanup(void *txq, uint32_t free_cnt);
 int ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c
index 1cc54903c6..e3b650edcc 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx2.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx2.c
@@ -969,7 +969,7 @@ ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
 
 	if (txq->nb_tx_free < txq->tx_free_thresh)
-		ice_tx_free_bufs(txq);
+		ice_tx_free_bufs_vec(txq);
 
 	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
 	if (unlikely(nb_pkts == 0))
diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index 0e5a676e68..27b8ccd67f 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c
@@ -979,23 +979,26 @@ ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
 	return txq->tx_rs_thresh;
 }
 
-static inline void
+static __rte_always_inline void
 ice_vtx1(volatile struct ice_tx_desc *txdp,
-	 struct rte_mbuf *pkt, uint64_t flags)
+	 struct rte_mbuf *pkt, uint64_t flags, bool do_offload)
 {
 	uint64_t high_qw =
 		(ICE_TX_DESC_DTYPE_DATA |
 		 ((uint64_t)flags  << ICE_TXD_QW1_CMD_S) |
 		 ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S));
 
+	if (do_offload)
+		ice_txd_enable_offload(pkt, &high_qw);
+
 	__m128i descriptor = _mm_set_epi64x(high_qw,
 				pkt->buf_iova + pkt->data_off);
 	_mm_store_si128((__m128i *)txdp, descriptor);
 }
 
-static inline void
-ice_vtx(volatile struct ice_tx_desc *txdp,
-	struct rte_mbuf **pkt, uint16_t nb_pkts,  uint64_t flags)
+static __rte_always_inline void
+ice_vtx(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkt,
+	uint16_t nb_pkts,  uint64_t flags, bool do_offload)
 {
 	const uint64_t hi_qw_tmpl = (ICE_TX_DESC_DTYPE_DATA |
 			((uint64_t)flags  << ICE_TXD_QW1_CMD_S));
@@ -1005,18 +1008,26 @@ ice_vtx(volatile struct ice_tx_desc *txdp,
 			hi_qw_tmpl |
 			((uint64_t)pkt[3]->data_len <<
 			 ICE_TXD_QW1_TX_BUF_SZ_S);
+		if (do_offload)
+			ice_txd_enable_offload(pkt[3], &hi_qw3);
 		uint64_t hi_qw2 =
 			hi_qw_tmpl |
 			((uint64_t)pkt[2]->data_len <<
 			 ICE_TXD_QW1_TX_BUF_SZ_S);
+		if (do_offload)
+			ice_txd_enable_offload(pkt[2], &hi_qw2);
 		uint64_t hi_qw1 =
 			hi_qw_tmpl |
 			((uint64_t)pkt[1]->data_len <<
 			 ICE_TXD_QW1_TX_BUF_SZ_S);
+		if (do_offload)
+			ice_txd_enable_offload(pkt[1], &hi_qw1);
 		uint64_t hi_qw0 =
 			hi_qw_tmpl |
 			((uint64_t)pkt[0]->data_len <<
 			 ICE_TXD_QW1_TX_BUF_SZ_S);
+		if (do_offload)
+			ice_txd_enable_offload(pkt[0], &hi_qw0);
 
 		__m512i desc0_3 =
 			_mm512_set_epi64
@@ -1033,7 +1044,7 @@ ice_vtx(volatile struct ice_tx_desc *txdp,
 
 	/* do any last ones */
 	while (nb_pkts) {
-		ice_vtx1(txdp, *pkt, flags);
+		ice_vtx1(txdp, *pkt, flags, do_offload);
 		txdp++, pkt++, nb_pkts--;
 	}
 }
@@ -1048,9 +1059,9 @@ ice_tx_backlog_entry_avx512(struct ice_vec_tx_entry *txep,
 		txep[i].mbuf = tx_pkts[i];
 }
 
-static inline uint16_t
+static __rte_always_inline uint16_t
 ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
-				uint16_t nb_pkts)
+				uint16_t nb_pkts, bool do_offload)
 {
 	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
 	volatile struct ice_tx_desc *txdp;
@@ -1080,11 +1091,11 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 	if (nb_commit >= n) {
 		ice_tx_backlog_entry_avx512(txep, tx_pkts, n);
 
-		ice_vtx(txdp, tx_pkts, n - 1, flags);
+		ice_vtx(txdp, tx_pkts, n - 1, flags, do_offload);
 		tx_pkts += (n - 1);
 		txdp += (n - 1);
 
-		ice_vtx1(txdp, *tx_pkts++, rs);
+		ice_vtx1(txdp, *tx_pkts++, rs, do_offload);
 
 		nb_commit = (uint16_t)(nb_commit - n);
 
@@ -1098,7 +1109,7 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	ice_tx_backlog_entry_avx512(txep, tx_pkts, nb_commit);
 
-	ice_vtx(txdp, tx_pkts, nb_commit, flags);
+	ice_vtx(txdp, tx_pkts, nb_commit, flags, do_offload);
 
 	tx_id = (uint16_t)(tx_id + nb_commit);
 	if (tx_id > txq->tx_next_rs) {
@@ -1128,7 +1139,30 @@ ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
 		ret = ice_xmit_fixed_burst_vec_avx512(tx_queue,
-						      &tx_pkts[nb_tx], num);
+				&tx_pkts[nb_tx], num, false);
+		nb_tx += ret;
+		nb_pkts -= ret;
+		if (ret < num)
+			break;
+	}
+
+	return nb_tx;
+}
+
+uint16_t
+ice_xmit_pkts_vec_avx512_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
+				 uint16_t nb_pkts)
+{
+	uint16_t nb_tx = 0;
+	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
+
+	while (nb_pkts) {
+		uint16_t ret, num;
+
+		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+		ret = ice_xmit_fixed_burst_vec_avx512(tx_queue,
+				&tx_pkts[nb_tx], num, true);
+
 		nb_tx += ret;
 		nb_pkts -= ret;
 		if (ret < num)
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index c09ac7f667..52db6ceca7 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -73,7 +73,7 @@ ice_rx_reassemble_packets(struct ice_rx_queue *rxq, struct rte_mbuf **rx_bufs,
 }
 
 static __rte_always_inline int
-ice_tx_free_bufs(struct ice_tx_queue *txq)
+ice_tx_free_bufs_vec(struct ice_tx_queue *txq)
 {
 	struct ice_tx_entry *txep;
 	uint32_t n;
@@ -193,7 +193,8 @@ _ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq)
 #ifdef CC_AVX512_SUPPORT
 	struct rte_eth_dev *dev = txq->vsi->adapter->eth_dev;
 
-	if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512) {
+	if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512 ||
+	    dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512_offload) {
 		struct ice_vec_tx_entry *swr = (void *)txq->sw_ring;
 
 		if (txq->tx_tail < i) {
@@ -263,29 +264,38 @@ ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
 	return 0;
 }
 
-#define ICE_NO_VECTOR_FLAGS (				 \
-		DEV_TX_OFFLOAD_MULTI_SEGS |		 \
-		DEV_TX_OFFLOAD_VLAN_INSERT |		 \
-		DEV_TX_OFFLOAD_IPV4_CKSUM |		 \
-		DEV_TX_OFFLOAD_SCTP_CKSUM |		 \
-		DEV_TX_OFFLOAD_UDP_CKSUM |		 \
-		DEV_TX_OFFLOAD_TCP_TSO |		 \
+#define ICE_TX_NO_VECTOR_FLAGS (			\
+		DEV_TX_OFFLOAD_MULTI_SEGS |		\
+		DEV_TX_OFFLOAD_TCP_TSO)
+
+#define ICE_TX_VECTOR_OFFLOAD (				\
+		DEV_TX_OFFLOAD_VLAN_INSERT |		\
+		DEV_TX_OFFLOAD_QINQ_INSERT |		\
+		DEV_TX_OFFLOAD_IPV4_CKSUM |		\
+		DEV_TX_OFFLOAD_SCTP_CKSUM |		\
+		DEV_TX_OFFLOAD_UDP_CKSUM |		\
 		DEV_TX_OFFLOAD_TCP_CKSUM)
 
+#define ICE_VECTOR_PATH		0
+#define ICE_VECTOR_OFFLOAD_PATH	1
+
 static inline int
 ice_tx_vec_queue_default(struct ice_tx_queue *txq)
 {
 	if (!txq)
 		return -1;
 
-	if (txq->offloads & ICE_NO_VECTOR_FLAGS)
-		return -1;
-
 	if (txq->tx_rs_thresh < ICE_VPMD_TX_BURST ||
 	    txq->tx_rs_thresh > ICE_TX_MAX_FREE_BUF_SZ)
 		return -1;
 
-	return 0;
+	if (txq->offloads & ICE_TX_NO_VECTOR_FLAGS)
+		return -1;
+
+	if (txq->offloads & ICE_TX_VECTOR_OFFLOAD)
+		return ICE_VECTOR_OFFLOAD_PATH;
+
+	return ICE_VECTOR_PATH;
 }
 
 static inline int
@@ -308,14 +318,79 @@ ice_tx_vec_dev_check_default(struct rte_eth_dev *dev)
 {
 	int i;
 	struct ice_tx_queue *txq;
+	int ret = 0;
+	int result = 0;
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		txq = dev->data->tx_queues[i];
-		if (ice_tx_vec_queue_default(txq))
+		ret = ice_tx_vec_queue_default(txq);
+		if (ret < 0)
 			return -1;
+		if (ret == ICE_VECTOR_OFFLOAD_PATH)
+			result = ret;
 	}
 
-	return 0;
+	return result;
 }
 
+static inline void
+ice_txd_enable_offload(struct rte_mbuf *tx_pkt,
+		       uint64_t *txd_hi)
+{
+	uint64_t ol_flags = tx_pkt->ol_flags;
+	uint32_t td_cmd = 0;
+	uint32_t td_offset = 0;
+
+	/* Tx Checksum Offload */
+	/* SET MACLEN */
+	td_offset |= (tx_pkt->l2_len >> 1) <<
+			ICE_TX_DESC_LEN_MACLEN_S;
+
+	/* Enable L3 checksum offload */
+	if (ol_flags & PKT_TX_IP_CKSUM) {
+		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
+		td_offset |= (tx_pkt->l3_len >> 2) <<
+			ICE_TX_DESC_LEN_IPLEN_S;
+	} else if (ol_flags & PKT_TX_IPV4) {
+		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
+		td_offset |= (tx_pkt->l3_len >> 2) <<
+			ICE_TX_DESC_LEN_IPLEN_S;
+	} else if (ol_flags & PKT_TX_IPV6) {
+		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
+		td_offset |= (tx_pkt->l3_len >> 2) <<
+			ICE_TX_DESC_LEN_IPLEN_S;
+	}
+
+	/* Enable L4 checksum offloads */
+	switch (ol_flags & PKT_TX_L4_MASK) {
+	case PKT_TX_TCP_CKSUM:
+		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
+		td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
+			ICE_TX_DESC_LEN_L4_LEN_S;
+		break;
+	case PKT_TX_SCTP_CKSUM:
+		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
+		td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
+			ICE_TX_DESC_LEN_L4_LEN_S;
+		break;
+	case PKT_TX_UDP_CKSUM:
+		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
+		td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
+			ICE_TX_DESC_LEN_L4_LEN_S;
+		break;
+	default:
+		break;
+	}
+
+	*txd_hi |= ((uint64_t)td_offset) << ICE_TXD_QW1_OFFSET_S;
+
+	/* Tx VLAN/QINQ insertion Offload */
+	if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
+		td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
+		*txd_hi |= ((uint64_t)tx_pkt->vlan_tci <<
+				ICE_TXD_QW1_L2TAG1_S);
+	}
+
+	*txd_hi |= ((uint64_t)td_cmd) << ICE_TXD_QW1_CMD_S;
+}
 #endif
diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c
index 3e467c48f1..6029cc2d99 100644
--- a/drivers/net/ice/ice_rxtx_vec_sse.c
+++ b/drivers/net/ice/ice_rxtx_vec_sse.c
@@ -702,7 +702,7 @@ ice_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
 
 	if (txq->nb_tx_free < txq->tx_free_thresh)
-		ice_tx_free_bufs(txq);
+		ice_tx_free_bufs_vec(txq);
 
 	nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
 	nb_commit = nb_pkts;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 15+ messages in thread

* [dpdk-dev] [PATCH v3 2/2] net/ice: add Rx AVX512 offload path
  2021-04-12  4:03 ` [dpdk-dev] [PATCH v3 0/2] add alternative " Leyi Rong
  2021-04-12  4:03   ` [dpdk-dev] [PATCH v3 1/2] net/ice: add Tx " Leyi Rong
@ 2021-04-12  4:03   ` Leyi Rong
  1 sibling, 0 replies; 15+ messages in thread
From: Leyi Rong @ 2021-04-12  4:03 UTC (permalink / raw)
  To: qi.z.zhang, wenzhuo.lu; +Cc: dev, Leyi Rong

Split the AVX512 Rx data path into two: one basic path, and one that
can support additional Rx offload features, including Rx checksum
offload, Rx VLAN offload, and RSS offload.
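
The offload path is selected per device: if any Rx queue enables an
offload in ICE_RX_VECTOR_OFFLOAD, ice_rx_vec_dev_check() returns
ICE_VECTOR_OFFLOAD_PATH and ice_set_rx_function() installs the
*_offload burst functions on the AVX512 path. A minimal sketch of the
application side, assuming a hypothetical port_id (illustrative only,
not part of this patch):

	#include <rte_ethdev.h>

	static int
	configure_rx_offload_path(uint16_t port_id)
	{
		struct rte_eth_conf conf = { 0 };

		/* any of these offloads steers Rx queue setup onto
		 * the AVX512 offload path
		 */
		conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM |
				       DEV_RX_OFFLOAD_VLAN |
				       DEV_RX_OFFLOAD_RSS_HASH;

		/* 1 Rx queue, 1 Tx queue -- placeholder counts */
		return rte_eth_dev_configure(port_id, 1, 1, &conf);
	}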

Signed-off-by: Leyi Rong <leyi.rong@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ice/ice_rxtx.c            |  50 +++-
 drivers/net/ice/ice_rxtx.h            |   7 +
 drivers/net/ice/ice_rxtx_vec_avx512.c | 345 +++++++++++++++++---------
 drivers/net/ice/ice_rxtx_vec_common.h |  50 ++--
 4 files changed, 298 insertions(+), 154 deletions(-)

diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index d7c1e2164b..328dac0dfa 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -1059,6 +1059,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 	uint32_t ring_size;
 	uint16_t len;
 	int use_def_burst_func = 1;
+	uint64_t offloads;
 
 	if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
 	    nb_desc > ICE_MAX_RING_DESC ||
@@ -1068,6 +1069,8 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
 	/* Free memory if needed */
 	if (dev->data->rx_queues[queue_idx]) {
 		ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
@@ -1088,6 +1091,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->nb_rx_desc = nb_desc;
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 	rxq->queue_id = queue_idx;
+	rxq->offloads = offloads;
 
 	rxq->reg_idx = vsi->base_queue + queue_idx;
 	rxq->port_id = dev->data->port_id;
@@ -1990,7 +1994,9 @@ ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
 #ifdef CC_AVX512_SUPPORT
 	    dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 ||
+	    dev->rx_pkt_burst == ice_recv_pkts_vec_avx512_offload ||
 	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 ||
+	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512_offload ||
 #endif
 	    dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
 	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2)
@@ -3052,12 +3058,14 @@ ice_set_rx_function(struct rte_eth_dev *dev)
 #ifdef RTE_ARCH_X86
 	struct ice_rx_queue *rxq;
 	int i;
+	int rx_check_ret;
 	bool use_avx512 = false;
 	bool use_avx2 = false;
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed &&
-				rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
+		rx_check_ret = ice_rx_vec_dev_check(dev);
+		if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
+		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
 			ad->rx_vec_allowed = true;
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
@@ -3091,11 +3099,19 @@ ice_set_rx_function(struct rte_eth_dev *dev)
 		if (dev->data->scattered_rx) {
 			if (use_avx512) {
 #ifdef CC_AVX512_SUPPORT
-				PMD_DRV_LOG(NOTICE,
-					"Using AVX512 Vector Scattered Rx (port %d).",
-					dev->data->port_id);
-				dev->rx_pkt_burst =
-					ice_recv_scattered_pkts_vec_avx512;
+				if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+					PMD_DRV_LOG(NOTICE,
+						"Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
+						dev->data->port_id);
+					dev->rx_pkt_burst =
+						ice_recv_scattered_pkts_vec_avx512_offload;
+				} else {
+					PMD_DRV_LOG(NOTICE,
+						"Using AVX512 Vector Scattered Rx (port %d).",
+						dev->data->port_id);
+					dev->rx_pkt_burst =
+						ice_recv_scattered_pkts_vec_avx512;
+				}
 #endif
 			} else {
 				PMD_DRV_LOG(DEBUG,
@@ -3109,11 +3125,19 @@ ice_set_rx_function(struct rte_eth_dev *dev)
 		} else {
 			if (use_avx512) {
 #ifdef CC_AVX512_SUPPORT
-				PMD_DRV_LOG(NOTICE,
-					"Using AVX512 Vector Rx (port %d).",
-					dev->data->port_id);
-				dev->rx_pkt_burst =
-					ice_recv_pkts_vec_avx512;
+				if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+					PMD_DRV_LOG(NOTICE,
+						"Using AVX512 OFFLOAD Vector Rx (port %d).",
+						dev->data->port_id);
+					dev->rx_pkt_burst =
+						ice_recv_pkts_vec_avx512_offload;
+				} else {
+					PMD_DRV_LOG(NOTICE,
+						"Using AVX512 Vector Rx (port %d).",
+						dev->data->port_id);
+					dev->rx_pkt_burst =
+						ice_recv_pkts_vec_avx512;
+				}
 #endif
 			} else {
 				PMD_DRV_LOG(DEBUG,
@@ -3162,7 +3186,9 @@ static const struct {
 #ifdef RTE_ARCH_X86
 #ifdef CC_AVX512_SUPPORT
 	{ ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
+	{ ice_recv_scattered_pkts_vec_avx512_offload, "Offload Vector AVX512 Scattered" },
 	{ ice_recv_pkts_vec_avx512,           "Vector AVX512" },
+	{ ice_recv_pkts_vec_avx512_offload,   "Offload Vector AVX512" },
 #endif
 	{ ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
 	{ ice_recv_pkts_vec_avx2,           "Vector AVX2" },
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index f72fad0255..b29387ca0f 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -88,6 +88,7 @@ struct ice_rx_queue {
 	uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
 	ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
 	ice_rx_release_mbufs_t rx_rel_mbufs;
+	uint64_t offloads;
 };
 
 struct ice_tx_entry {
@@ -256,9 +257,15 @@ uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 				uint16_t nb_pkts);
 uint16_t ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
 				  uint16_t nb_pkts);
+uint16_t ice_recv_pkts_vec_avx512_offload(void *rx_queue,
+					  struct rte_mbuf **rx_pkts,
+					  uint16_t nb_pkts);
 uint16_t ice_recv_scattered_pkts_vec_avx512(void *rx_queue,
 					    struct rte_mbuf **rx_pkts,
 					    uint16_t nb_pkts);
+uint16_t ice_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
+						    struct rte_mbuf **rx_pkts,
+						    uint16_t nb_pkts);
 uint16_t ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 				  uint16_t nb_pkts);
 uint16_t ice_xmit_pkts_vec_avx512_offload(void *tx_queue,
diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index 27b8ccd67f..36a3a95580 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c
@@ -147,10 +147,12 @@ ice_flex_rxd_to_fdir_flags_vec_avx512(const __m256i fdir_id0_7)
 	return fdir_flags;
 }
 
-static inline uint16_t
+static __rte_always_inline uint16_t
 _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
 			      struct rte_mbuf **rx_pkts,
-			      uint16_t nb_pkts, uint8_t *split_packet)
+			      uint16_t nb_pkts,
+			      uint8_t *split_packet,
+			      bool do_offload)
 {
 	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
 	const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
@@ -221,6 +223,7 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
 			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
 
+	/* the following code block is for Rx checksum offload */
 	/* Status/Error flag masks */
 	/**
 	 * mask everything except Checksum Reports, RSS indication
@@ -484,37 +487,42 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
 		__m256i status0_7 = _mm512_extracti64x4_epi64
 			(raw_status0_7, 0);
 
-		/* now do flag manipulation */
+		__m256i mbuf_flags = _mm256_set1_epi32(0);
 
-		/* get only flag/error bits we want */
-		const __m256i flag_bits =
-			_mm256_and_si256(status0_7, flags_mask);
-		/**
-		 * l3_l4_error flags, shuffle, then shift to correct adjustment
-		 * of flags in flags_shuf, and finally mask out extra bits
-		 */
-		__m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
-				_mm256_srli_epi32(flag_bits, 4));
-		l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
-		__m256i l4_outer_mask = _mm256_set1_epi32(0x6);
-		__m256i l4_outer_flags =
-				_mm256_and_si256(l3_l4_flags, l4_outer_mask);
-		l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20);
-
-		__m256i l3_l4_mask = _mm256_set1_epi32(~0x6);
-		l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask);
-		l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags);
-		l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
-		/* set rss and vlan flags */
-		const __m256i rss_vlan_flag_bits =
-			_mm256_srli_epi32(flag_bits, 12);
-		const __m256i rss_vlan_flags =
-			_mm256_shuffle_epi8(rss_vlan_flags_shuf,
-					    rss_vlan_flag_bits);
-
-		/* merge flags */
-		__m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
+		if (do_offload) {
+			/* now do flag manipulation */
+
+			/* get only flag/error bits we want */
+			const __m256i flag_bits =
+				_mm256_and_si256(status0_7, flags_mask);
+			/**
+			 * l3_l4_error flags, shuffle, then shift to correct adjustment
+			 * of flags in flags_shuf, and finally mask out extra bits
+			 */
+			__m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
+					_mm256_srli_epi32(flag_bits, 4));
+			l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
+			__m256i l4_outer_mask = _mm256_set1_epi32(0x6);
+			__m256i l4_outer_flags =
+					_mm256_and_si256(l3_l4_flags, l4_outer_mask);
+			l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20);
+
+			__m256i l3_l4_mask = _mm256_set1_epi32(~0x6);
+
+			l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask);
+			l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags);
+			l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
+			/* set rss and vlan flags */
+			const __m256i rss_vlan_flag_bits =
+				_mm256_srli_epi32(flag_bits, 12);
+			const __m256i rss_vlan_flags =
+				_mm256_shuffle_epi8(rss_vlan_flags_shuf,
+						    rss_vlan_flag_bits);
+
+			/* merge flags */
+			mbuf_flags = _mm256_or_si256(l3_l4_flags,
 						     rss_vlan_flags);
+		}
 
 		if (rxq->fdir_enabled) {
 			const __m256i fdir_id4_7 =
@@ -526,12 +534,19 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
 			const __m256i fdir_id0_7 =
 				_mm256_unpackhi_epi64(fdir_id4_7, fdir_id0_3);
 
-			const __m256i fdir_flags =
-				ice_flex_rxd_to_fdir_flags_vec_avx512
-					(fdir_id0_7);
-
-			/* merge with fdir_flags */
-			mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_flags);
+			if (do_offload) {
+				const __m256i fdir_flags =
+					ice_flex_rxd_to_fdir_flags_vec_avx512
+						(fdir_id0_7);
+
+				/* merge with fdir_flags */
+				mbuf_flags = _mm256_or_si256
+						(mbuf_flags, fdir_flags);
+			} else {
+				mbuf_flags =
+					ice_flex_rxd_to_fdir_flags_vec_avx512
+						(fdir_id0_7);
+			}
 
 			/* write to mbuf: have to use scalar store here */
 			rx_pkts[i + 0]->hash.fdir.hi =
@@ -559,95 +574,97 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
 				_mm256_extract_epi32(fdir_id0_7, 4);
 		} /* if() on fdir_enabled */
 
+		if (do_offload) {
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
-		/**
-		 * needs to load 2nd 16B of each desc for RSS hash parsing,
-		 * will cause performance drop to get into this context.
-		 */
-		if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_RSS_HASH) {
-			/* load bottom half of every 32B desc */
-			const __m128i raw_desc_bh7 =
-				_mm_load_si128
-					((void *)(&rxdp[7].wb.status_error1));
-			rte_compiler_barrier();
-			const __m128i raw_desc_bh6 =
-				_mm_load_si128
-					((void *)(&rxdp[6].wb.status_error1));
-			rte_compiler_barrier();
-			const __m128i raw_desc_bh5 =
-				_mm_load_si128
-					((void *)(&rxdp[5].wb.status_error1));
-			rte_compiler_barrier();
-			const __m128i raw_desc_bh4 =
-				_mm_load_si128
-					((void *)(&rxdp[4].wb.status_error1));
-			rte_compiler_barrier();
-			const __m128i raw_desc_bh3 =
-				_mm_load_si128
-					((void *)(&rxdp[3].wb.status_error1));
-			rte_compiler_barrier();
-			const __m128i raw_desc_bh2 =
-				_mm_load_si128
-					((void *)(&rxdp[2].wb.status_error1));
-			rte_compiler_barrier();
-			const __m128i raw_desc_bh1 =
-				_mm_load_si128
-					((void *)(&rxdp[1].wb.status_error1));
-			rte_compiler_barrier();
-			const __m128i raw_desc_bh0 =
-				_mm_load_si128
-					((void *)(&rxdp[0].wb.status_error1));
-
-			__m256i raw_desc_bh6_7 =
-				_mm256_inserti128_si256
-					(_mm256_castsi128_si256(raw_desc_bh6),
-					raw_desc_bh7, 1);
-			__m256i raw_desc_bh4_5 =
-				_mm256_inserti128_si256
-					(_mm256_castsi128_si256(raw_desc_bh4),
-					raw_desc_bh5, 1);
-			__m256i raw_desc_bh2_3 =
-				_mm256_inserti128_si256
-					(_mm256_castsi128_si256(raw_desc_bh2),
-					raw_desc_bh3, 1);
-			__m256i raw_desc_bh0_1 =
-				_mm256_inserti128_si256
-					(_mm256_castsi128_si256(raw_desc_bh0),
-					raw_desc_bh1, 1);
-
 			/**
-			 * to shift the 32b RSS hash value to the
-			 * highest 32b of each 128b before mask
+			 * needs to load 2nd 16B of each desc for RSS hash parsing,
+			 * will cause performance drop to get into this context.
 			 */
-			__m256i rss_hash6_7 =
-				_mm256_slli_epi64(raw_desc_bh6_7, 32);
-			__m256i rss_hash4_5 =
-				_mm256_slli_epi64(raw_desc_bh4_5, 32);
-			__m256i rss_hash2_3 =
-				_mm256_slli_epi64(raw_desc_bh2_3, 32);
-			__m256i rss_hash0_1 =
-				_mm256_slli_epi64(raw_desc_bh0_1, 32);
-
-			__m256i rss_hash_msk =
-				_mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
-						 0xFFFFFFFF, 0, 0, 0);
-
-			rss_hash6_7 = _mm256_and_si256
-					(rss_hash6_7, rss_hash_msk);
-			rss_hash4_5 = _mm256_and_si256
-					(rss_hash4_5, rss_hash_msk);
-			rss_hash2_3 = _mm256_and_si256
-					(rss_hash2_3, rss_hash_msk);
-			rss_hash0_1 = _mm256_and_si256
-					(rss_hash0_1, rss_hash_msk);
-
-			mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
-			mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
-			mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
-			mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
-		} /* if() on RSS hash parsing */
+			if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
+					DEV_RX_OFFLOAD_RSS_HASH) {
+				/* load bottom half of every 32B desc */
+				const __m128i raw_desc_bh7 =
+					_mm_load_si128
+						((void *)(&rxdp[7].wb.status_error1));
+				rte_compiler_barrier();
+				const __m128i raw_desc_bh6 =
+					_mm_load_si128
+						((void *)(&rxdp[6].wb.status_error1));
+				rte_compiler_barrier();
+				const __m128i raw_desc_bh5 =
+					_mm_load_si128
+						((void *)(&rxdp[5].wb.status_error1));
+				rte_compiler_barrier();
+				const __m128i raw_desc_bh4 =
+					_mm_load_si128
+						((void *)(&rxdp[4].wb.status_error1));
+				rte_compiler_barrier();
+				const __m128i raw_desc_bh3 =
+					_mm_load_si128
+						((void *)(&rxdp[3].wb.status_error1));
+				rte_compiler_barrier();
+				const __m128i raw_desc_bh2 =
+					_mm_load_si128
+						((void *)(&rxdp[2].wb.status_error1));
+				rte_compiler_barrier();
+				const __m128i raw_desc_bh1 =
+					_mm_load_si128
+						((void *)(&rxdp[1].wb.status_error1));
+				rte_compiler_barrier();
+				const __m128i raw_desc_bh0 =
+					_mm_load_si128
+						((void *)(&rxdp[0].wb.status_error1));
+
+				__m256i raw_desc_bh6_7 =
+					_mm256_inserti128_si256
+						(_mm256_castsi128_si256(raw_desc_bh6),
+						raw_desc_bh7, 1);
+				__m256i raw_desc_bh4_5 =
+					_mm256_inserti128_si256
+						(_mm256_castsi128_si256(raw_desc_bh4),
+						raw_desc_bh5, 1);
+				__m256i raw_desc_bh2_3 =
+					_mm256_inserti128_si256
+						(_mm256_castsi128_si256(raw_desc_bh2),
+						raw_desc_bh3, 1);
+				__m256i raw_desc_bh0_1 =
+					_mm256_inserti128_si256
+						(_mm256_castsi128_si256(raw_desc_bh0),
+						raw_desc_bh1, 1);
+
+				/**
+				 * to shift the 32b RSS hash value to the
+				 * highest 32b of each 128b before mask
+				 */
+				__m256i rss_hash6_7 =
+					_mm256_slli_epi64(raw_desc_bh6_7, 32);
+				__m256i rss_hash4_5 =
+					_mm256_slli_epi64(raw_desc_bh4_5, 32);
+				__m256i rss_hash2_3 =
+					_mm256_slli_epi64(raw_desc_bh2_3, 32);
+				__m256i rss_hash0_1 =
+					_mm256_slli_epi64(raw_desc_bh0_1, 32);
+
+				__m256i rss_hash_msk =
+					_mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
+							 0xFFFFFFFF, 0, 0, 0);
+
+				rss_hash6_7 = _mm256_and_si256
+						(rss_hash6_7, rss_hash_msk);
+				rss_hash4_5 = _mm256_and_si256
+						(rss_hash4_5, rss_hash_msk);
+				rss_hash2_3 = _mm256_and_si256
+						(rss_hash2_3, rss_hash_msk);
+				rss_hash0_1 = _mm256_and_si256
+						(rss_hash0_1, rss_hash_msk);
+
+				mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
+				mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
+				mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
+				mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
+			} /* if() on RSS hash parsing */
 #endif
+		}
 
 		/**
 		 * At this point, we have the 8 sets of flags in the low 16-bits
@@ -803,7 +820,19 @@ uint16_t
 ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
 			 uint16_t nb_pkts)
 {
-	return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts, NULL);
+	return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts, NULL, false);
+}
+
+/**
+ * Notice:
+ * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
+ */
+uint16_t
+ice_recv_pkts_vec_avx512_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+				 uint16_t nb_pkts)
+{
+	return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts,
+					     nb_pkts, NULL, true);
 }
 
 /**
@@ -820,7 +849,49 @@ ice_recv_scattered_burst_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
 
 	/* get some new buffers */
 	uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx512(rxq, rx_pkts, nb_pkts,
-						       split_flags);
+						       split_flags, false);
+	if (nb_bufs == 0)
+		return 0;
+
+	/* happy day case, full burst + no packets to be joined */
+	const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+	if (!rxq->pkt_first_seg &&
+	    split_fl64[0] == 0 && split_fl64[1] == 0 &&
+	    split_fl64[2] == 0 && split_fl64[3] == 0)
+		return nb_bufs;
+
+	/* reassemble any packets that need reassembly */
+	unsigned int i = 0;
+
+	if (!rxq->pkt_first_seg) {
+		/* find the first split flag, and only reassemble then */
+		while (i < nb_bufs && !split_flags[i])
+			i++;
+		if (i == nb_bufs)
+			return nb_bufs;
+		rxq->pkt_first_seg = rx_pkts[i];
+	}
+	return i + ice_rx_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+					     &split_flags[i]);
+}
+
+/**
+ * vPMD receive routine that reassembles single burst of 32 scattered packets
+ * Notice:
+ * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
+ */
+static uint16_t
+ice_recv_scattered_burst_vec_avx512_offload(void *rx_queue,
+					    struct rte_mbuf **rx_pkts,
+					    uint16_t nb_pkts)
+{
+	struct ice_rx_queue *rxq = rx_queue;
+	uint8_t split_flags[ICE_VPMD_RX_BURST] = {0};
+
+	/* get some new buffers */
+	uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx512(rxq,
+				rx_pkts, nb_pkts, split_flags, true);
 	if (nb_bufs == 0)
 		return 0;
 
@@ -871,6 +942,32 @@ ice_recv_scattered_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
 				rx_pkts + retval, nb_pkts);
 }
 
+/**
+ * vPMD receive routine that reassembles scattered packets.
+ * Main receive routine that can handle arbitrary burst sizes
+ * Notice:
+ * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
+ */
+uint16_t
+ice_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
+					   struct rte_mbuf **rx_pkts,
+					   uint16_t nb_pkts)
+{
+	uint16_t retval = 0;
+
+	while (nb_pkts > ICE_VPMD_RX_BURST) {
+		uint16_t burst =
+			ice_recv_scattered_burst_vec_avx512_offload(rx_queue,
+				rx_pkts + retval, ICE_VPMD_RX_BURST);
+		retval += burst;
+		nb_pkts -= burst;
+		if (burst < ICE_VPMD_RX_BURST)
+			return retval;
+	}
+	return retval + ice_recv_scattered_burst_vec_avx512_offload(rx_queue,
+				rx_pkts + retval, nb_pkts);
+}
+
 static __rte_always_inline int
 ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
 {
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index 52db6ceca7..462a43b21e 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -243,6 +243,27 @@ ice_rxq_vec_setup_default(struct ice_rx_queue *rxq)
 	return 0;
 }
 
+#define ICE_TX_NO_VECTOR_FLAGS (			\
+		DEV_TX_OFFLOAD_MULTI_SEGS |		\
+		DEV_TX_OFFLOAD_TCP_TSO)
+
+#define ICE_TX_VECTOR_OFFLOAD (				\
+		DEV_TX_OFFLOAD_VLAN_INSERT |		\
+		DEV_TX_OFFLOAD_QINQ_INSERT |		\
+		DEV_TX_OFFLOAD_IPV4_CKSUM |		\
+		DEV_TX_OFFLOAD_SCTP_CKSUM |		\
+		DEV_TX_OFFLOAD_UDP_CKSUM |		\
+		DEV_TX_OFFLOAD_TCP_CKSUM)
+
+#define ICE_RX_VECTOR_OFFLOAD (				\
+		DEV_RX_OFFLOAD_CHECKSUM |		\
+		DEV_RX_OFFLOAD_SCTP_CKSUM |		\
+		DEV_RX_OFFLOAD_VLAN |			\
+		DEV_RX_OFFLOAD_RSS_HASH)
+
+#define ICE_VECTOR_PATH		0
+#define ICE_VECTOR_OFFLOAD_PATH	1
+
 static inline int
 ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
 {
@@ -261,23 +282,11 @@ ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
 	if (rxq->proto_xtr != PROTO_XTR_NONE)
 		return -1;
 
-	return 0;
-}
-
-#define ICE_TX_NO_VECTOR_FLAGS (			\
-		DEV_TX_OFFLOAD_MULTI_SEGS |		\
-		DEV_TX_OFFLOAD_TCP_TSO)
-
-#define ICE_TX_VECTOR_OFFLOAD (				\
-		DEV_TX_OFFLOAD_VLAN_INSERT |		\
-		DEV_TX_OFFLOAD_QINQ_INSERT |		\
-		DEV_TX_OFFLOAD_IPV4_CKSUM |		\
-		DEV_TX_OFFLOAD_SCTP_CKSUM |		\
-		DEV_TX_OFFLOAD_UDP_CKSUM |		\
-		DEV_TX_OFFLOAD_TCP_CKSUM)
+	if (rxq->offloads & ICE_RX_VECTOR_OFFLOAD)
+		return ICE_VECTOR_OFFLOAD_PATH;
 
-#define ICE_VECTOR_PATH		0
-#define ICE_VECTOR_OFFLOAD_PATH	1
+	return ICE_VECTOR_PATH;
+}
 
 static inline int
 ice_tx_vec_queue_default(struct ice_tx_queue *txq)
@@ -303,14 +312,19 @@ ice_rx_vec_dev_check_default(struct rte_eth_dev *dev)
 {
 	int i;
 	struct ice_rx_queue *rxq;
+	int ret = 0;
+	int result = 0;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
-		if (ice_rx_vec_queue_default(rxq))
+		ret = ice_rx_vec_queue_default(rxq);
+		if (ret < 0)
 			return -1;
+		if (ret == ICE_VECTOR_OFFLOAD_PATH)
+			result = ret;
 	}
 
-	return 0;
+	return result;
 }
 
 static inline int
-- 
2.17.1


^ permalink raw reply	[flat|nested] 15+ messages in thread

* [dpdk-dev] [PATCH v4 0/2] add alternative AVX512 offload path
  2021-03-17  9:14 [dpdk-dev] [PATCH 0/2] add alternative AVX512 offload path Leyi Rong
                   ` (2 preceding siblings ...)
  2021-04-12  4:03 ` [dpdk-dev] [PATCH v3 0/2] add alternative " Leyi Rong
@ 2021-04-15  1:13 ` Leyi Rong
  2021-04-15  1:13   ` [dpdk-dev] [PATCH v4 1/2] net/ice: add Tx " Leyi Rong
  2021-04-15  1:13   ` [dpdk-dev] [PATCH v4 2/2] net/ice: add Rx " Leyi Rong
  2021-04-15  8:58 ` [dpdk-dev] [PATCH v5 0/2] add alternative " Leyi Rong
  4 siblings, 2 replies; 15+ messages in thread
From: Leyi Rong @ 2021-04-15  1:13 UTC (permalink / raw)
  To: qi.z.zhang, wenzhuo.lu; +Cc: dev, Leyi Rong

Add an alternative Rx/Tx offload path for AVX512 which supports Rx/Tx
offload features such as checksum, VLAN, RSS, and QinQ offload.
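
Both patches use the same specialization trick: a single
__rte_always_inline worker takes a compile-time-constant "do_offload"
flag, and two thin exported wrappers pass true/false, so the compiler
emits two copies with the unused offload branches folded away. A
stripped-down sketch of the pattern, with hypothetical names
(illustrative only):

	#include <stdbool.h>
	#include <stdint.h>
	#include <rte_common.h>
	#include <rte_mbuf.h>

	static __rte_always_inline uint16_t
	recv_raw(void *rxq, struct rte_mbuf **pkts, uint16_t n,
		 bool do_offload)
	{
		uint16_t nb_rx = 0;

		/* ... descriptor parsing common to both paths ... */
		if (do_offload) {
			/* checksum/VLAN/RSS flag extraction compiles
			 * only into the copy where do_offload is a
			 * constant true
			 */
		}
		return nb_rx;
	}

	uint16_t
	recv_basic(void *rxq, struct rte_mbuf **pkts, uint16_t n)
	{
		return recv_raw(rxq, pkts, n, false);
	}

	uint16_t
	recv_offload(void *rxq, struct rte_mbuf **pkts, uint16_t n)
	{
		return recv_raw(rxq, pkts, n, true);
	}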

---
v4:
- code rebased.

v3:
- complete ice_dev_supported_ptypes_get() for the newly added offload
  functions.
- complete Tx/Rx burst information for the newly added offload
  functions.

v2:
- add a "do_offload" parameter to the relevant functions to reduce
  code duplication.

Leyi Rong (2):
  net/ice: add Tx AVX512 offload path
  net/ice: add Rx AVX512 offload path

 drivers/net/ice/ice_rxtx.c            |  78 +++--
 drivers/net/ice/ice_rxtx.h            |  10 +
 drivers/net/ice/ice_rxtx_vec_avx2.c   |   2 +-
 drivers/net/ice/ice_rxtx_vec_avx512.c | 403 +++++++++++++++++---------
 drivers/net/ice/ice_rxtx_vec_common.h | 129 +++++++--
 drivers/net/ice/ice_rxtx_vec_sse.c    |   2 +-
 6 files changed, 448 insertions(+), 176 deletions(-)

-- 
2.17.1


^ permalink raw reply	[flat|nested] 15+ messages in thread

* [dpdk-dev] [PATCH v4 1/2] net/ice: add Tx AVX512 offload path
  2021-04-15  1:13 ` [dpdk-dev] [PATCH v4 0/2] add alternative " Leyi Rong
@ 2021-04-15  1:13   ` Leyi Rong
  2021-04-15  1:13   ` [dpdk-dev] [PATCH v4 2/2] net/ice: add Rx " Leyi Rong
  1 sibling, 0 replies; 15+ messages in thread
From: Leyi Rong @ 2021-04-15  1:13 UTC (permalink / raw)
  To: qi.z.zhang, wenzhuo.lu; +Cc: dev, Leyi Rong

Add an alternative Tx data path for AVX512 which supports a subset of
Tx offload features, including Tx checksum offload and VLAN/QinQ
insertion offload.
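
The new path reads per-packet metadata from the mbuf: ol_flags selects
the checksum commands, l2_len/l3_len provide the header lengths for the
descriptor offset field, and vlan_tci supplies the tag to insert. A
minimal sketch of how an application would mark an IPv4/TCP packet for
this path (hypothetical helper, illustrative only; queues configured
with TSO or multi-segment offload still fall back to the scalar path):

	#include <rte_ether.h>
	#include <rte_ip.h>
	#include <rte_mbuf.h>

	static void
	request_tx_offloads(struct rte_mbuf *m, uint16_t vlan)
	{
		m->l2_len = sizeof(struct rte_ether_hdr);
		m->l3_len = sizeof(struct rte_ipv4_hdr);
		/* IPv4 header and TCP checksums computed by hardware */
		m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
			       PKT_TX_TCP_CKSUM;
		/* tag inserted via L2TAG1 in the Tx descriptor */
		m->vlan_tci = vlan;
		m->ol_flags |= PKT_TX_VLAN;
	}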

Signed-off-by: Leyi Rong <leyi.rong@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ice/ice_rxtx.c            |  28 +++++--
 drivers/net/ice/ice_rxtx.h            |   3 +
 drivers/net/ice/ice_rxtx_vec_avx2.c   |   2 +-
 drivers/net/ice/ice_rxtx_vec_avx512.c |  58 +++++++++++---
 drivers/net/ice/ice_rxtx_vec_common.h | 105 ++++++++++++++++++++++----
 drivers/net/ice/ice_rxtx_vec_sse.c    |   2 +-
 6 files changed, 163 insertions(+), 35 deletions(-)

diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 0827db9c9e..75326c76ab 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -8,6 +8,7 @@
 
 #include "rte_pmd_ice.h"
 #include "ice_rxtx.h"
+#include "ice_rxtx_vec_common.h"
 
 #define ICE_TX_CKSUM_OFFLOAD_MASK (		 \
 		PKT_TX_IP_CKSUM |		 \
@@ -3267,12 +3268,14 @@ ice_set_tx_function(struct rte_eth_dev *dev)
 #ifdef RTE_ARCH_X86
 	struct ice_tx_queue *txq;
 	int i;
+	int tx_check_ret;
 	bool use_avx512 = false;
 	bool use_avx2 = false;
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		if (!ice_tx_vec_dev_check(dev) &&
-				rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
+		tx_check_ret = ice_tx_vec_dev_check(dev);
+		if (tx_check_ret >= 0 &&
+		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
 			ad->tx_vec_allowed = true;
 			for (i = 0; i < dev->data->nb_tx_queues; i++) {
 				txq = dev->data->tx_queues[i];
@@ -3291,12 +3294,15 @@ ice_set_tx_function(struct rte_eth_dev *dev)
 			PMD_DRV_LOG(NOTICE,
 				"AVX512 is not supported in build env");
 #endif
-			if (!use_avx512 &&
+			if (!use_avx512 && tx_check_ret == ICE_VECTOR_PATH &&
 			(rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
 			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
 			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
 				use_avx2 = true;
 
+			if (!use_avx512 && tx_check_ret == ICE_VECTOR_OFFLOAD_PATH)
+				ad->tx_vec_allowed = false;
+
 		} else {
 			ad->tx_vec_allowed = false;
 		}
@@ -3305,9 +3311,18 @@ ice_set_tx_function(struct rte_eth_dev *dev)
 	if (ad->tx_vec_allowed) {
 		if (use_avx512) {
 #ifdef CC_AVX512_SUPPORT
-			PMD_DRV_LOG(NOTICE, "Using AVX512 Vector Tx (port %d).",
-				    dev->data->port_id);
-			dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
+			if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+				PMD_DRV_LOG(NOTICE,
+					    "Using AVX512 OFFLOAD Vector Tx (port %d).",
+					    dev->data->port_id);
+				dev->tx_pkt_burst =
+					ice_xmit_pkts_vec_avx512_offload;
+			} else {
+				PMD_DRV_LOG(NOTICE,
+					    "Using AVX512 Vector Tx (port %d).",
+					    dev->data->port_id);
+				dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
+			}
 #endif
 		} else {
 			PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
@@ -3343,6 +3358,7 @@ static const struct {
 #ifdef RTE_ARCH_X86
 #ifdef CC_AVX512_SUPPORT
 	{ ice_xmit_pkts_vec_avx512, "Vector AVX512" },
+	{ ice_xmit_pkts_vec_avx512_offload, "Offload Vector AVX512" },
 #endif
 	{ ice_xmit_pkts_vec_avx2, "Vector AVX2" },
 	{ ice_xmit_pkts_vec,      "Vector SSE" },
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 99096e4c21..f72fad0255 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -261,6 +261,9 @@ uint16_t ice_recv_scattered_pkts_vec_avx512(void *rx_queue,
 					    uint16_t nb_pkts);
 uint16_t ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 				  uint16_t nb_pkts);
+uint16_t ice_xmit_pkts_vec_avx512_offload(void *tx_queue,
+					  struct rte_mbuf **tx_pkts,
+					  uint16_t nb_pkts);
 int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc);
 int ice_tx_done_cleanup(void *txq, uint32_t free_cnt);
 int ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c
index 83dcdf15d4..8d4bd6df1b 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx2.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx2.c
@@ -853,7 +853,7 @@ ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
 
 	if (txq->nb_tx_free < txq->tx_free_thresh)
-		ice_tx_free_bufs(txq);
+		ice_tx_free_bufs_vec(txq);
 
 	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
 	if (unlikely(nb_pkts == 0))
diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index a668b82232..1c4a59a170 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c
@@ -982,23 +982,26 @@ ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
 	return txq->tx_rs_thresh;
 }
 
-static inline void
+static __rte_always_inline void
 ice_vtx1(volatile struct ice_tx_desc *txdp,
-	 struct rte_mbuf *pkt, uint64_t flags)
+	 struct rte_mbuf *pkt, uint64_t flags, bool do_offload)
 {
 	uint64_t high_qw =
 		(ICE_TX_DESC_DTYPE_DATA |
 		 ((uint64_t)flags  << ICE_TXD_QW1_CMD_S) |
 		 ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S));
 
+	if (do_offload)
+		ice_txd_enable_offload(pkt, &high_qw);
+
 	__m128i descriptor = _mm_set_epi64x(high_qw,
 				pkt->buf_iova + pkt->data_off);
 	_mm_store_si128((__m128i *)txdp, descriptor);
 }
 
-static inline void
-ice_vtx(volatile struct ice_tx_desc *txdp,
-	struct rte_mbuf **pkt, uint16_t nb_pkts,  uint64_t flags)
+static __rte_always_inline void
+ice_vtx(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkt,
+	uint16_t nb_pkts,  uint64_t flags, bool do_offload)
 {
 	const uint64_t hi_qw_tmpl = (ICE_TX_DESC_DTYPE_DATA |
 			((uint64_t)flags  << ICE_TXD_QW1_CMD_S));
@@ -1008,18 +1011,26 @@ ice_vtx(volatile struct ice_tx_desc *txdp,
 			hi_qw_tmpl |
 			((uint64_t)pkt[3]->data_len <<
 			 ICE_TXD_QW1_TX_BUF_SZ_S);
+		if (do_offload)
+			ice_txd_enable_offload(pkt[3], &hi_qw3);
 		uint64_t hi_qw2 =
 			hi_qw_tmpl |
 			((uint64_t)pkt[2]->data_len <<
 			 ICE_TXD_QW1_TX_BUF_SZ_S);
+		if (do_offload)
+			ice_txd_enable_offload(pkt[2], &hi_qw2);
 		uint64_t hi_qw1 =
 			hi_qw_tmpl |
 			((uint64_t)pkt[1]->data_len <<
 			 ICE_TXD_QW1_TX_BUF_SZ_S);
+		if (do_offload)
+			ice_txd_enable_offload(pkt[1], &hi_qw1);
 		uint64_t hi_qw0 =
 			hi_qw_tmpl |
 			((uint64_t)pkt[0]->data_len <<
 			 ICE_TXD_QW1_TX_BUF_SZ_S);
+		if (do_offload)
+			ice_txd_enable_offload(pkt[0], &hi_qw0);
 
 		__m512i desc0_3 =
 			_mm512_set_epi64
@@ -1036,7 +1047,7 @@ ice_vtx(volatile struct ice_tx_desc *txdp,
 
 	/* do any last ones */
 	while (nb_pkts) {
-		ice_vtx1(txdp, *pkt, flags);
+		ice_vtx1(txdp, *pkt, flags, do_offload);
 		txdp++, pkt++, nb_pkts--;
 	}
 }
@@ -1051,9 +1062,9 @@ ice_tx_backlog_entry_avx512(struct ice_vec_tx_entry *txep,
 		txep[i].mbuf = tx_pkts[i];
 }
 
-static inline uint16_t
+static __rte_always_inline uint16_t
 ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
-				uint16_t nb_pkts)
+				uint16_t nb_pkts, bool do_offload)
 {
 	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
 	volatile struct ice_tx_desc *txdp;
@@ -1083,11 +1094,11 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 	if (nb_commit >= n) {
 		ice_tx_backlog_entry_avx512(txep, tx_pkts, n);
 
-		ice_vtx(txdp, tx_pkts, n - 1, flags);
+		ice_vtx(txdp, tx_pkts, n - 1, flags, do_offload);
 		tx_pkts += (n - 1);
 		txdp += (n - 1);
 
-		ice_vtx1(txdp, *tx_pkts++, rs);
+		ice_vtx1(txdp, *tx_pkts++, rs, do_offload);
 
 		nb_commit = (uint16_t)(nb_commit - n);
 
@@ -1101,7 +1112,7 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	ice_tx_backlog_entry_avx512(txep, tx_pkts, nb_commit);
 
-	ice_vtx(txdp, tx_pkts, nb_commit, flags);
+	ice_vtx(txdp, tx_pkts, nb_commit, flags, do_offload);
 
 	tx_id = (uint16_t)(tx_id + nb_commit);
 	if (tx_id > txq->tx_next_rs) {
@@ -1131,7 +1142,30 @@ ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
 		ret = ice_xmit_fixed_burst_vec_avx512(tx_queue,
-						      &tx_pkts[nb_tx], num);
+				&tx_pkts[nb_tx], num, false);
+		nb_tx += ret;
+		nb_pkts -= ret;
+		if (ret < num)
+			break;
+	}
+
+	return nb_tx;
+}
+
+uint16_t
+ice_xmit_pkts_vec_avx512_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
+				 uint16_t nb_pkts)
+{
+	uint16_t nb_tx = 0;
+	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
+
+	while (nb_pkts) {
+		uint16_t ret, num;
+
+		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+		ret = ice_xmit_fixed_burst_vec_avx512(tx_queue,
+				&tx_pkts[nb_tx], num, true);
+
 		nb_tx += ret;
 		nb_pkts -= ret;
 		if (ret < num)
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index a5d76a2936..30500661cb 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -77,7 +77,7 @@ ice_rx_reassemble_packets(struct ice_rx_queue *rxq, struct rte_mbuf **rx_bufs,
 }
 
 static __rte_always_inline int
-ice_tx_free_bufs(struct ice_tx_queue *txq)
+ice_tx_free_bufs_vec(struct ice_tx_queue *txq)
 {
 	struct ice_tx_entry *txep;
 	uint32_t n;
@@ -197,7 +197,8 @@ _ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq)
 #ifdef CC_AVX512_SUPPORT
 	struct rte_eth_dev *dev = txq->vsi->adapter->eth_dev;
 
-	if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512) {
+	if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512 ||
+	    dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512_offload) {
 		struct ice_vec_tx_entry *swr = (void *)txq->sw_ring;
 
 		if (txq->tx_tail < i) {
@@ -267,29 +268,38 @@ ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
 	return 0;
 }
 
-#define ICE_NO_VECTOR_FLAGS (				 \
-		DEV_TX_OFFLOAD_MULTI_SEGS |		 \
-		DEV_TX_OFFLOAD_VLAN_INSERT |		 \
-		DEV_TX_OFFLOAD_IPV4_CKSUM |		 \
-		DEV_TX_OFFLOAD_SCTP_CKSUM |		 \
-		DEV_TX_OFFLOAD_UDP_CKSUM |		 \
-		DEV_TX_OFFLOAD_TCP_TSO |		 \
+#define ICE_TX_NO_VECTOR_FLAGS (			\
+		DEV_TX_OFFLOAD_MULTI_SEGS |		\
+		DEV_TX_OFFLOAD_TCP_TSO)
+
+#define ICE_TX_VECTOR_OFFLOAD (				\
+		DEV_TX_OFFLOAD_VLAN_INSERT |		\
+		DEV_TX_OFFLOAD_QINQ_INSERT |		\
+		DEV_TX_OFFLOAD_IPV4_CKSUM |		\
+		DEV_TX_OFFLOAD_SCTP_CKSUM |		\
+		DEV_TX_OFFLOAD_UDP_CKSUM |		\
 		DEV_TX_OFFLOAD_TCP_CKSUM)
 
+#define ICE_VECTOR_PATH		0
+#define ICE_VECTOR_OFFLOAD_PATH	1
+
 static inline int
 ice_tx_vec_queue_default(struct ice_tx_queue *txq)
 {
 	if (!txq)
 		return -1;
 
-	if (txq->offloads & ICE_NO_VECTOR_FLAGS)
-		return -1;
-
 	if (txq->tx_rs_thresh < ICE_VPMD_TX_BURST ||
 	    txq->tx_rs_thresh > ICE_TX_MAX_FREE_BUF_SZ)
 		return -1;
 
-	return 0;
+	if (txq->offloads & ICE_TX_NO_VECTOR_FLAGS)
+		return -1;
+
+	if (txq->offloads & ICE_TX_VECTOR_OFFLOAD)
+		return ICE_VECTOR_OFFLOAD_PATH;
+
+	return ICE_VECTOR_PATH;
 }
 
 static inline int
@@ -312,14 +322,19 @@ ice_tx_vec_dev_check_default(struct rte_eth_dev *dev)
 {
 	int i;
 	struct ice_tx_queue *txq;
+	int ret = 0;
+	int result = 0;
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		txq = dev->data->tx_queues[i];
-		if (ice_tx_vec_queue_default(txq))
+		ret = ice_tx_vec_queue_default(txq);
+		if (ret < 0)
 			return -1;
+		if (ret == ICE_VECTOR_OFFLOAD_PATH)
+			result = ret;
 	}
 
-	return 0;
+	return result;
 }
 
 #ifdef CC_AVX2_SUPPORT
@@ -521,4 +536,64 @@ ice_rxq_rearm_common(struct ice_rx_queue *rxq, __rte_unused bool avx512)
 }
 #endif
 
+static inline void
+ice_txd_enable_offload(struct rte_mbuf *tx_pkt,
+		       uint64_t *txd_hi)
+{
+	uint64_t ol_flags = tx_pkt->ol_flags;
+	uint32_t td_cmd = 0;
+	uint32_t td_offset = 0;
+
+	/* Tx Checksum Offload */
+	/* SET MACLEN */
+	td_offset |= (tx_pkt->l2_len >> 1) <<
+			ICE_TX_DESC_LEN_MACLEN_S;
+
+	/* Enable L3 checksum offload */
+	if (ol_flags & PKT_TX_IP_CKSUM) {
+		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
+		td_offset |= (tx_pkt->l3_len >> 2) <<
+			ICE_TX_DESC_LEN_IPLEN_S;
+	} else if (ol_flags & PKT_TX_IPV4) {
+		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
+		td_offset |= (tx_pkt->l3_len >> 2) <<
+			ICE_TX_DESC_LEN_IPLEN_S;
+	} else if (ol_flags & PKT_TX_IPV6) {
+		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
+		td_offset |= (tx_pkt->l3_len >> 2) <<
+			ICE_TX_DESC_LEN_IPLEN_S;
+	}
+
+	/* Enable L4 checksum offloads */
+	switch (ol_flags & PKT_TX_L4_MASK) {
+	case PKT_TX_TCP_CKSUM:
+		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
+		td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
+			ICE_TX_DESC_LEN_L4_LEN_S;
+		break;
+	case PKT_TX_SCTP_CKSUM:
+		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
+		td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
+			ICE_TX_DESC_LEN_L4_LEN_S;
+		break;
+	case PKT_TX_UDP_CKSUM:
+		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
+		td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
+			ICE_TX_DESC_LEN_L4_LEN_S;
+		break;
+	default:
+		break;
+	}
+
+	*txd_hi |= ((uint64_t)td_offset) << ICE_TXD_QW1_OFFSET_S;
+
+	/* Tx VLAN/QINQ insertion Offload */
+	if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
+		td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
+		*txd_hi |= ((uint64_t)tx_pkt->vlan_tci <<
+				ICE_TXD_QW1_L2TAG1_S);
+	}
+
+	*txd_hi |= ((uint64_t)td_cmd) << ICE_TXD_QW1_CMD_S;
+}
 #endif
diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c
index 3e467c48f1..6029cc2d99 100644
--- a/drivers/net/ice/ice_rxtx_vec_sse.c
+++ b/drivers/net/ice/ice_rxtx_vec_sse.c
@@ -702,7 +702,7 @@ ice_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
 
 	if (txq->nb_tx_free < txq->tx_free_thresh)
-		ice_tx_free_bufs(txq);
+		ice_tx_free_bufs_vec(txq);
 
 	nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
 	nb_commit = nb_pkts;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 15+ messages in thread

* [dpdk-dev] [PATCH v4 2/2] net/ice: add Rx AVX512 offload path
  2021-04-15  1:13 ` [dpdk-dev] [PATCH v4 0/2] add alternative " Leyi Rong
  2021-04-15  1:13   ` [dpdk-dev] [PATCH v4 1/2] net/ice: add Tx " Leyi Rong
@ 2021-04-15  1:13   ` Leyi Rong
  1 sibling, 0 replies; 15+ messages in thread
From: Leyi Rong @ 2021-04-15  1:13 UTC (permalink / raw)
  To: qi.z.zhang, wenzhuo.lu; +Cc: dev, Leyi Rong

Split the AVX512 Rx data path into two: one basic path, and one that
can support additional Rx offload features, including Rx checksum
offload, Rx VLAN offload, and RSS offload.
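
On the offload path the parsed results land in the usual mbuf fields,
so a consumer can check them per packet. A minimal sketch (hypothetical
helper, illustrative only; the basic path does not populate these
fields):

	#include <rte_mbuf.h>

	static int
	rx_pkt_ok(const struct rte_mbuf *m)
	{
		/* checksum verdicts taken from the Rx descriptor */
		if ((m->ol_flags & PKT_RX_IP_CKSUM_MASK) ==
				PKT_RX_IP_CKSUM_BAD)
			return 0;
		if ((m->ol_flags & PKT_RX_L4_CKSUM_MASK) ==
				PKT_RX_L4_CKSUM_BAD)
			return 0;
		if (m->ol_flags & PKT_RX_RSS_HASH)
			(void)m->hash.rss;	/* RSS hash is valid */
		if (m->ol_flags & PKT_RX_VLAN_STRIPPED)
			(void)m->vlan_tci;	/* stripped VLAN tag */
		return 1;
	}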

Signed-off-by: Leyi Rong <leyi.rong@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ice/ice_rxtx.c            |  50 +++-
 drivers/net/ice/ice_rxtx.h            |   7 +
 drivers/net/ice/ice_rxtx_vec_avx512.c | 345 +++++++++++++++++---------
 drivers/net/ice/ice_rxtx_vec_common.h |  50 ++--
 4 files changed, 298 insertions(+), 154 deletions(-)

diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 75326c76ab..92fbbc18da 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -1059,6 +1059,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 	uint32_t ring_size;
 	uint16_t len;
 	int use_def_burst_func = 1;
+	uint64_t offloads;
 
 	if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
 	    nb_desc > ICE_MAX_RING_DESC ||
@@ -1068,6 +1069,8 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
 	/* Free memory if needed */
 	if (dev->data->rx_queues[queue_idx]) {
 		ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
@@ -1088,6 +1091,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->nb_rx_desc = nb_desc;
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 	rxq->queue_id = queue_idx;
+	rxq->offloads = offloads;
 
 	rxq->reg_idx = vsi->base_queue + queue_idx;
 	rxq->port_id = dev->data->port_id;
@@ -1990,7 +1994,9 @@ ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
 #ifdef CC_AVX512_SUPPORT
 	    dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 ||
+	    dev->rx_pkt_burst == ice_recv_pkts_vec_avx512_offload ||
 	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 ||
+	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512_offload ||
 #endif
 	    dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
 	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2)
@@ -3052,12 +3058,14 @@ ice_set_rx_function(struct rte_eth_dev *dev)
 #ifdef RTE_ARCH_X86
 	struct ice_rx_queue *rxq;
 	int i;
+	int rx_check_ret;
 	bool use_avx512 = false;
 	bool use_avx2 = false;
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed &&
-				rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
+		rx_check_ret = ice_rx_vec_dev_check(dev);
+		if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
+		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
 			ad->rx_vec_allowed = true;
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
@@ -3091,11 +3099,19 @@ ice_set_rx_function(struct rte_eth_dev *dev)
 		if (dev->data->scattered_rx) {
 			if (use_avx512) {
 #ifdef CC_AVX512_SUPPORT
-				PMD_DRV_LOG(NOTICE,
-					"Using AVX512 Vector Scattered Rx (port %d).",
-					dev->data->port_id);
-				dev->rx_pkt_burst =
-					ice_recv_scattered_pkts_vec_avx512;
+				if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+					PMD_DRV_LOG(NOTICE,
+						"Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
+						dev->data->port_id);
+					dev->rx_pkt_burst =
+						ice_recv_scattered_pkts_vec_avx512_offload;
+				} else {
+					PMD_DRV_LOG(NOTICE,
+						"Using AVX512 Vector Scattered Rx (port %d).",
+						dev->data->port_id);
+					dev->rx_pkt_burst =
+						ice_recv_scattered_pkts_vec_avx512;
+				}
 #endif
 			} else {
 				PMD_DRV_LOG(DEBUG,
@@ -3109,11 +3125,19 @@ ice_set_rx_function(struct rte_eth_dev *dev)
 		} else {
 			if (use_avx512) {
 #ifdef CC_AVX512_SUPPORT
-				PMD_DRV_LOG(NOTICE,
-					"Using AVX512 Vector Rx (port %d).",
-					dev->data->port_id);
-				dev->rx_pkt_burst =
-					ice_recv_pkts_vec_avx512;
+				if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+					PMD_DRV_LOG(NOTICE,
+						"Using AVX512 OFFLOAD Vector Rx (port %d).",
+						dev->data->port_id);
+					dev->rx_pkt_burst =
+						ice_recv_pkts_vec_avx512_offload;
+				} else {
+					PMD_DRV_LOG(NOTICE,
+						"Using AVX512 Vector Rx (port %d).",
+						dev->data->port_id);
+					dev->rx_pkt_burst =
+						ice_recv_pkts_vec_avx512;
+				}
 #endif
 			} else {
 				PMD_DRV_LOG(DEBUG,
@@ -3162,7 +3186,9 @@ static const struct {
 #ifdef RTE_ARCH_X86
 #ifdef CC_AVX512_SUPPORT
 	{ ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
+	{ ice_recv_scattered_pkts_vec_avx512_offload, "Offload Vector AVX512 Scattered" },
 	{ ice_recv_pkts_vec_avx512,           "Vector AVX512" },
+	{ ice_recv_pkts_vec_avx512_offload,   "Offload Vector AVX512" },
 #endif
 	{ ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
 	{ ice_recv_pkts_vec_avx2,           "Vector AVX2" },
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index f72fad0255..b29387ca0f 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -88,6 +88,7 @@ struct ice_rx_queue {
 	uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
 	ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
 	ice_rx_release_mbufs_t rx_rel_mbufs;
+	uint64_t offloads;
 };
 
 struct ice_tx_entry {
@@ -256,9 +257,15 @@ uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 				uint16_t nb_pkts);
 uint16_t ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
 				  uint16_t nb_pkts);
+uint16_t ice_recv_pkts_vec_avx512_offload(void *rx_queue,
+					  struct rte_mbuf **rx_pkts,
+					  uint16_t nb_pkts);
 uint16_t ice_recv_scattered_pkts_vec_avx512(void *rx_queue,
 					    struct rte_mbuf **rx_pkts,
 					    uint16_t nb_pkts);
+uint16_t ice_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
+						    struct rte_mbuf **rx_pkts,
+						    uint16_t nb_pkts);
 uint16_t ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 				  uint16_t nb_pkts);
 uint16_t ice_xmit_pkts_vec_avx512_offload(void *tx_queue,
diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index 1c4a59a170..ad6c69da9b 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c
@@ -150,10 +150,12 @@ ice_flex_rxd_to_fdir_flags_vec_avx512(const __m256i fdir_id0_7)
 	return fdir_flags;
 }
 
-static inline uint16_t
+static __rte_always_inline uint16_t
 _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
 			      struct rte_mbuf **rx_pkts,
-			      uint16_t nb_pkts, uint8_t *split_packet)
+			      uint16_t nb_pkts,
+			      uint8_t *split_packet,
+			      bool do_offload)
 {
 	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
 	const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
@@ -224,6 +226,7 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
 			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
 
+	/* the following code block is for Rx checksum offload */
 	/* Status/Error flag masks */
 	/**
 	 * mask everything except Checksum Reports, RSS indication
@@ -487,37 +490,42 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
 		__m256i status0_7 = _mm512_extracti64x4_epi64
 			(raw_status0_7, 0);
 
-		/* now do flag manipulation */
+		__m256i mbuf_flags = _mm256_set1_epi32(0);
 
-		/* get only flag/error bits we want */
-		const __m256i flag_bits =
-			_mm256_and_si256(status0_7, flags_mask);
-		/**
-		 * l3_l4_error flags, shuffle, then shift to correct adjustment
-		 * of flags in flags_shuf, and finally mask out extra bits
-		 */
-		__m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
-				_mm256_srli_epi32(flag_bits, 4));
-		l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
-		__m256i l4_outer_mask = _mm256_set1_epi32(0x6);
-		__m256i l4_outer_flags =
-				_mm256_and_si256(l3_l4_flags, l4_outer_mask);
-		l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20);
-
-		__m256i l3_l4_mask = _mm256_set1_epi32(~0x6);
-		l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask);
-		l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags);
-		l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
-		/* set rss and vlan flags */
-		const __m256i rss_vlan_flag_bits =
-			_mm256_srli_epi32(flag_bits, 12);
-		const __m256i rss_vlan_flags =
-			_mm256_shuffle_epi8(rss_vlan_flags_shuf,
-					    rss_vlan_flag_bits);
-
-		/* merge flags */
-		__m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
+		if (do_offload) {
+			/* now do flag manipulation */
+
+			/* get only flag/error bits we want */
+			const __m256i flag_bits =
+				_mm256_and_si256(status0_7, flags_mask);
+			/**
+			 * l3_l4_error flags, shuffle, then shift to correct adjustment
+			 * of flags in flags_shuf, and finally mask out extra bits
+			 */
+			__m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
+					_mm256_srli_epi32(flag_bits, 4));
+			l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
+			__m256i l4_outer_mask = _mm256_set1_epi32(0x6);
+			__m256i l4_outer_flags =
+					_mm256_and_si256(l3_l4_flags, l4_outer_mask);
+			l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20);
+
+			__m256i l3_l4_mask = _mm256_set1_epi32(~0x6);
+
+			l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask);
+			l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags);
+			l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
+			/* set rss and vlan flags */
+			const __m256i rss_vlan_flag_bits =
+				_mm256_srli_epi32(flag_bits, 12);
+			const __m256i rss_vlan_flags =
+				_mm256_shuffle_epi8(rss_vlan_flags_shuf,
+						    rss_vlan_flag_bits);
+
+			/* merge flags */
+			mbuf_flags = _mm256_or_si256(l3_l4_flags,
 						     rss_vlan_flags);
+		}
 
 		if (rxq->fdir_enabled) {
 			const __m256i fdir_id4_7 =
@@ -529,12 +537,19 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
 			const __m256i fdir_id0_7 =
 				_mm256_unpackhi_epi64(fdir_id4_7, fdir_id0_3);
 
-			const __m256i fdir_flags =
-				ice_flex_rxd_to_fdir_flags_vec_avx512
-					(fdir_id0_7);
-
-			/* merge with fdir_flags */
-			mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_flags);
+			if (do_offload) {
+				const __m256i fdir_flags =
+					ice_flex_rxd_to_fdir_flags_vec_avx512
+						(fdir_id0_7);
+
+				/* merge with fdir_flags */
+				mbuf_flags = _mm256_or_si256
+						(mbuf_flags, fdir_flags);
+			} else {
+				mbuf_flags =
+					ice_flex_rxd_to_fdir_flags_vec_avx512
+						(fdir_id0_7);
+			}
 
 			/* write to mbuf: have to use scalar store here */
 			rx_pkts[i + 0]->hash.fdir.hi =
@@ -562,95 +577,97 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
 				_mm256_extract_epi32(fdir_id0_7, 4);
 		} /* if() on fdir_enabled */
 
+		if (do_offload) {
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
-		/**
-		 * needs to load 2nd 16B of each desc for RSS hash parsing,
-		 * will cause performance drop to get into this context.
-		 */
-		if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_RSS_HASH) {
-			/* load bottom half of every 32B desc */
-			const __m128i raw_desc_bh7 =
-				_mm_load_si128
-					((void *)(&rxdp[7].wb.status_error1));
-			rte_compiler_barrier();
-			const __m128i raw_desc_bh6 =
-				_mm_load_si128
-					((void *)(&rxdp[6].wb.status_error1));
-			rte_compiler_barrier();
-			const __m128i raw_desc_bh5 =
-				_mm_load_si128
-					((void *)(&rxdp[5].wb.status_error1));
-			rte_compiler_barrier();
-			const __m128i raw_desc_bh4 =
-				_mm_load_si128
-					((void *)(&rxdp[4].wb.status_error1));
-			rte_compiler_barrier();
-			const __m128i raw_desc_bh3 =
-				_mm_load_si128
-					((void *)(&rxdp[3].wb.status_error1));
-			rte_compiler_barrier();
-			const __m128i raw_desc_bh2 =
-				_mm_load_si128
-					((void *)(&rxdp[2].wb.status_error1));
-			rte_compiler_barrier();
-			const __m128i raw_desc_bh1 =
-				_mm_load_si128
-					((void *)(&rxdp[1].wb.status_error1));
-			rte_compiler_barrier();
-			const __m128i raw_desc_bh0 =
-				_mm_load_si128
-					((void *)(&rxdp[0].wb.status_error1));
-
-			__m256i raw_desc_bh6_7 =
-				_mm256_inserti128_si256
-					(_mm256_castsi128_si256(raw_desc_bh6),
-					raw_desc_bh7, 1);
-			__m256i raw_desc_bh4_5 =
-				_mm256_inserti128_si256
-					(_mm256_castsi128_si256(raw_desc_bh4),
-					raw_desc_bh5, 1);
-			__m256i raw_desc_bh2_3 =
-				_mm256_inserti128_si256
-					(_mm256_castsi128_si256(raw_desc_bh2),
-					raw_desc_bh3, 1);
-			__m256i raw_desc_bh0_1 =
-				_mm256_inserti128_si256
-					(_mm256_castsi128_si256(raw_desc_bh0),
-					raw_desc_bh1, 1);
-
 			/**
-			 * to shift the 32b RSS hash value to the
-			 * highest 32b of each 128b before mask
+			 * needs to load 2nd 16B of each desc for RSS hash parsing,
+			 * will cause performance drop to get into this context.
 			 */
-			__m256i rss_hash6_7 =
-				_mm256_slli_epi64(raw_desc_bh6_7, 32);
-			__m256i rss_hash4_5 =
-				_mm256_slli_epi64(raw_desc_bh4_5, 32);
-			__m256i rss_hash2_3 =
-				_mm256_slli_epi64(raw_desc_bh2_3, 32);
-			__m256i rss_hash0_1 =
-				_mm256_slli_epi64(raw_desc_bh0_1, 32);
-
-			__m256i rss_hash_msk =
-				_mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
-						 0xFFFFFFFF, 0, 0, 0);
-
-			rss_hash6_7 = _mm256_and_si256
-					(rss_hash6_7, rss_hash_msk);
-			rss_hash4_5 = _mm256_and_si256
-					(rss_hash4_5, rss_hash_msk);
-			rss_hash2_3 = _mm256_and_si256
-					(rss_hash2_3, rss_hash_msk);
-			rss_hash0_1 = _mm256_and_si256
-					(rss_hash0_1, rss_hash_msk);
-
-			mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
-			mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
-			mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
-			mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
-		} /* if() on RSS hash parsing */
+			if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
+					DEV_RX_OFFLOAD_RSS_HASH) {
+				/* load bottom half of every 32B desc */
+				const __m128i raw_desc_bh7 =
+					_mm_load_si128
+						((void *)(&rxdp[7].wb.status_error1));
+				rte_compiler_barrier();
+				const __m128i raw_desc_bh6 =
+					_mm_load_si128
+						((void *)(&rxdp[6].wb.status_error1));
+				rte_compiler_barrier();
+				const __m128i raw_desc_bh5 =
+					_mm_load_si128
+						((void *)(&rxdp[5].wb.status_error1));
+				rte_compiler_barrier();
+				const __m128i raw_desc_bh4 =
+					_mm_load_si128
+						((void *)(&rxdp[4].wb.status_error1));
+				rte_compiler_barrier();
+				const __m128i raw_desc_bh3 =
+					_mm_load_si128
+						((void *)(&rxdp[3].wb.status_error1));
+				rte_compiler_barrier();
+				const __m128i raw_desc_bh2 =
+					_mm_load_si128
+						((void *)(&rxdp[2].wb.status_error1));
+				rte_compiler_barrier();
+				const __m128i raw_desc_bh1 =
+					_mm_load_si128
+						((void *)(&rxdp[1].wb.status_error1));
+				rte_compiler_barrier();
+				const __m128i raw_desc_bh0 =
+					_mm_load_si128
+						((void *)(&rxdp[0].wb.status_error1));
+
+				__m256i raw_desc_bh6_7 =
+					_mm256_inserti128_si256
+						(_mm256_castsi128_si256(raw_desc_bh6),
+						raw_desc_bh7, 1);
+				__m256i raw_desc_bh4_5 =
+					_mm256_inserti128_si256
+						(_mm256_castsi128_si256(raw_desc_bh4),
+						raw_desc_bh5, 1);
+				__m256i raw_desc_bh2_3 =
+					_mm256_inserti128_si256
+						(_mm256_castsi128_si256(raw_desc_bh2),
+						raw_desc_bh3, 1);
+				__m256i raw_desc_bh0_1 =
+					_mm256_inserti128_si256
+						(_mm256_castsi128_si256(raw_desc_bh0),
+						raw_desc_bh1, 1);
+
+				/**
+				 * to shift the 32b RSS hash value to the
+				 * highest 32b of each 128b before mask
+				 */
+				__m256i rss_hash6_7 =
+					_mm256_slli_epi64(raw_desc_bh6_7, 32);
+				__m256i rss_hash4_5 =
+					_mm256_slli_epi64(raw_desc_bh4_5, 32);
+				__m256i rss_hash2_3 =
+					_mm256_slli_epi64(raw_desc_bh2_3, 32);
+				__m256i rss_hash0_1 =
+					_mm256_slli_epi64(raw_desc_bh0_1, 32);
+
+				__m256i rss_hash_msk =
+					_mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
+							 0xFFFFFFFF, 0, 0, 0);
+
+				rss_hash6_7 = _mm256_and_si256
+						(rss_hash6_7, rss_hash_msk);
+				rss_hash4_5 = _mm256_and_si256
+						(rss_hash4_5, rss_hash_msk);
+				rss_hash2_3 = _mm256_and_si256
+						(rss_hash2_3, rss_hash_msk);
+				rss_hash0_1 = _mm256_and_si256
+						(rss_hash0_1, rss_hash_msk);
+
+				mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
+				mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
+				mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
+				mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
+			} /* if() on RSS hash parsing */
 #endif
+		}
 
 		/**
 		 * At this point, we have the 8 sets of flags in the low 16-bits
@@ -806,7 +823,19 @@ uint16_t
 ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
 			 uint16_t nb_pkts)
 {
-	return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts, NULL);
+	return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts, NULL, false);
+}
+
+/**
+ * Notice:
+ * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
+ */
+uint16_t
+ice_recv_pkts_vec_avx512_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+				 uint16_t nb_pkts)
+{
+	return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts,
+					     nb_pkts, NULL, true);
 }
 
 /**
@@ -823,7 +852,49 @@ ice_recv_scattered_burst_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
 
 	/* get some new buffers */
 	uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx512(rxq, rx_pkts, nb_pkts,
-						       split_flags);
+						       split_flags, false);
+	if (nb_bufs == 0)
+		return 0;
+
+	/* happy day case, full burst + no packets to be joined */
+	const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+	if (!rxq->pkt_first_seg &&
+	    split_fl64[0] == 0 && split_fl64[1] == 0 &&
+	    split_fl64[2] == 0 && split_fl64[3] == 0)
+		return nb_bufs;
+
+	/* reassemble any packets that need reassembly */
+	unsigned int i = 0;
+
+	if (!rxq->pkt_first_seg) {
+		/* find the first split flag, and only reassemble then */
+		while (i < nb_bufs && !split_flags[i])
+			i++;
+		if (i == nb_bufs)
+			return nb_bufs;
+		rxq->pkt_first_seg = rx_pkts[i];
+	}
+	return i + ice_rx_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+					     &split_flags[i]);
+}
+
+/**
+ * vPMD receive routine that reassembles single burst of 32 scattered packets
+ * Notice:
+ * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
+ */
+static uint16_t
+ice_recv_scattered_burst_vec_avx512_offload(void *rx_queue,
+					    struct rte_mbuf **rx_pkts,
+					    uint16_t nb_pkts)
+{
+	struct ice_rx_queue *rxq = rx_queue;
+	uint8_t split_flags[ICE_VPMD_RX_BURST] = {0};
+
+	/* get some new buffers */
+	uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx512(rxq,
+				rx_pkts, nb_pkts, split_flags, true);
 	if (nb_bufs == 0)
 		return 0;
 
@@ -874,6 +945,32 @@ ice_recv_scattered_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
 				rx_pkts + retval, nb_pkts);
 }
 
+/**
+ * vPMD receive routine that reassembles scattered packets.
+ * Main receive routine that can handle arbitrary burst sizes
+ * Notice:
+ * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
+ */
+uint16_t
+ice_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
+					   struct rte_mbuf **rx_pkts,
+					   uint16_t nb_pkts)
+{
+	uint16_t retval = 0;
+
+	while (nb_pkts > ICE_VPMD_RX_BURST) {
+		uint16_t burst =
+			ice_recv_scattered_burst_vec_avx512_offload(rx_queue,
+				rx_pkts + retval, ICE_VPMD_RX_BURST);
+		retval += burst;
+		nb_pkts -= burst;
+		if (burst < ICE_VPMD_RX_BURST)
+			return retval;
+	}
+	return retval + ice_recv_scattered_burst_vec_avx512_offload(rx_queue,
+				rx_pkts + retval, nb_pkts);
+}
+
 static __rte_always_inline int
 ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
 {
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index 30500661cb..f30d3e56c6 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -247,6 +247,27 @@ ice_rxq_vec_setup_default(struct ice_rx_queue *rxq)
 	return 0;
 }
 
+#define ICE_TX_NO_VECTOR_FLAGS (			\
+		DEV_TX_OFFLOAD_MULTI_SEGS |		\
+		DEV_TX_OFFLOAD_TCP_TSO)
+
+#define ICE_TX_VECTOR_OFFLOAD (				\
+		DEV_TX_OFFLOAD_VLAN_INSERT |		\
+		DEV_TX_OFFLOAD_QINQ_INSERT |		\
+		DEV_TX_OFFLOAD_IPV4_CKSUM |		\
+		DEV_TX_OFFLOAD_SCTP_CKSUM |		\
+		DEV_TX_OFFLOAD_UDP_CKSUM |		\
+		DEV_TX_OFFLOAD_TCP_CKSUM)
+
+#define ICE_RX_VECTOR_OFFLOAD (				\
+		DEV_RX_OFFLOAD_CHECKSUM |		\
+		DEV_RX_OFFLOAD_SCTP_CKSUM |		\
+		DEV_RX_OFFLOAD_VLAN |			\
+		DEV_RX_OFFLOAD_RSS_HASH)
+
+#define ICE_VECTOR_PATH		0
+#define ICE_VECTOR_OFFLOAD_PATH	1
+
 static inline int
 ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
 {
@@ -265,23 +286,11 @@ ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
 	if (rxq->proto_xtr != PROTO_XTR_NONE)
 		return -1;
 
-	return 0;
-}
-
-#define ICE_TX_NO_VECTOR_FLAGS (			\
-		DEV_TX_OFFLOAD_MULTI_SEGS |		\
-		DEV_TX_OFFLOAD_TCP_TSO)
-
-#define ICE_TX_VECTOR_OFFLOAD (				\
-		DEV_TX_OFFLOAD_VLAN_INSERT |		\
-		DEV_TX_OFFLOAD_QINQ_INSERT |		\
-		DEV_TX_OFFLOAD_IPV4_CKSUM |		\
-		DEV_TX_OFFLOAD_SCTP_CKSUM |		\
-		DEV_TX_OFFLOAD_UDP_CKSUM |		\
-		DEV_TX_OFFLOAD_TCP_CKSUM)
+	if (rxq->offloads & ICE_RX_VECTOR_OFFLOAD)
+		return ICE_VECTOR_OFFLOAD_PATH;
 
-#define ICE_VECTOR_PATH		0
-#define ICE_VECTOR_OFFLOAD_PATH	1
+	return ICE_VECTOR_PATH;
+}
 
 static inline int
 ice_tx_vec_queue_default(struct ice_tx_queue *txq)
@@ -307,14 +316,19 @@ ice_rx_vec_dev_check_default(struct rte_eth_dev *dev)
 {
 	int i;
 	struct ice_rx_queue *rxq;
+	int ret = 0;
+	int result = 0;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
-		if (ice_rx_vec_queue_default(rxq))
+		ret = (ice_rx_vec_queue_default(rxq));
+		if (ret < 0)
 			return -1;
+		if (ret == ICE_VECTOR_OFFLOAD_PATH)
+			result = ret;
 	}
 
-	return 0;
+	return result;
 }
 
 static inline int
-- 
2.17.1


^ permalink raw reply	[flat|nested] 15+ messages in thread
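
The "happy day" test in the scattered-burst routine above folds the 32
per-packet split flags into four 64-bit words, so a clean full burst is
detected with a handful of compares instead of 32 byte tests. A standalone
sketch of that check (memcpy replaces the driver's pointer cast to stay
strict-aliasing clean; the burst size of 32 matches ICE_VPMD_RX_BURST):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BURST 32

/* Return 1 when no packet in the burst was split across mbufs. */
static int
no_splits(const uint8_t split_flags[BURST])
{
	uint64_t w[BURST / 8];

	memcpy(w, split_flags, sizeof(w));
	return (w[0] | w[1] | w[2] | w[3]) == 0;
}

int main(void)
{
	uint8_t flags[BURST] = { 0 };

	printf("clean burst: %d\n", no_splits(flags));
	flags[17] = 1;	/* mark one packet as split */
	printf("split burst: %d\n", no_splits(flags));
	return 0;
}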

* [dpdk-dev] [PATCH v5 0/2] add alternative AVX512 offload path
  2021-03-17  9:14 [dpdk-dev] [PATCH 0/2] add alternative AVX512 offload path Leyi Rong
                   ` (3 preceding siblings ...)
  2021-04-15  1:13 ` [dpdk-dev] [PATCH v4 0/2] add alternative " Leyi Rong
@ 2021-04-15  8:58 ` Leyi Rong
  2021-04-15  8:58   ` [dpdk-dev] [PATCH v5 1/2] net/ice: add Tx " Leyi Rong
                     ` (2 more replies)
  4 siblings, 3 replies; 15+ messages in thread
From: Leyi Rong @ 2021-04-15  8:58 UTC (permalink / raw)
  To: qi.z.zhang, wenzhuo.lu; +Cc: dev, Leyi Rong

Add an alternative Rx/Tx offload path for AVX512, which supports Rx/Tx
offload features such as checksum, VLAN, RSS and QinQ offload.
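
For reference, a minimal application-side sketch (not part of this series)
of how the offloads covered by this path might be requested; the port
number, queue counts and the exact offload subset are illustrative
assumptions:

#include <stdlib.h>
#include <rte_ethdev.h>
#include <rte_debug.h>

/* Assumes rte_eal_init() has already run and port 0 is an ice device.
 * Uses the pre-21.11 DEV_*_OFFLOAD_* flag names current in this era. */
static void
configure_port0_with_offloads(void)
{
	struct rte_eth_conf conf = { 0 };

	conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM |
			       DEV_RX_OFFLOAD_VLAN |
			       DEV_RX_OFFLOAD_RSS_HASH;
	conf.txmode.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
			       DEV_TX_OFFLOAD_TCP_CKSUM |
			       DEV_TX_OFFLOAD_VLAN_INSERT;

	if (rte_eth_dev_configure(0, 1, 1, &conf) < 0)
		rte_exit(EXIT_FAILURE, "port configure failed\n");
}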

---
v5:
- fix bug [DPDK-26560]: invalid checksum for tunnel packets.

v4:
- code rebased.

v3:
- complete ice_dev_supported_ptypes_get() for the newly added offload
  functions.
- complete Tx/Rx burst info for the newly added offload functions.

v2:
- add a "do_offload" parameter to the relevant functions to reduce code
  duplication.

Leyi Rong (2):
  net/ice: add Tx AVX512 offload path
  net/ice: add Rx AVX512 offload path

 drivers/net/ice/ice_rxtx.c            |  78 +++--
 drivers/net/ice/ice_rxtx.h            |  10 +
 drivers/net/ice/ice_rxtx_vec_avx2.c   |   2 +-
 drivers/net/ice/ice_rxtx_vec_avx512.c | 403 +++++++++++++++++---------
 drivers/net/ice/ice_rxtx_vec_common.h | 130 +++++++--
 drivers/net/ice/ice_rxtx_vec_sse.c    |   2 +-
 6 files changed, 449 insertions(+), 176 deletions(-)

-- 
2.17.1


^ permalink raw reply	[flat|nested] 15+ messages in thread

* [dpdk-dev] [PATCH v5 1/2] net/ice: add Tx AVX512 offload path
  2021-04-15  8:58 ` [dpdk-dev] [PATCH v5 0/2] add alternative " Leyi Rong
@ 2021-04-15  8:58   ` Leyi Rong
  2021-04-15  8:58   ` [dpdk-dev] [PATCH v5 2/2] net/ice: add Rx " Leyi Rong
  2021-04-16  9:02   ` [dpdk-dev] [PATCH v5 0/2] add alternative " Sun, QinX
  2 siblings, 0 replies; 15+ messages in thread
From: Leyi Rong @ 2021-04-15  8:58 UTC (permalink / raw)
  To: qi.z.zhang, wenzhuo.lu; +Cc: dev, Leyi Rong

Add an alternative Tx data path for AVX512 that supports a partial set of
Tx offload features, including Tx checksum offload and VLAN/QinQ
insertion offload.
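
Rather than duplicating the burst loop, the patch threads a compile-time
"do_offload" flag through __rte_always_inline helpers (ice_vtx1(),
ice_vtx() and ice_xmit_fixed_burst_vec_avx512() below), so each entry
point compiles to a specialized body with the unused branches removed.
A self-contained sketch of that pattern; the field layout and bit values
here are made up for illustration, not the real ice descriptor format:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* One always-inline worker; the constant flag lets the compiler emit
 * two specialized copies and delete the dead branch in each. */
static inline __attribute__((always_inline)) uint64_t
build_qw1(uint64_t data_len, bool do_offload)
{
	uint64_t qw = data_len << 34;	/* illustrative buf-size field */

	if (do_offload)
		qw |= 1ULL << 4;	/* illustrative checksum cmd bit */
	return qw;
}

/* Thin entry points, analogous to ice_xmit_pkts_vec_avx512[_offload]. */
static uint64_t build_qw1_basic(uint64_t len) { return build_qw1(len, false); }
static uint64_t build_qw1_offload(uint64_t len) { return build_qw1(len, true); }

int main(void)
{
	printf("basic=%#llx offload=%#llx\n",
	       (unsigned long long)build_qw1_basic(64),
	       (unsigned long long)build_qw1_offload(64));
	return 0;
}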

Signed-off-by: Leyi Rong <leyi.rong@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ice/ice_rxtx.c            |  28 +++++--
 drivers/net/ice/ice_rxtx.h            |   3 +
 drivers/net/ice/ice_rxtx_vec_avx2.c   |   2 +-
 drivers/net/ice/ice_rxtx_vec_avx512.c |  58 +++++++++++---
 drivers/net/ice/ice_rxtx_vec_common.h | 106 ++++++++++++++++++++++----
 drivers/net/ice/ice_rxtx_vec_sse.c    |   2 +-
 6 files changed, 164 insertions(+), 35 deletions(-)

diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 0827db9c9e..75326c76ab 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -8,6 +8,7 @@
 
 #include "rte_pmd_ice.h"
 #include "ice_rxtx.h"
+#include "ice_rxtx_vec_common.h"
 
 #define ICE_TX_CKSUM_OFFLOAD_MASK (		 \
 		PKT_TX_IP_CKSUM |		 \
@@ -3267,12 +3268,14 @@ ice_set_tx_function(struct rte_eth_dev *dev)
 #ifdef RTE_ARCH_X86
 	struct ice_tx_queue *txq;
 	int i;
+	int tx_check_ret;
 	bool use_avx512 = false;
 	bool use_avx2 = false;
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		if (!ice_tx_vec_dev_check(dev) &&
-				rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
+		tx_check_ret = ice_tx_vec_dev_check(dev);
+		if (tx_check_ret >= 0 &&
+		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
 			ad->tx_vec_allowed = true;
 			for (i = 0; i < dev->data->nb_tx_queues; i++) {
 				txq = dev->data->tx_queues[i];
@@ -3291,12 +3294,15 @@ ice_set_tx_function(struct rte_eth_dev *dev)
 			PMD_DRV_LOG(NOTICE,
 				"AVX512 is not supported in build env");
 #endif
-			if (!use_avx512 &&
+			if (!use_avx512 && tx_check_ret == ICE_VECTOR_PATH &&
 			(rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
 			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
 			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
 				use_avx2 = true;
 
+			if (!use_avx512 && tx_check_ret == ICE_VECTOR_OFFLOAD_PATH)
+				ad->tx_vec_allowed = false;
+
 		} else {
 			ad->tx_vec_allowed = false;
 		}
@@ -3305,9 +3311,18 @@ ice_set_tx_function(struct rte_eth_dev *dev)
 	if (ad->tx_vec_allowed) {
 		if (use_avx512) {
 #ifdef CC_AVX512_SUPPORT
-			PMD_DRV_LOG(NOTICE, "Using AVX512 Vector Tx (port %d).",
-				    dev->data->port_id);
-			dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
+			if (tx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+				PMD_DRV_LOG(NOTICE,
+					    "Using AVX512 OFFLOAD Vector Tx (port %d).",
+					    dev->data->port_id);
+				dev->tx_pkt_burst =
+					ice_xmit_pkts_vec_avx512_offload;
+			} else {
+				PMD_DRV_LOG(NOTICE,
+					    "Using AVX512 Vector Tx (port %d).",
+					    dev->data->port_id);
+				dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
+			}
 #endif
 		} else {
 			PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
@@ -3343,6 +3358,7 @@ static const struct {
 #ifdef RTE_ARCH_X86
 #ifdef CC_AVX512_SUPPORT
 	{ ice_xmit_pkts_vec_avx512, "Vector AVX512" },
+	{ ice_xmit_pkts_vec_avx512_offload, "Offload Vector AVX512" },
 #endif
 	{ ice_xmit_pkts_vec_avx2, "Vector AVX2" },
 	{ ice_xmit_pkts_vec,      "Vector SSE" },
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 99096e4c21..f72fad0255 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -261,6 +261,9 @@ uint16_t ice_recv_scattered_pkts_vec_avx512(void *rx_queue,
 					    uint16_t nb_pkts);
 uint16_t ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 				  uint16_t nb_pkts);
+uint16_t ice_xmit_pkts_vec_avx512_offload(void *tx_queue,
+					  struct rte_mbuf **tx_pkts,
+					  uint16_t nb_pkts);
 int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc);
 int ice_tx_done_cleanup(void *txq, uint32_t free_cnt);
 int ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c
index 83dcdf15d4..8d4bd6df1b 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx2.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx2.c
@@ -853,7 +853,7 @@ ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
 
 	if (txq->nb_tx_free < txq->tx_free_thresh)
-		ice_tx_free_bufs(txq);
+		ice_tx_free_bufs_vec(txq);
 
 	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
 	if (unlikely(nb_pkts == 0))
diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index a668b82232..1c4a59a170 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c
@@ -982,23 +982,26 @@ ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
 	return txq->tx_rs_thresh;
 }
 
-static inline void
+static __rte_always_inline void
 ice_vtx1(volatile struct ice_tx_desc *txdp,
-	 struct rte_mbuf *pkt, uint64_t flags)
+	 struct rte_mbuf *pkt, uint64_t flags, bool do_offload)
 {
 	uint64_t high_qw =
 		(ICE_TX_DESC_DTYPE_DATA |
 		 ((uint64_t)flags  << ICE_TXD_QW1_CMD_S) |
 		 ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S));
 
+	if (do_offload)
+		ice_txd_enable_offload(pkt, &high_qw);
+
 	__m128i descriptor = _mm_set_epi64x(high_qw,
 				pkt->buf_iova + pkt->data_off);
 	_mm_store_si128((__m128i *)txdp, descriptor);
 }
 
-static inline void
-ice_vtx(volatile struct ice_tx_desc *txdp,
-	struct rte_mbuf **pkt, uint16_t nb_pkts,  uint64_t flags)
+static __rte_always_inline void
+ice_vtx(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkt,
+	uint16_t nb_pkts,  uint64_t flags, bool do_offload)
 {
 	const uint64_t hi_qw_tmpl = (ICE_TX_DESC_DTYPE_DATA |
 			((uint64_t)flags  << ICE_TXD_QW1_CMD_S));
@@ -1008,18 +1011,26 @@ ice_vtx(volatile struct ice_tx_desc *txdp,
 			hi_qw_tmpl |
 			((uint64_t)pkt[3]->data_len <<
 			 ICE_TXD_QW1_TX_BUF_SZ_S);
+		if (do_offload)
+			ice_txd_enable_offload(pkt[3], &hi_qw3);
 		uint64_t hi_qw2 =
 			hi_qw_tmpl |
 			((uint64_t)pkt[2]->data_len <<
 			 ICE_TXD_QW1_TX_BUF_SZ_S);
+		if (do_offload)
+			ice_txd_enable_offload(pkt[2], &hi_qw2);
 		uint64_t hi_qw1 =
 			hi_qw_tmpl |
 			((uint64_t)pkt[1]->data_len <<
 			 ICE_TXD_QW1_TX_BUF_SZ_S);
+		if (do_offload)
+			ice_txd_enable_offload(pkt[1], &hi_qw1);
 		uint64_t hi_qw0 =
 			hi_qw_tmpl |
 			((uint64_t)pkt[0]->data_len <<
 			 ICE_TXD_QW1_TX_BUF_SZ_S);
+		if (do_offload)
+			ice_txd_enable_offload(pkt[0], &hi_qw0);
 
 		__m512i desc0_3 =
 			_mm512_set_epi64
@@ -1036,7 +1047,7 @@ ice_vtx(volatile struct ice_tx_desc *txdp,
 
 	/* do any last ones */
 	while (nb_pkts) {
-		ice_vtx1(txdp, *pkt, flags);
+		ice_vtx1(txdp, *pkt, flags, do_offload);
 		txdp++, pkt++, nb_pkts--;
 	}
 }
@@ -1051,9 +1062,9 @@ ice_tx_backlog_entry_avx512(struct ice_vec_tx_entry *txep,
 		txep[i].mbuf = tx_pkts[i];
 }
 
-static inline uint16_t
+static __rte_always_inline uint16_t
 ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
-				uint16_t nb_pkts)
+				uint16_t nb_pkts, bool do_offload)
 {
 	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
 	volatile struct ice_tx_desc *txdp;
@@ -1083,11 +1094,11 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 	if (nb_commit >= n) {
 		ice_tx_backlog_entry_avx512(txep, tx_pkts, n);
 
-		ice_vtx(txdp, tx_pkts, n - 1, flags);
+		ice_vtx(txdp, tx_pkts, n - 1, flags, do_offload);
 		tx_pkts += (n - 1);
 		txdp += (n - 1);
 
-		ice_vtx1(txdp, *tx_pkts++, rs);
+		ice_vtx1(txdp, *tx_pkts++, rs, do_offload);
 
 		nb_commit = (uint16_t)(nb_commit - n);
 
@@ -1101,7 +1112,7 @@ ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	ice_tx_backlog_entry_avx512(txep, tx_pkts, nb_commit);
 
-	ice_vtx(txdp, tx_pkts, nb_commit, flags);
+	ice_vtx(txdp, tx_pkts, nb_commit, flags, do_offload);
 
 	tx_id = (uint16_t)(tx_id + nb_commit);
 	if (tx_id > txq->tx_next_rs) {
@@ -1131,7 +1142,30 @@ ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
 		ret = ice_xmit_fixed_burst_vec_avx512(tx_queue,
-						      &tx_pkts[nb_tx], num);
+				&tx_pkts[nb_tx], num, false);
+		nb_tx += ret;
+		nb_pkts -= ret;
+		if (ret < num)
+			break;
+	}
+
+	return nb_tx;
+}
+
+uint16_t
+ice_xmit_pkts_vec_avx512_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
+				 uint16_t nb_pkts)
+{
+	uint16_t nb_tx = 0;
+	struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
+
+	while (nb_pkts) {
+		uint16_t ret, num;
+
+		num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+		ret = ice_xmit_fixed_burst_vec_avx512(tx_queue,
+				&tx_pkts[nb_tx], num, true);
+
 		nb_tx += ret;
 		nb_pkts -= ret;
 		if (ret < num)
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index a5d76a2936..942647f4e9 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -77,7 +77,7 @@ ice_rx_reassemble_packets(struct ice_rx_queue *rxq, struct rte_mbuf **rx_bufs,
 }
 
 static __rte_always_inline int
-ice_tx_free_bufs(struct ice_tx_queue *txq)
+ice_tx_free_bufs_vec(struct ice_tx_queue *txq)
 {
 	struct ice_tx_entry *txep;
 	uint32_t n;
@@ -197,7 +197,8 @@ _ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq)
 #ifdef CC_AVX512_SUPPORT
 	struct rte_eth_dev *dev = txq->vsi->adapter->eth_dev;
 
-	if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512) {
+	if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512 ||
+	    dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512_offload) {
 		struct ice_vec_tx_entry *swr = (void *)txq->sw_ring;
 
 		if (txq->tx_tail < i) {
@@ -267,29 +268,39 @@ ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
 	return 0;
 }
 
-#define ICE_NO_VECTOR_FLAGS (				 \
-		DEV_TX_OFFLOAD_MULTI_SEGS |		 \
-		DEV_TX_OFFLOAD_VLAN_INSERT |		 \
-		DEV_TX_OFFLOAD_IPV4_CKSUM |		 \
-		DEV_TX_OFFLOAD_SCTP_CKSUM |		 \
-		DEV_TX_OFFLOAD_UDP_CKSUM |		 \
-		DEV_TX_OFFLOAD_TCP_TSO |		 \
+#define ICE_TX_NO_VECTOR_FLAGS (			\
+		DEV_TX_OFFLOAD_MULTI_SEGS |		\
+		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |	\
+		DEV_TX_OFFLOAD_TCP_TSO)
+
+#define ICE_TX_VECTOR_OFFLOAD (				\
+		DEV_TX_OFFLOAD_VLAN_INSERT |		\
+		DEV_TX_OFFLOAD_QINQ_INSERT |		\
+		DEV_TX_OFFLOAD_IPV4_CKSUM |		\
+		DEV_TX_OFFLOAD_SCTP_CKSUM |		\
+		DEV_TX_OFFLOAD_UDP_CKSUM |		\
 		DEV_TX_OFFLOAD_TCP_CKSUM)
 
+#define ICE_VECTOR_PATH		0
+#define ICE_VECTOR_OFFLOAD_PATH	1
+
 static inline int
 ice_tx_vec_queue_default(struct ice_tx_queue *txq)
 {
 	if (!txq)
 		return -1;
 
-	if (txq->offloads & ICE_NO_VECTOR_FLAGS)
-		return -1;
-
 	if (txq->tx_rs_thresh < ICE_VPMD_TX_BURST ||
 	    txq->tx_rs_thresh > ICE_TX_MAX_FREE_BUF_SZ)
 		return -1;
 
-	return 0;
+	if (txq->offloads & ICE_TX_NO_VECTOR_FLAGS)
+		return -1;
+
+	if (txq->offloads & ICE_TX_VECTOR_OFFLOAD)
+		return ICE_VECTOR_OFFLOAD_PATH;
+
+	return ICE_VECTOR_PATH;
 }
 
 static inline int
@@ -312,14 +323,19 @@ ice_tx_vec_dev_check_default(struct rte_eth_dev *dev)
 {
 	int i;
 	struct ice_tx_queue *txq;
+	int ret = 0;
+	int result = 0;
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		txq = dev->data->tx_queues[i];
-		if (ice_tx_vec_queue_default(txq))
+		ret = ice_tx_vec_queue_default(txq);
+		if (ret < 0)
 			return -1;
+		if (ret == ICE_VECTOR_OFFLOAD_PATH)
+			result = ret;
 	}
 
-	return 0;
+	return result;
 }
 
 #ifdef CC_AVX2_SUPPORT
@@ -521,4 +537,64 @@ ice_rxq_rearm_common(struct ice_rx_queue *rxq, __rte_unused bool avx512)
 }
 #endif
 
+static inline void
+ice_txd_enable_offload(struct rte_mbuf *tx_pkt,
+		       uint64_t *txd_hi)
+{
+	uint64_t ol_flags = tx_pkt->ol_flags;
+	uint32_t td_cmd = 0;
+	uint32_t td_offset = 0;
+
+	/* Tx Checksum Offload */
+	/* SET MACLEN */
+	td_offset |= (tx_pkt->l2_len >> 1) <<
+			ICE_TX_DESC_LEN_MACLEN_S;
+
+	/* Enable L3 checksum offload */
+	if (ol_flags & PKT_TX_IP_CKSUM) {
+		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
+		td_offset |= (tx_pkt->l3_len >> 2) <<
+			ICE_TX_DESC_LEN_IPLEN_S;
+	} else if (ol_flags & PKT_TX_IPV4) {
+		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
+		td_offset |= (tx_pkt->l3_len >> 2) <<
+			ICE_TX_DESC_LEN_IPLEN_S;
+	} else if (ol_flags & PKT_TX_IPV6) {
+		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
+		td_offset |= (tx_pkt->l3_len >> 2) <<
+			ICE_TX_DESC_LEN_IPLEN_S;
+	}
+
+	/* Enable L4 checksum offloads */
+	switch (ol_flags & PKT_TX_L4_MASK) {
+	case PKT_TX_TCP_CKSUM:
+		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
+		td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
+			ICE_TX_DESC_LEN_L4_LEN_S;
+		break;
+	case PKT_TX_SCTP_CKSUM:
+		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
+		td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
+			ICE_TX_DESC_LEN_L4_LEN_S;
+		break;
+	case PKT_TX_UDP_CKSUM:
+		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
+		td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
+			ICE_TX_DESC_LEN_L4_LEN_S;
+		break;
+	default:
+		break;
+	}
+
+	*txd_hi |= ((uint64_t)td_offset) << ICE_TXD_QW1_OFFSET_S;
+
+	/* Tx VLAN/QINQ insertion Offload */
+	if (ol_flags & (PKT_TX_VLAN | PKT_TX_QINQ)) {
+		td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
+		*txd_hi |= ((uint64_t)tx_pkt->vlan_tci <<
+				ICE_TXD_QW1_L2TAG1_S);
+	}
+
+	*txd_hi |= ((uint64_t)td_cmd) << ICE_TXD_QW1_CMD_S;
+}
 #endif
diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c
index 3e467c48f1..6029cc2d99 100644
--- a/drivers/net/ice/ice_rxtx_vec_sse.c
+++ b/drivers/net/ice/ice_rxtx_vec_sse.c
@@ -702,7 +702,7 @@ ice_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
 
 	if (txq->nb_tx_free < txq->tx_free_thresh)
-		ice_tx_free_bufs(txq);
+		ice_tx_free_bufs_vec(txq);
 
 	nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
 	nb_commit = nb_pkts;
-- 
2.17.1


^ permalink raw reply	[flat|nested] 15+ messages in thread
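
A worked example of the field arithmetic in ice_txd_enable_offload()
above: MACLEN is carried in 2-byte units and IPLEN/L4LEN in 4-byte units,
which is why the driver shifts l2_len right by 1 and the L3/L4 lengths
right by 2. The shift positions below are local stand-ins for the real
ICE_TX_DESC_LEN_*_S constants from the shared ice headers:

#include <stdint.h>
#include <stdio.h>

/* Assumed field positions, for illustration only. */
#define MACLEN_S	0
#define IPLEN_S		7
#define L4LEN_S		14

int main(void)
{
	uint32_t l2_len = 14;	/* Ethernet header */
	uint32_t l3_len = 20;	/* IPv4 header, no options */
	uint32_t l4_len = 20;	/* TCP header, no options */
	uint32_t td_offset = 0;

	td_offset |= (l2_len >> 1) << MACLEN_S;	/* 7 two-byte words */
	td_offset |= (l3_len >> 2) << IPLEN_S;	/* 5 four-byte words */
	td_offset |= (l4_len >> 2) << L4LEN_S;	/* 5 four-byte words */

	printf("td_offset = %#x\n", td_offset);	/* prints 0x14287 */
	return 0;
}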

* [dpdk-dev] [PATCH v5 2/2] net/ice: add Rx AVX512 offload path
  2021-04-15  8:58 ` [dpdk-dev] [PATCH v5 0/2] add alternative " Leyi Rong
  2021-04-15  8:58   ` [dpdk-dev] [PATCH v5 1/2] net/ice: add Tx " Leyi Rong
@ 2021-04-15  8:58   ` Leyi Rong
  2021-04-16  9:02   ` [dpdk-dev] [PATCH v5 0/2] add alternative " Sun, QinX
  2 siblings, 0 replies; 15+ messages in thread
From: Leyi Rong @ 2021-04-15  8:58 UTC (permalink / raw)
  To: qi.z.zhang, wenzhuo.lu; +Cc: dev, Leyi Rong

Split the AVX512 Rx data path in two: one basic path, and one that
supports additional Rx offload features, including Rx checksum offload,
Rx VLAN offload and RSS hash offload.
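
The path choice is driven by the per-queue checks added below: each
returns -1 (no vector path), ICE_VECTOR_PATH (0) or
ICE_VECTOR_OFFLOAD_PATH (1), and the device-level check keeps the
strongest requirement seen across all queues (see the
ice_rxtx_vec_common.h hunks in this patch). A minimal standalone sketch
of that aggregation:

#include <stdio.h>

#define VEC_PATH		0
#define VEC_OFFLOAD_PATH	1

/* Per-queue result: <0 forbids vectors, 1 demands the offload path. */
static int
dev_check(const int *queue_ret, int nb_queues)
{
	int i, result = VEC_PATH;

	for (i = 0; i < nb_queues; i++) {
		if (queue_ret[i] < 0)
			return -1;	/* one bad queue disables vectors */
		if (queue_ret[i] == VEC_OFFLOAD_PATH)
			result = VEC_OFFLOAD_PATH;
	}
	return result;
}

int main(void)
{
	int a[] = { 0, 0, 1 };	/* one queue needs offloads */
	int b[] = { 0, -1, 1 };	/* one queue cannot vectorize */

	printf("%d %d\n", dev_check(a, 3), dev_check(b, 3));	/* 1 -1 */
	return 0;
}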

Signed-off-by: Leyi Rong <leyi.rong@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ice/ice_rxtx.c            |  50 +++-
 drivers/net/ice/ice_rxtx.h            |   7 +
 drivers/net/ice/ice_rxtx_vec_avx512.c | 345 +++++++++++++++++---------
 drivers/net/ice/ice_rxtx_vec_common.h |  52 ++--
 4 files changed, 299 insertions(+), 155 deletions(-)

diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 75326c76ab..92fbbc18da 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -1059,6 +1059,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 	uint32_t ring_size;
 	uint16_t len;
 	int use_def_burst_func = 1;
+	uint64_t offloads;
 
 	if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
 	    nb_desc > ICE_MAX_RING_DESC ||
@@ -1068,6 +1069,8 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
 	/* Free memory if needed */
 	if (dev->data->rx_queues[queue_idx]) {
 		ice_rx_queue_release(dev->data->rx_queues[queue_idx]);
@@ -1088,6 +1091,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->nb_rx_desc = nb_desc;
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 	rxq->queue_id = queue_idx;
+	rxq->offloads = offloads;
 
 	rxq->reg_idx = vsi->base_queue + queue_idx;
 	rxq->port_id = dev->data->port_id;
@@ -1990,7 +1994,9 @@ ice_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
 #ifdef CC_AVX512_SUPPORT
 	    dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 ||
+	    dev->rx_pkt_burst == ice_recv_pkts_vec_avx512_offload ||
 	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 ||
+	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512_offload ||
 #endif
 	    dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
 	    dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2)
@@ -3052,12 +3058,14 @@ ice_set_rx_function(struct rte_eth_dev *dev)
 #ifdef RTE_ARCH_X86
 	struct ice_rx_queue *rxq;
 	int i;
+	int rx_check_ret;
 	bool use_avx512 = false;
 	bool use_avx2 = false;
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed &&
-				rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
+		rx_check_ret = ice_rx_vec_dev_check(dev);
+		if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
+		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
 			ad->rx_vec_allowed = true;
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
 				rxq = dev->data->rx_queues[i];
@@ -3091,11 +3099,19 @@ ice_set_rx_function(struct rte_eth_dev *dev)
 		if (dev->data->scattered_rx) {
 			if (use_avx512) {
 #ifdef CC_AVX512_SUPPORT
-				PMD_DRV_LOG(NOTICE,
-					"Using AVX512 Vector Scattered Rx (port %d).",
-					dev->data->port_id);
-				dev->rx_pkt_burst =
-					ice_recv_scattered_pkts_vec_avx512;
+				if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+					PMD_DRV_LOG(NOTICE,
+						"Using AVX512 OFFLOAD Vector Scattered Rx (port %d).",
+						dev->data->port_id);
+					dev->rx_pkt_burst =
+						ice_recv_scattered_pkts_vec_avx512_offload;
+				} else {
+					PMD_DRV_LOG(NOTICE,
+						"Using AVX512 Vector Scattered Rx (port %d).",
+						dev->data->port_id);
+					dev->rx_pkt_burst =
+						ice_recv_scattered_pkts_vec_avx512;
+				}
 #endif
 			} else {
 				PMD_DRV_LOG(DEBUG,
@@ -3109,11 +3125,19 @@ ice_set_rx_function(struct rte_eth_dev *dev)
 		} else {
 			if (use_avx512) {
 #ifdef CC_AVX512_SUPPORT
-				PMD_DRV_LOG(NOTICE,
-					"Using AVX512 Vector Rx (port %d).",
-					dev->data->port_id);
-				dev->rx_pkt_burst =
-					ice_recv_pkts_vec_avx512;
+				if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+					PMD_DRV_LOG(NOTICE,
+						"Using AVX512 OFFLOAD Vector Rx (port %d).",
+						dev->data->port_id);
+					dev->rx_pkt_burst =
+						ice_recv_pkts_vec_avx512_offload;
+				} else {
+					PMD_DRV_LOG(NOTICE,
+						"Using AVX512 Vector Rx (port %d).",
+						dev->data->port_id);
+					dev->rx_pkt_burst =
+						ice_recv_pkts_vec_avx512;
+				}
 #endif
 			} else {
 				PMD_DRV_LOG(DEBUG,
@@ -3162,7 +3186,9 @@ static const struct {
 #ifdef RTE_ARCH_X86
 #ifdef CC_AVX512_SUPPORT
 	{ ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
+	{ ice_recv_scattered_pkts_vec_avx512_offload, "Offload Vector AVX512 Scattered" },
 	{ ice_recv_pkts_vec_avx512,           "Vector AVX512" },
+	{ ice_recv_pkts_vec_avx512_offload,   "Offload Vector AVX512" },
 #endif
 	{ ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
 	{ ice_recv_pkts_vec_avx2,           "Vector AVX2" },
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index f72fad0255..b29387ca0f 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -88,6 +88,7 @@ struct ice_rx_queue {
 	uint64_t xtr_ol_flag; /* Protocol extraction offload flag */
 	ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
 	ice_rx_release_mbufs_t rx_rel_mbufs;
+	uint64_t offloads;
 };
 
 struct ice_tx_entry {
@@ -256,9 +257,15 @@ uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 				uint16_t nb_pkts);
 uint16_t ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
 				  uint16_t nb_pkts);
+uint16_t ice_recv_pkts_vec_avx512_offload(void *rx_queue,
+					  struct rte_mbuf **rx_pkts,
+					  uint16_t nb_pkts);
 uint16_t ice_recv_scattered_pkts_vec_avx512(void *rx_queue,
 					    struct rte_mbuf **rx_pkts,
 					    uint16_t nb_pkts);
+uint16_t ice_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
+						    struct rte_mbuf **rx_pkts,
+						    uint16_t nb_pkts);
 uint16_t ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 				  uint16_t nb_pkts);
 uint16_t ice_xmit_pkts_vec_avx512_offload(void *tx_queue,
diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index 1c4a59a170..ad6c69da9b 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c
@@ -150,10 +150,12 @@ ice_flex_rxd_to_fdir_flags_vec_avx512(const __m256i fdir_id0_7)
 	return fdir_flags;
 }
 
-static inline uint16_t
+static __rte_always_inline uint16_t
 _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
 			      struct rte_mbuf **rx_pkts,
-			      uint16_t nb_pkts, uint8_t *split_packet)
+			      uint16_t nb_pkts,
+			      uint8_t *split_packet,
+			      bool do_offload)
 {
 	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
 	const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
@@ -224,6 +226,7 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
 			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
 
+	/* following code block is for Rx Checksum Offload */
 	/* Status/Error flag masks */
 	/**
 	 * mask everything except Checksum Reports, RSS indication
@@ -487,37 +490,42 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
 		__m256i status0_7 = _mm512_extracti64x4_epi64
 			(raw_status0_7, 0);
 
-		/* now do flag manipulation */
+		__m256i mbuf_flags = _mm256_set1_epi32(0);
 
-		/* get only flag/error bits we want */
-		const __m256i flag_bits =
-			_mm256_and_si256(status0_7, flags_mask);
-		/**
-		 * l3_l4_error flags, shuffle, then shift to correct adjustment
-		 * of flags in flags_shuf, and finally mask out extra bits
-		 */
-		__m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
-				_mm256_srli_epi32(flag_bits, 4));
-		l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
-		__m256i l4_outer_mask = _mm256_set1_epi32(0x6);
-		__m256i l4_outer_flags =
-				_mm256_and_si256(l3_l4_flags, l4_outer_mask);
-		l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20);
-
-		__m256i l3_l4_mask = _mm256_set1_epi32(~0x6);
-		l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask);
-		l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags);
-		l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
-		/* set rss and vlan flags */
-		const __m256i rss_vlan_flag_bits =
-			_mm256_srli_epi32(flag_bits, 12);
-		const __m256i rss_vlan_flags =
-			_mm256_shuffle_epi8(rss_vlan_flags_shuf,
-					    rss_vlan_flag_bits);
-
-		/* merge flags */
-		__m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
+		if (do_offload) {
+			/* now do flag manipulation */
+
+			/* get only flag/error bits we want */
+			const __m256i flag_bits =
+				_mm256_and_si256(status0_7, flags_mask);
+			/**
+			 * l3_l4_error flags, shuffle, then shift to correct adjustment
+			 * of flags in flags_shuf, and finally mask out extra bits
+			 */
+			__m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
+					_mm256_srli_epi32(flag_bits, 4));
+			l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
+			__m256i l4_outer_mask = _mm256_set1_epi32(0x6);
+			__m256i l4_outer_flags =
+					_mm256_and_si256(l3_l4_flags, l4_outer_mask);
+			l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20);
+
+			__m256i l3_l4_mask = _mm256_set1_epi32(~0x6);
+
+			l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask);
+			l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags);
+			l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
+			/* set rss and vlan flags */
+			const __m256i rss_vlan_flag_bits =
+				_mm256_srli_epi32(flag_bits, 12);
+			const __m256i rss_vlan_flags =
+				_mm256_shuffle_epi8(rss_vlan_flags_shuf,
+						    rss_vlan_flag_bits);
+
+			/* merge flags */
+			mbuf_flags = _mm256_or_si256(l3_l4_flags,
 						     rss_vlan_flags);
+		}
 
 		if (rxq->fdir_enabled) {
 			const __m256i fdir_id4_7 =
@@ -529,12 +537,19 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
 			const __m256i fdir_id0_7 =
 				_mm256_unpackhi_epi64(fdir_id4_7, fdir_id0_3);
 
-			const __m256i fdir_flags =
-				ice_flex_rxd_to_fdir_flags_vec_avx512
-					(fdir_id0_7);
-
-			/* merge with fdir_flags */
-			mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_flags);
+			if (do_offload) {
+				const __m256i fdir_flags =
+					ice_flex_rxd_to_fdir_flags_vec_avx512
+						(fdir_id0_7);
+
+				/* merge with fdir_flags */
+				mbuf_flags = _mm256_or_si256
+						(mbuf_flags, fdir_flags);
+			} else {
+				mbuf_flags =
+					ice_flex_rxd_to_fdir_flags_vec_avx512
+						(fdir_id0_7);
+			}
 
 			/* write to mbuf: have to use scalar store here */
 			rx_pkts[i + 0]->hash.fdir.hi =
@@ -562,95 +577,97 @@ _ice_recv_raw_pkts_vec_avx512(struct ice_rx_queue *rxq,
 				_mm256_extract_epi32(fdir_id0_7, 4);
 		} /* if() on fdir_enabled */
 
+		if (do_offload) {
 #ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
-		/**
-		 * needs to load 2nd 16B of each desc for RSS hash parsing,
-		 * will cause performance drop to get into this context.
-		 */
-		if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-				DEV_RX_OFFLOAD_RSS_HASH) {
-			/* load bottom half of every 32B desc */
-			const __m128i raw_desc_bh7 =
-				_mm_load_si128
-					((void *)(&rxdp[7].wb.status_error1));
-			rte_compiler_barrier();
-			const __m128i raw_desc_bh6 =
-				_mm_load_si128
-					((void *)(&rxdp[6].wb.status_error1));
-			rte_compiler_barrier();
-			const __m128i raw_desc_bh5 =
-				_mm_load_si128
-					((void *)(&rxdp[5].wb.status_error1));
-			rte_compiler_barrier();
-			const __m128i raw_desc_bh4 =
-				_mm_load_si128
-					((void *)(&rxdp[4].wb.status_error1));
-			rte_compiler_barrier();
-			const __m128i raw_desc_bh3 =
-				_mm_load_si128
-					((void *)(&rxdp[3].wb.status_error1));
-			rte_compiler_barrier();
-			const __m128i raw_desc_bh2 =
-				_mm_load_si128
-					((void *)(&rxdp[2].wb.status_error1));
-			rte_compiler_barrier();
-			const __m128i raw_desc_bh1 =
-				_mm_load_si128
-					((void *)(&rxdp[1].wb.status_error1));
-			rte_compiler_barrier();
-			const __m128i raw_desc_bh0 =
-				_mm_load_si128
-					((void *)(&rxdp[0].wb.status_error1));
-
-			__m256i raw_desc_bh6_7 =
-				_mm256_inserti128_si256
-					(_mm256_castsi128_si256(raw_desc_bh6),
-					raw_desc_bh7, 1);
-			__m256i raw_desc_bh4_5 =
-				_mm256_inserti128_si256
-					(_mm256_castsi128_si256(raw_desc_bh4),
-					raw_desc_bh5, 1);
-			__m256i raw_desc_bh2_3 =
-				_mm256_inserti128_si256
-					(_mm256_castsi128_si256(raw_desc_bh2),
-					raw_desc_bh3, 1);
-			__m256i raw_desc_bh0_1 =
-				_mm256_inserti128_si256
-					(_mm256_castsi128_si256(raw_desc_bh0),
-					raw_desc_bh1, 1);
-
 			/**
-			 * to shift the 32b RSS hash value to the
-			 * highest 32b of each 128b before mask
+			 * needs to load 2nd 16B of each desc for RSS hash parsing,
+			 * will cause performance drop to get into this context.
 			 */
-			__m256i rss_hash6_7 =
-				_mm256_slli_epi64(raw_desc_bh6_7, 32);
-			__m256i rss_hash4_5 =
-				_mm256_slli_epi64(raw_desc_bh4_5, 32);
-			__m256i rss_hash2_3 =
-				_mm256_slli_epi64(raw_desc_bh2_3, 32);
-			__m256i rss_hash0_1 =
-				_mm256_slli_epi64(raw_desc_bh0_1, 32);
-
-			__m256i rss_hash_msk =
-				_mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
-						 0xFFFFFFFF, 0, 0, 0);
-
-			rss_hash6_7 = _mm256_and_si256
-					(rss_hash6_7, rss_hash_msk);
-			rss_hash4_5 = _mm256_and_si256
-					(rss_hash4_5, rss_hash_msk);
-			rss_hash2_3 = _mm256_and_si256
-					(rss_hash2_3, rss_hash_msk);
-			rss_hash0_1 = _mm256_and_si256
-					(rss_hash0_1, rss_hash_msk);
-
-			mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
-			mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
-			mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
-			mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
-		} /* if() on RSS hash parsing */
+			if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
+					DEV_RX_OFFLOAD_RSS_HASH) {
+				/* load bottom half of every 32B desc */
+				const __m128i raw_desc_bh7 =
+					_mm_load_si128
+						((void *)(&rxdp[7].wb.status_error1));
+				rte_compiler_barrier();
+				const __m128i raw_desc_bh6 =
+					_mm_load_si128
+						((void *)(&rxdp[6].wb.status_error1));
+				rte_compiler_barrier();
+				const __m128i raw_desc_bh5 =
+					_mm_load_si128
+						((void *)(&rxdp[5].wb.status_error1));
+				rte_compiler_barrier();
+				const __m128i raw_desc_bh4 =
+					_mm_load_si128
+						((void *)(&rxdp[4].wb.status_error1));
+				rte_compiler_barrier();
+				const __m128i raw_desc_bh3 =
+					_mm_load_si128
+						((void *)(&rxdp[3].wb.status_error1));
+				rte_compiler_barrier();
+				const __m128i raw_desc_bh2 =
+					_mm_load_si128
+						((void *)(&rxdp[2].wb.status_error1));
+				rte_compiler_barrier();
+				const __m128i raw_desc_bh1 =
+					_mm_load_si128
+						((void *)(&rxdp[1].wb.status_error1));
+				rte_compiler_barrier();
+				const __m128i raw_desc_bh0 =
+					_mm_load_si128
+						((void *)(&rxdp[0].wb.status_error1));
+
+				__m256i raw_desc_bh6_7 =
+					_mm256_inserti128_si256
+						(_mm256_castsi128_si256(raw_desc_bh6),
+						raw_desc_bh7, 1);
+				__m256i raw_desc_bh4_5 =
+					_mm256_inserti128_si256
+						(_mm256_castsi128_si256(raw_desc_bh4),
+						raw_desc_bh5, 1);
+				__m256i raw_desc_bh2_3 =
+					_mm256_inserti128_si256
+						(_mm256_castsi128_si256(raw_desc_bh2),
+						raw_desc_bh3, 1);
+				__m256i raw_desc_bh0_1 =
+					_mm256_inserti128_si256
+						(_mm256_castsi128_si256(raw_desc_bh0),
+						raw_desc_bh1, 1);
+
+				/**
+				 * to shift the 32b RSS hash value to the
+				 * highest 32b of each 128b before mask
+				 */
+				__m256i rss_hash6_7 =
+					_mm256_slli_epi64(raw_desc_bh6_7, 32);
+				__m256i rss_hash4_5 =
+					_mm256_slli_epi64(raw_desc_bh4_5, 32);
+				__m256i rss_hash2_3 =
+					_mm256_slli_epi64(raw_desc_bh2_3, 32);
+				__m256i rss_hash0_1 =
+					_mm256_slli_epi64(raw_desc_bh0_1, 32);
+
+				__m256i rss_hash_msk =
+					_mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
+							 0xFFFFFFFF, 0, 0, 0);
+
+				rss_hash6_7 = _mm256_and_si256
+						(rss_hash6_7, rss_hash_msk);
+				rss_hash4_5 = _mm256_and_si256
+						(rss_hash4_5, rss_hash_msk);
+				rss_hash2_3 = _mm256_and_si256
+						(rss_hash2_3, rss_hash_msk);
+				rss_hash0_1 = _mm256_and_si256
+						(rss_hash0_1, rss_hash_msk);
+
+				mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
+				mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
+				mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
+				mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
+			} /* if() on RSS hash parsing */
 #endif
+		}
 
 		/**
 		 * At this point, we have the 8 sets of flags in the low 16-bits
@@ -806,7 +823,19 @@ uint16_t
 ice_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
 			 uint16_t nb_pkts)
 {
-	return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts, NULL);
+	return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts, nb_pkts, NULL, false);
+}
+
+/**
+ * Notice:
+ * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
+ */
+uint16_t
+ice_recv_pkts_vec_avx512_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+				 uint16_t nb_pkts)
+{
+	return _ice_recv_raw_pkts_vec_avx512(rx_queue, rx_pkts,
+					     nb_pkts, NULL, true);
 }
 
 /**
@@ -823,7 +852,49 @@ ice_recv_scattered_burst_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
 
 	/* get some new buffers */
 	uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx512(rxq, rx_pkts, nb_pkts,
-						       split_flags);
+						       split_flags, false);
+	if (nb_bufs == 0)
+		return 0;
+
+	/* happy day case, full burst + no packets to be joined */
+	const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+	if (!rxq->pkt_first_seg &&
+	    split_fl64[0] == 0 && split_fl64[1] == 0 &&
+	    split_fl64[2] == 0 && split_fl64[3] == 0)
+		return nb_bufs;
+
+	/* reassemble any packets that need reassembly */
+	unsigned int i = 0;
+
+	if (!rxq->pkt_first_seg) {
+		/* find the first split flag, and only reassemble then */
+		while (i < nb_bufs && !split_flags[i])
+			i++;
+		if (i == nb_bufs)
+			return nb_bufs;
+		rxq->pkt_first_seg = rx_pkts[i];
+	}
+	return i + ice_rx_reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+					     &split_flags[i]);
+}
+
+/**
+ * vPMD receive routine that reassembles single burst of 32 scattered packets
+ * Notice:
+ * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
+ */
+static uint16_t
+ice_recv_scattered_burst_vec_avx512_offload(void *rx_queue,
+					    struct rte_mbuf **rx_pkts,
+					    uint16_t nb_pkts)
+{
+	struct ice_rx_queue *rxq = rx_queue;
+	uint8_t split_flags[ICE_VPMD_RX_BURST] = {0};
+
+	/* get some new buffers */
+	uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx512(rxq,
+				rx_pkts, nb_pkts, split_flags, true);
 	if (nb_bufs == 0)
 		return 0;
 
@@ -874,6 +945,32 @@ ice_recv_scattered_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
 				rx_pkts + retval, nb_pkts);
 }
 
+/**
+ * vPMD receive routine that reassembles scattered packets.
+ * Main receive routine that can handle arbitrary burst sizes
+ * Notice:
+ * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
+ */
+uint16_t
+ice_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
+					   struct rte_mbuf **rx_pkts,
+					   uint16_t nb_pkts)
+{
+	uint16_t retval = 0;
+
+	while (nb_pkts > ICE_VPMD_RX_BURST) {
+		uint16_t burst =
+			ice_recv_scattered_burst_vec_avx512_offload(rx_queue,
+				rx_pkts + retval, ICE_VPMD_RX_BURST);
+		retval += burst;
+		nb_pkts -= burst;
+		if (burst < ICE_VPMD_RX_BURST)
+			return retval;
+	}
+	return retval + ice_recv_scattered_burst_vec_avx512_offload(rx_queue,
+				rx_pkts + retval, nb_pkts);
+}
+
 static __rte_always_inline int
 ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
 {
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index 942647f4e9..6e8d7a6fc5 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -247,6 +247,28 @@ ice_rxq_vec_setup_default(struct ice_rx_queue *rxq)
 	return 0;
 }
 
+#define ICE_TX_NO_VECTOR_FLAGS (			\
+		DEV_TX_OFFLOAD_MULTI_SEGS |		\
+		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |	\
+		DEV_TX_OFFLOAD_TCP_TSO)
+
+#define ICE_TX_VECTOR_OFFLOAD (				\
+		DEV_TX_OFFLOAD_VLAN_INSERT |		\
+		DEV_TX_OFFLOAD_QINQ_INSERT |		\
+		DEV_TX_OFFLOAD_IPV4_CKSUM |		\
+		DEV_TX_OFFLOAD_SCTP_CKSUM |		\
+		DEV_TX_OFFLOAD_UDP_CKSUM |		\
+		DEV_TX_OFFLOAD_TCP_CKSUM)
+
+#define ICE_RX_VECTOR_OFFLOAD (				\
+		DEV_RX_OFFLOAD_CHECKSUM |		\
+		DEV_RX_OFFLOAD_SCTP_CKSUM |		\
+		DEV_RX_OFFLOAD_VLAN |			\
+		DEV_RX_OFFLOAD_RSS_HASH)
+
+#define ICE_VECTOR_PATH		0
+#define ICE_VECTOR_OFFLOAD_PATH	1
+
 static inline int
 ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
 {
@@ -265,24 +287,11 @@ ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
 	if (rxq->proto_xtr != PROTO_XTR_NONE)
 		return -1;
 
-	return 0;
-}
-
-#define ICE_TX_NO_VECTOR_FLAGS (			\
-		DEV_TX_OFFLOAD_MULTI_SEGS |		\
-		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |	\
-		DEV_TX_OFFLOAD_TCP_TSO)
-
-#define ICE_TX_VECTOR_OFFLOAD (				\
-		DEV_TX_OFFLOAD_VLAN_INSERT |		\
-		DEV_TX_OFFLOAD_QINQ_INSERT |		\
-		DEV_TX_OFFLOAD_IPV4_CKSUM |		\
-		DEV_TX_OFFLOAD_SCTP_CKSUM |		\
-		DEV_TX_OFFLOAD_UDP_CKSUM |		\
-		DEV_TX_OFFLOAD_TCP_CKSUM)
+	if (rxq->offloads & ICE_RX_VECTOR_OFFLOAD)
+		return ICE_VECTOR_OFFLOAD_PATH;
 
-#define ICE_VECTOR_PATH		0
-#define ICE_VECTOR_OFFLOAD_PATH	1
+	return ICE_VECTOR_PATH;
+}
 
 static inline int
 ice_tx_vec_queue_default(struct ice_tx_queue *txq)
@@ -308,14 +317,19 @@ ice_rx_vec_dev_check_default(struct rte_eth_dev *dev)
 {
 	int i;
 	struct ice_rx_queue *rxq;
+	int ret = 0;
+	int result = 0;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
-		if (ice_rx_vec_queue_default(rxq))
+		ret = (ice_rx_vec_queue_default(rxq));
+		if (ret < 0)
 			return -1;
+		if (ret == ICE_VECTOR_OFFLOAD_PATH)
+			result = ret;
 	}
 
-	return 0;
+	return result;
 }
 
 static inline int
-- 
2.17.1


^ permalink raw reply	[flat|nested] 15+ messages in thread
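
In the Rx hunk above, the RSS hash is merged into the mbuf rearrange data
with a shift/mask/or sequence instead of an extra shuffle: shifting each
64-bit lane left by 32 parks the 32-bit hash in the top dword of its
128-bit half, which is byte offset 12 of rx_descriptor_fields1 — exactly
where the RTE_BUILD_BUG_ON in this file asserts mbuf->hash lives. A
self-contained AVX2 sketch of that repositioning (arbitrary test values;
compile with -mavx2):

#include <stdio.h>
#include <stdint.h>
#include <immintrin.h>

int main(void)
{
	/* Two 128-bit descriptor halves packed in one 256-bit register;
	 * pretend the 32-bit RSS hash sits in dword 2 of each half. */
	__m256i raw = _mm256_set_epi32(0, 0xAABBCCDD, 0, 0,
				       0, 0x11223344, 0, 0);

	/* Shift each 64-bit lane left by 32 so the hash lands in the
	 * top dword of its 128-bit half, then mask everything else. */
	__m256i hash = _mm256_slli_epi64(raw, 32);
	__m256i msk  = _mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
					0xFFFFFFFF, 0, 0, 0);
	hash = _mm256_and_si256(hash, msk);

	/* OR into the (zeroed) mbuf rearrange data, as the driver does. */
	__m256i mb = _mm256_or_si256(_mm256_setzero_si256(), hash);

	printf("%#x %#x\n",
	       (unsigned)_mm256_extract_epi32(mb, 7),
	       (unsigned)_mm256_extract_epi32(mb, 3));
	return 0;
}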

* Re: [dpdk-dev] [PATCH v5 0/2] add alternative AVX512 offload path
  2021-04-15  8:58 ` [dpdk-dev] [PATCH v5 0/2] add alternative " Leyi Rong
  2021-04-15  8:58   ` [dpdk-dev] [PATCH v5 1/2] net/ice: add Tx " Leyi Rong
  2021-04-15  8:58   ` [dpdk-dev] [PATCH v5 2/2] net/ice: add Rx " Leyi Rong
@ 2021-04-16  9:02   ` Sun, QinX
  2021-04-16 10:42     ` Zhang, Qi Z
  2 siblings, 1 reply; 15+ messages in thread
From: Sun, QinX @ 2021-04-16  9:02 UTC (permalink / raw)
  To: Rong, Leyi, Zhang, Qi Z, Lu, Wenzhuo; +Cc: dev, Sun, QinX

> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Leyi Rong
> Sent: Thursday, April 15, 2021 4:58 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>; Lu, Wenzhuo
> <wenzhuo.lu@intel.com>
> Cc: dev@dpdk.org; Rong, Leyi <leyi.rong@intel.com>
> Subject: [dpdk-dev] [PATCH v5 0/2] add alternative AVX512 offload path
> 
> Add alternative Rx/Tx offload path for AVX512, which can support Rx/Tx
> offload features, like checksum/vlan/RSS/QinQ offload.

Tested-by: Qin Sun <qinx.sun@intel.com>


^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [dpdk-dev] [PATCH v5 0/2] add alternative AVX512 offload path
  2021-04-16  9:02   ` [dpdk-dev] [PATCH v5 0/2] add alternative " Sun, QinX
@ 2021-04-16 10:42     ` Zhang, Qi Z
  0 siblings, 0 replies; 15+ messages in thread
From: Zhang, Qi Z @ 2021-04-16 10:42 UTC (permalink / raw)
  To: Sun, QinX, Rong, Leyi, Lu, Wenzhuo; +Cc: dev



> -----Original Message-----
> From: Sun, QinX <qinx.sun@intel.com>
> Sent: Friday, April 16, 2021 5:02 PM
> To: Rong, Leyi <leyi.rong@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Lu,
> Wenzhuo <wenzhuo.lu@intel.com>
> Cc: dev@dpdk.org; Sun, QinX <qinx.sun@intel.com>
> Subject: RE: [dpdk-dev] [PATCH v5 0/2] add alternative AVX512 offload path
> 
> > -----Original Message-----
> > From: dev <dev-bounces@dpdk.org> On Behalf Of Leyi Rong
> > Sent: Thursday, April 15, 2021 4:58 PM
> > To: Zhang, Qi Z <qi.z.zhang@intel.com>; Lu, Wenzhuo
> > <wenzhuo.lu@intel.com>
> > Cc: dev@dpdk.org; Rong, Leyi <leyi.rong@intel.com>
> > Subject: [dpdk-dev] [PATCH v5 0/2] add alternative AVX512 offload path
> >
> > Add alternative Rx/Tx offload path for AVX512, which can support Rx/Tx
> > offload features, like checksum/vlan/RSS/QinQ offload.
> 
> Tested-by: Qin Sun <qinx.sun@intel.com>

Applied to dpdk-next-net-intel.

Thanks
Qi
> 


^ permalink raw reply	[flat|nested] 15+ messages in thread

Thread overview: 15+ messages
2021-03-17  9:14 [dpdk-dev] [PATCH 0/2] add alternative AVX512 offload path Leyi Rong
2021-03-17  9:14 ` [dpdk-dev] [PATCH 1/2] net/ice: add Tx " Leyi Rong
2021-03-17  9:14 ` [dpdk-dev] [PATCH 2/2] net/ice: add Rx " Leyi Rong
2021-03-18 10:35   ` Van Haaren, Harry
2021-04-12  4:03 ` [dpdk-dev] [PATCH v3 0/2] add alternative " Leyi Rong
2021-04-12  4:03   ` [dpdk-dev] [PATCH v3 1/2] net/ice: add Tx " Leyi Rong
2021-04-12  4:03   ` [dpdk-dev] [PATCH v3 2/2] net/ice: add Rx " Leyi Rong
2021-04-15  1:13 ` [dpdk-dev] [PATCH v4 0/2] add alternative " Leyi Rong
2021-04-15  1:13   ` [dpdk-dev] [PATCH v4 1/2] net/ice: add Tx " Leyi Rong
2021-04-15  1:13   ` [dpdk-dev] [PATCH v4 2/2] net/ice: add Rx " Leyi Rong
2021-04-15  8:58 ` [dpdk-dev] [PATCH v5 0/2] add alternative " Leyi Rong
2021-04-15  8:58   ` [dpdk-dev] [PATCH v5 1/2] net/ice: add Tx " Leyi Rong
2021-04-15  8:58   ` [dpdk-dev] [PATCH v5 2/2] net/ice: add Rx " Leyi Rong
2021-04-16  9:02   ` [dpdk-dev] [PATCH v5 0/2] add alternative " Sun, QinX
2021-04-16 10:42     ` Zhang, Qi Z
