DPDK patches and discussions
From: Guinan Sun <guinanx.sun@intel.com>
To: dev@dpdk.org
Cc: Qi Zhang <qi.z.zhang@intel.com>,
	Qiming Yang <qiming.yang@intel.com>,
	Junyu Jiang <junyux.jiang@intel.com>
Subject: [dpdk-dev] [PATCH 2/7] net/ice: change RSS hash parsing in SSE path
Date: Wed, 26 Aug 2020 07:54:56 +0000
Message-ID: <20200826075501.50052-3-guinanx.sun@intel.com>
In-Reply-To: <20200826075501.50052-1-guinanx.sun@intel.com>

From: Junyu Jiang <junyux.jiang@intel.com>

Parse the RSS hash from the flexible Rx descriptor in the SSE data path.
The hash is read from the second 16 bytes of the 32-byte descriptor and
merged into the mbuf separately from the main shuffle.

Signed-off-by: Junyu Jiang <junyux.jiang@intel.com>
---
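Notes (not part of the patch): the new shuffle mask zeroes the 32-bit hash
slot of each mbuf image, so the hash pulled from the descriptor's bottom
half can simply be OR-ed in afterwards. The standalone sketch below shows
that shift/mask/OR sequence on plain arrays; it assumes the 32-bit hash
sits at byte offset 8 of the bottom 16 bytes, matching the RXDID #22
layout this series targets, and is illustrative only.

/* Standalone sketch (not DPDK code): merge a 32-bit RSS hash from the
 * bottom half of a 32B descriptor into the top dword of the 16-byte
 * mbuf rx_descriptor_fields1 image. Plain SSE2; builds on any x86-64
 * toolchain. */
#include <stdint.h>
#include <stdio.h>
#include <emmintrin.h>

int main(void)
{
	/* Fake bottom half of one descriptor: hash = 0xdeadbeef at
	 * 32-bit lane 2 (byte offset 8). */
	uint32_t desc_bh[4] = { 0x11111111, 0x22222222, 0xdeadbeef, 0x33333333 };
	/* Fake mbuf image; the top dword (hash.rss slot) is zero,
	 * just as the shuffle mask in the patch leaves it. */
	uint32_t mb[4] = { 0x000005ea, 0x05ea0000, 0x00000000, 0x00000000 };

	__m128i raw_bh = _mm_loadu_si128((const __m128i *)desc_bh);
	__m128i pkt_mb = _mm_loadu_si128((const __m128i *)mb);

	/* Shift each 64-bit lane left by 32: the hash dword moves from
	 * lane 2 to lane 3 (the highest 32 bits of the register). */
	__m128i rss = _mm_slli_epi64(raw_bh, 32);
	/* Keep only those highest 32 bits. */
	const __m128i rss_msk = _mm_set_epi32(0xFFFFFFFF, 0, 0, 0);
	rss = _mm_and_si128(rss, rss_msk);
	/* OR into the mbuf image; the hash slot is already zeroed. */
	pkt_mb = _mm_or_si128(pkt_mb, rss);

	_mm_storeu_si128((__m128i *)mb, pkt_mb);
	printf("hash.rss slot = 0x%08x\n", (unsigned int)mb[3]); /* 0xdeadbeef */
	return 0;
}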
 drivers/net/ice/ice_rxtx_vec_sse.c | 89 ++++++++++++++++++++++++------
 1 file changed, 73 insertions(+), 16 deletions(-)

diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c
index 382ef31f3..fffb27138 100644
--- a/drivers/net/ice/ice_rxtx_vec_sse.c
+++ b/drivers/net/ice/ice_rxtx_vec_sse.c
@@ -230,7 +230,8 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 	const __m128i zero = _mm_setzero_si128();
 	/* mask to shuffle from desc. to mbuf */
 	const __m128i shuf_msk = _mm_set_epi8
-			(15, 14, 13, 12,  /* octet 12~15, 32 bits rss */
+			(0xFF, 0xFF,
+			 0xFF, 0xFF,  /* rss hash parsed separately */
 			 11, 10,      /* octet 10~11, 16 bits vlan_macip */
 			 5, 4,        /* octet 4~5, 16 bits data_len */
 			 0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
@@ -321,7 +322,7 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 	     pos += ICE_DESCS_PER_LOOP,
 	     rxdp += ICE_DESCS_PER_LOOP) {
 		__m128i descs[ICE_DESCS_PER_LOOP];
-		__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+		__m128i pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3;
 		__m128i staterr, sterr_tmp1, sterr_tmp2;
 		/* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
 		__m128i mbp1;
@@ -367,8 +368,12 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		rte_compiler_barrier();
 
 		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
-		pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
-		pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);
+		pkt_mb3 = _mm_shuffle_epi8(descs[3], shuf_msk);
+		pkt_mb2 = _mm_shuffle_epi8(descs[2], shuf_msk);
+
+		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
+		pkt_mb1 = _mm_shuffle_epi8(descs[1], shuf_msk);
+		pkt_mb0 = _mm_shuffle_epi8(descs[0], shuf_msk);
 
 		/* C.1 4=>2 filter staterr info only */
 		sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
@@ -378,12 +383,68 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		ice_rx_desc_to_olflags_v(rxq, descs, &rx_pkts[pos]);
 
 		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
-		pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
 		pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
+		pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
 
-		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
-		pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
-		pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);
+		/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
+		pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
+		pkt_mb0 = _mm_add_epi16(pkt_mb0, crc_adjust);
+
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+		/**
+		 * needs to load 2nd 16B of each desc for RSS hash parsing,
+		 * will cause performance drop to get into this context.
+		 */
+		if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
+				DEV_RX_OFFLOAD_RSS_HASH) {
+			/* load bottom half of every 32B desc */
+			const __m128i raw_desc_bh3 =
+				_mm_load_si128
+					((void *)(&rxdp[3].wb.status_error1));
+			rte_compiler_barrier();
+			const __m128i raw_desc_bh2 =
+				_mm_load_si128
+					((void *)(&rxdp[2].wb.status_error1));
+			rte_compiler_barrier();
+			const __m128i raw_desc_bh1 =
+				_mm_load_si128
+					((void *)(&rxdp[1].wb.status_error1));
+			rte_compiler_barrier();
+			const __m128i raw_desc_bh0 =
+				_mm_load_si128
+					((void *)(&rxdp[0].wb.status_error1));
+
+			/**
+			 * to shift the 32b RSS hash value to the
+			 * highest 32b of each 128b before mask
+			 */
+			__m128i rss_hash3 =
+				_mm_slli_epi64(raw_desc_bh3, 32);
+			__m128i rss_hash2 =
+				_mm_slli_epi64(raw_desc_bh2, 32);
+			__m128i rss_hash1 =
+				_mm_slli_epi64(raw_desc_bh1, 32);
+			__m128i rss_hash0 =
+				_mm_slli_epi64(raw_desc_bh0, 32);
+
+			__m128i rss_hash_msk =
+				_mm_set_epi32(0xFFFFFFFF, 0, 0, 0);
+
+			rss_hash3 = _mm_and_si128
+					(rss_hash3, rss_hash_msk);
+			rss_hash2 = _mm_and_si128
+					(rss_hash2, rss_hash_msk);
+			rss_hash1 = _mm_and_si128
+					(rss_hash1, rss_hash_msk);
+			rss_hash0 = _mm_and_si128
+					(rss_hash0, rss_hash_msk);
+
+			pkt_mb3 = _mm_or_si128(pkt_mb3, rss_hash3);
+			pkt_mb2 = _mm_or_si128(pkt_mb2, rss_hash2);
+			pkt_mb1 = _mm_or_si128(pkt_mb1, rss_hash1);
+			pkt_mb0 = _mm_or_si128(pkt_mb0, rss_hash0);
+		} /* if() on RSS hash parsing */
+#endif
 
 		/* C.2 get 4 pkts staterr value  */
 		staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
@@ -391,14 +452,10 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		/* D.3 copy final 3,4 data to rx_pkts */
 		_mm_storeu_si128
 			((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
-			 pkt_mb4);
+			 pkt_mb3);
 		_mm_storeu_si128
 			((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
-			 pkt_mb3);
-
-		/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
-		pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
-		pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
+			 pkt_mb2);
 
 		/* C* extract and record EOP bit */
 		if (split_packet) {
@@ -422,9 +479,9 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		/* D.3 copy final 1,2 data to rx_pkts */
 		_mm_storeu_si128
 			((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
-			 pkt_mb2);
+			 pkt_mb1);
 		_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
-				 pkt_mb1);
+				 pkt_mb0);
 		ice_rx_desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
 		/* C.4 calc avaialbe number of desc */
 		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
-- 
2.17.1
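The RSS-hash block added above is compiled in only when
RTE_LIBRTE_ICE_16BYTE_RX_DESC is not defined (32-byte descriptors) and is
taken at run time only when DEV_RX_OFFLOAD_RSS_HASH is set in
rxmode.offloads, so applications that do not request the hash keep the
old, cheaper path. An illustrative way to request it at configure time
(error handling and RSS key/RETA setup omitted; not part of the patch):

#include <rte_ethdev.h>

/* Illustrative only: request the RSS-hash offload so the new branch in
 * the SSE Rx path is exercised. */
static int enable_rss_hash(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.mq_mode  = ETH_MQ_RX_RSS,
			.offloads = DEV_RX_OFFLOAD_RSS_HASH,
		},
	};

	/* one Rx queue and one Tx queue, just for the sketch */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}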


Thread overview: 27+ messages
2020-08-26  7:54 [dpdk-dev] [PATCH 0/7] support RXDID22 and FDID22 Guinan Sun
2020-08-26  7:54 ` [dpdk-dev] [PATCH 1/7] net/ice: change RSS hash parsing in AVX path Guinan Sun
2020-08-26  7:54 ` Guinan Sun [this message]
2020-08-26  7:54 ` [dpdk-dev] [PATCH 3/7] net/ice: support flexible descriptor RxDID #22 Guinan Sun
2020-08-26  7:54 ` [dpdk-dev] [PATCH 4/7] net/ice: remove devargs flow-mark-support Guinan Sun
2020-08-26  7:54 ` [dpdk-dev] [PATCH 5/7] net/ice: add flow director enabled switch value Guinan Sun
2020-08-26  7:55 ` [dpdk-dev] [PATCH 6/7] net/ice: support Flex Rx desc and flow mark in AVX path Guinan Sun
2020-08-26  7:55 ` [dpdk-dev] [PATCH 7/7] net/ice: support Flex Rx desc and flow mark in SSE path Guinan Sun
2020-09-07  5:43 ` [dpdk-dev] [PATCH 0/7] support RXDID22 and FDID22 Zhang, Qi Z
2020-09-07  5:55   ` Jiang, JunyuX
2020-09-07  9:17 ` [dpdk-dev] [PATCH v2 0/5] supports RxDID #22 and FDID Junyu Jiang
2020-09-07  9:17   ` [dpdk-dev] [PATCH v2 1/5] net/ice: support flex Rx descriptor RxDID #22 Junyu Jiang
2020-09-07  9:17   ` [dpdk-dev] [PATCH v2 2/5] net/ice: add flow director enabled switch value Junyu Jiang
2020-09-08  7:52     ` Yang, Qiming
2020-09-07  9:17   ` [dpdk-dev] [PATCH v2 3/5] net/ice: support flow mark in AVX path Junyu Jiang
2020-09-08  7:54     ` Yang, Qiming
2020-09-07  9:17   ` [dpdk-dev] [PATCH v2 4/5] net/ice: support flow mark in SSE path Junyu Jiang
2020-09-07  9:17   ` [dpdk-dev] [PATCH v2 5/5] net/ice: remove devargs flow-mark-support Junyu Jiang
2020-09-08  7:55     ` Yang, Qiming
2020-09-16  3:09 ` [dpdk-dev] [PATCH v3 0/5] supports RxDID #22 and FDID Junyu Jiang
2020-09-16  3:09   ` [dpdk-dev] [PATCH v3 1/5] net/ice: support flex Rx descriptor RxDID #22 Junyu Jiang
2020-09-16  3:09   ` [dpdk-dev] [PATCH v3 2/5] net/ice: add flow director enabled switch value Junyu Jiang
2020-09-16  3:10   ` [dpdk-dev] [PATCH v3 3/5] net/ice: support flow mark in AVX path Junyu Jiang
2020-09-16  3:10   ` [dpdk-dev] [PATCH v3 4/5] net/ice: support flow mark in SSE path Junyu Jiang
2020-09-16  3:10   ` [dpdk-dev] [PATCH v3 5/5] net/ice: remove devargs flow-mark-support Junyu Jiang
2020-09-16  6:30   ` [dpdk-dev] [PATCH v3 0/5] supports RxDID #22 and FDID Rong, Leyi
2020-09-16  6:42     ` Zhang, Qi Z
