DPDK patches and discussions
* [PATCH] net/i40e: remove avx512 specific Rx queue rearm code
@ 2023-02-07  6:38 Wenzhuo Lu
  2023-02-07  6:38 ` [PATCH] net/iavf: " Wenzhuo Lu
                   ` (2 more replies)
  0 siblings, 3 replies; 8+ messages in thread
From: Wenzhuo Lu @ 2023-02-07  6:38 UTC (permalink / raw)
  To: dev; +Cc: Wenzhuo Lu

'i40e_rxq_rearm' in the avx512 path was optimized to improve performance.
But after commit a2833ecc5ea4 ("mempool: fix get objects from mempool
with cache"), this avx512-specific optimization is no longer necessary.
This patch removes the unnecessary PMD-specific optimization to make the
code easier to maintain and to benefit from the enhancements in the
common mempool library.
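
For context, after that mempool fix a single bulk get is all a rearm
routine needs; the library itself serves the request from the per-lcore
cache and backfills from the ring when the cache runs short. A minimal
sketch of the resulting pattern (illustrative only -- 'mbufs' is a
hypothetical local, not the exact code of i40e_rxq_rearm_common):

	struct rte_mbuf *mbufs[RTE_I40E_RXQ_REARM_THRESH];

	/* One call; the cache handling that the removed code inlined
	 * now happens inside the mempool library.
	 */
	if (rte_mempool_get_bulk(rxq->mp, (void **)mbufs,
				 RTE_I40E_RXQ_REARM_THRESH) < 0) {
		/* Count the failure and retry on the next rearm. */
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
				RTE_I40E_RXQ_REARM_THRESH;
		return;
	}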

Reported-by: Haijun Chu <haijun.chu@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/i40e/i40e_rxtx_vec_avx512.c | 125 +-----------------------
 1 file changed, 1 insertion(+), 124 deletions(-)

diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx512.c b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
index 60c97d5331..d3c7bfd121 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_avx512.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
@@ -24,130 +24,7 @@
 static __rte_always_inline void
 i40e_rxq_rearm(struct i40e_rx_queue *rxq)
 {
-	int i;
-	uint16_t rx_id;
-	volatile union i40e_rx_desc *rxdp;
-	struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
-	struct rte_mempool_cache *cache = rte_mempool_default_cache(rxq->mp,
-			rte_lcore_id());
-
-	rxdp = rxq->rx_ring + rxq->rxrearm_start;
-
-	if (unlikely(!cache))
-		return i40e_rxq_rearm_common(rxq, true);
-
-	/* We need to pull 'n' more MBUFs into the software ring from mempool
-	 * We inline the mempool function here, so we can vectorize the copy
-	 * from the cache into the shadow ring.
-	 */
-
-	if (cache->len < RTE_I40E_RXQ_REARM_THRESH) {
-		/* No. Backfill the cache first, and then fill from it */
-		uint32_t req = RTE_I40E_RXQ_REARM_THRESH + (cache->size -
-				cache->len);
-
-		/* How many do we require
-		 * i.e. number to fill the cache + the request
-		 */
-		int ret = rte_mempool_ops_dequeue_bulk(rxq->mp,
-				&cache->objs[cache->len], req);
-		if (ret == 0) {
-			cache->len += req;
-		} else {
-			if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
-					rxq->nb_rx_desc) {
-				__m128i dma_addr0;
-
-				dma_addr0 = _mm_setzero_si128();
-				for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
-					rxep[i].mbuf = &rxq->fake_mbuf;
-					_mm_store_si128
-						((__m128i *)&rxdp[i].read,
-							dma_addr0);
-				}
-			}
-			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
-					RTE_I40E_RXQ_REARM_THRESH;
-			return;
-		}
-	}
-
-	const __m512i iova_offsets =  _mm512_set1_epi64
-		(offsetof(struct rte_mbuf, buf_iova));
-	const __m512i headroom = _mm512_set1_epi64(RTE_PKTMBUF_HEADROOM);
-
-#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
-	/* to shuffle the addresses to correct slots. Values 4-7 will contain
-	 * zeros, so use 7 for a zero-value.
-	 */
-	const __m512i permute_idx = _mm512_set_epi64(7, 7, 3, 1, 7, 7, 2, 0);
-#else
-	const __m512i permute_idx = _mm512_set_epi64(7, 3, 6, 2, 5, 1, 4, 0);
-#endif
-
-	/* Initialize the mbufs in vector, process 8 mbufs in one loop, taking
-	 * from mempool cache and populating both shadow and HW rings
-	 */
-	for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH / 8; i++) {
-		const __m512i mbuf_ptrs = _mm512_loadu_si512
-			(&cache->objs[cache->len - 8]);
-		_mm512_store_si512(rxep, mbuf_ptrs);
-
-		/* gather iova of mbuf0-7 into one zmm reg */
-		const __m512i iova_base_addrs = _mm512_i64gather_epi64
-			(_mm512_add_epi64(mbuf_ptrs, iova_offsets),
-				0, /* base */
-				1 /* scale */);
-		const __m512i iova_addrs = _mm512_add_epi64(iova_base_addrs,
-				headroom);
-#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
-		const __m512i iovas0 = _mm512_castsi256_si512
-			(_mm512_extracti64x4_epi64(iova_addrs, 0));
-		const __m512i iovas1 = _mm512_castsi256_si512
-			(_mm512_extracti64x4_epi64(iova_addrs, 1));
-
-		/* permute leaves desc 2-3 addresses in header address slots 0-1
-		 * but these are ignored by driver since header split not
-		 * enabled. Similarly for desc 4 & 5.
-		 */
-		const __m512i desc_rd_0_1 = _mm512_permutexvar_epi64
-			(permute_idx, iovas0);
-		const __m512i desc_rd_2_3 = _mm512_bsrli_epi128(desc_rd_0_1, 8);
-
-		const __m512i desc_rd_4_5 = _mm512_permutexvar_epi64
-			(permute_idx, iovas1);
-		const __m512i desc_rd_6_7 = _mm512_bsrli_epi128(desc_rd_4_5, 8);
-
-		_mm512_store_si512((void *)rxdp, desc_rd_0_1);
-		_mm512_store_si512((void *)(rxdp + 2), desc_rd_2_3);
-		_mm512_store_si512((void *)(rxdp + 4), desc_rd_4_5);
-		_mm512_store_si512((void *)(rxdp + 6), desc_rd_6_7);
-#else
-		/* permute leaves desc 4-7 addresses in header address slots 0-3
-		 * but these are ignored by driver since header split not
-		 * enabled.
-		 */
-		const __m512i desc_rd_0_3 = _mm512_permutexvar_epi64
-			(permute_idx, iova_addrs);
-		const __m512i desc_rd_4_7 = _mm512_bsrli_epi128(desc_rd_0_3, 8);
-
-		_mm512_store_si512((void *)rxdp, desc_rd_0_3);
-		_mm512_store_si512((void *)(rxdp + 4), desc_rd_4_7);
-#endif
-		rxep += 8, rxdp += 8, cache->len -= 8;
-	}
-
-	rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
-	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
-		rxq->rxrearm_start = 0;
-
-	rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;
-
-	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
-			     (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
-
-	/* Update the tail pointer on the NIC */
-	I40E_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
+	return i40e_rxq_rearm_common(rxq, true);
 }
 
 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
-- 
2.25.1



* [PATCH] net/iavf: remove avx512 specific Rx queue rearm code
  2023-02-07  6:38 [PATCH] net/i40e: remove avx512 specific Rx queue rearm code Wenzhuo Lu
@ 2023-02-07  6:38 ` Wenzhuo Lu
  2023-02-08  3:14   ` Zhang, Qi Z
  2023-02-07  6:39 ` [PATCH] net/ice: " Wenzhuo Lu
  2023-02-07  6:39 ` [PATCH] net/i40e: " Wenzhuo Lu
  2 siblings, 1 reply; 8+ messages in thread
From: Wenzhuo Lu @ 2023-02-07  6:38 UTC (permalink / raw)
  To: dev; +Cc: Wenzhuo Lu

'iavf_rxq_rearm' in the avx512 path was optimized to improve performance.
But after commit a2833ecc5ea4 ("mempool: fix get objects from mempool
with cache"), this avx512-specific optimization is no longer necessary.
This patch removes the unnecessary PMD-specific optimization to make the
code easier to maintain and to benefit from the enhancements in the
common mempool library.
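
Whatever the vector width, a rearm ultimately publishes each mbuf's data
IOVA into the descriptor read format. A hedged sketch of that
per-descriptor step (field names match the removed code below; 'mbufs'
and the loop index 'i' are hypothetical):

	uint64_t dma_addr =
		rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbufs[i]));

	rxp[i] = mbufs[i];                /* software (shadow) ring */
	rxdp[i].read.hdr_addr = 0;        /* header split not enabled */
	rxdp[i].read.pkt_addr = dma_addr; /* buf_iova + headroom */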

Reported-by: Haijun Chu <haijun.chu@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/iavf/iavf_rxtx_vec_avx512.c | 126 +-----------------------
 1 file changed, 1 insertion(+), 125 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index b416a716cf..0abedbb3bb 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -32,131 +32,7 @@
 static __rte_always_inline void
 iavf_rxq_rearm(struct iavf_rx_queue *rxq)
 {
-	int i;
-	uint16_t rx_id;
-	volatile union iavf_rx_desc *rxdp;
-	struct rte_mempool_cache *cache =
-		rte_mempool_default_cache(rxq->mp, rte_lcore_id());
-	struct rte_mbuf **rxp = &rxq->sw_ring[rxq->rxrearm_start];
-
-	rxdp = rxq->rx_ring + rxq->rxrearm_start;
-
-	if (unlikely(!cache))
-		return iavf_rxq_rearm_common(rxq, true);
-
-	/* We need to pull 'n' more MBUFs into the software ring from mempool
-	 * We inline the mempool function here, so we can vectorize the copy
-	 * from the cache into the shadow ring.
-	 */
-
-	/* Can this be satisfied from the cache? */
-	if (cache->len < IAVF_RXQ_REARM_THRESH) {
-		/* No. Backfill the cache first, and then fill from it */
-		uint32_t req = IAVF_RXQ_REARM_THRESH + (cache->size -
-							cache->len);
-
-		/* How many do we require i.e. number to fill the cache + the request */
-		int ret = rte_mempool_ops_dequeue_bulk
-				(rxq->mp, &cache->objs[cache->len], req);
-		if (ret == 0) {
-			cache->len += req;
-		} else {
-			if (rxq->rxrearm_nb + IAVF_RXQ_REARM_THRESH >=
-			    rxq->nb_rx_desc) {
-				__m128i dma_addr0;
-
-				dma_addr0 = _mm_setzero_si128();
-				for (i = 0; i < IAVF_VPMD_DESCS_PER_LOOP; i++) {
-					rxp[i] = &rxq->fake_mbuf;
-					_mm_storeu_si128((__m128i *)&rxdp[i].read,
-							 dma_addr0);
-				}
-			}
-			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
-					IAVF_RXQ_REARM_THRESH;
-			return;
-		}
-	}
-
-	const __m512i iova_offsets =  _mm512_set1_epi64(offsetof
-							(struct rte_mbuf, buf_iova));
-	const __m512i headroom = _mm512_set1_epi64(RTE_PKTMBUF_HEADROOM);
-
-#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
-	/* to shuffle the addresses to correct slots. Values 4-7 will contain
-	 * zeros, so use 7 for a zero-value.
-	 */
-	const __m512i permute_idx = _mm512_set_epi64(7, 7, 3, 1, 7, 7, 2, 0);
-#else
-	const __m512i permute_idx = _mm512_set_epi64(7, 3, 6, 2, 5, 1, 4, 0);
-#endif
-
-	/* Initialize the mbufs in vector, process 8 mbufs in one loop, taking
-	 * from mempool cache and populating both shadow and HW rings
-	 */
-	for (i = 0; i < IAVF_RXQ_REARM_THRESH / IAVF_DESCS_PER_LOOP_AVX; i++) {
-		const __m512i mbuf_ptrs = _mm512_loadu_si512
-			(&cache->objs[cache->len - IAVF_DESCS_PER_LOOP_AVX]);
-		_mm512_storeu_si512(rxp, mbuf_ptrs);
-
-		const __m512i iova_base_addrs = _mm512_i64gather_epi64
-				(_mm512_add_epi64(mbuf_ptrs, iova_offsets),
-				 0, /* base */
-				 1  /* scale */);
-		const __m512i iova_addrs = _mm512_add_epi64(iova_base_addrs,
-				headroom);
-#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
-		const __m512i iovas0 = _mm512_castsi256_si512
-				(_mm512_extracti64x4_epi64(iova_addrs, 0));
-		const __m512i iovas1 = _mm512_castsi256_si512
-				(_mm512_extracti64x4_epi64(iova_addrs, 1));
-
-		/* permute leaves desc 2-3 addresses in header address slots 0-1
-		 * but these are ignored by driver since header split not
-		 * enabled. Similarly for desc 6 & 7.
-		 */
-		const __m512i desc0_1 = _mm512_permutexvar_epi64
-				(permute_idx,
-				 iovas0);
-		const __m512i desc2_3 = _mm512_bsrli_epi128(desc0_1, 8);
-
-		const __m512i desc4_5 = _mm512_permutexvar_epi64
-				(permute_idx,
-				 iovas1);
-		const __m512i desc6_7 = _mm512_bsrli_epi128(desc4_5, 8);
-
-		_mm512_storeu_si512((void *)rxdp, desc0_1);
-		_mm512_storeu_si512((void *)(rxdp + 2), desc2_3);
-		_mm512_storeu_si512((void *)(rxdp + 4), desc4_5);
-		_mm512_storeu_si512((void *)(rxdp + 6), desc6_7);
-#else
-		/* permute leaves desc 4-7 addresses in header address slots 0-3
-		 * but these are ignored by driver since header split not
-		 * enabled.
-		 */
-		const __m512i desc0_3 = _mm512_permutexvar_epi64(permute_idx,
-								 iova_addrs);
-		const __m512i desc4_7 = _mm512_bsrli_epi128(desc0_3, 8);
-
-		_mm512_storeu_si512((void *)rxdp, desc0_3);
-		_mm512_storeu_si512((void *)(rxdp + 4), desc4_7);
-#endif
-		rxp += IAVF_DESCS_PER_LOOP_AVX;
-		rxdp += IAVF_DESCS_PER_LOOP_AVX;
-		cache->len -= IAVF_DESCS_PER_LOOP_AVX;
-	}
-
-	rxq->rxrearm_start += IAVF_RXQ_REARM_THRESH;
-	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
-		rxq->rxrearm_start = 0;
-
-	rxq->rxrearm_nb -= IAVF_RXQ_REARM_THRESH;
-
-	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
-			   (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
-
-	/* Update the tail pointer on the NIC */
-	IAVF_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
+	iavf_rxq_rearm_common(rxq, true);
 }
 
 #define IAVF_RX_LEN_MASK 0x80808080
-- 
2.25.1



* [PATCH] net/ice: remove avx512 specific Rx queue rearm code
  2023-02-07  6:38 [PATCH] net/i40e: remove avx512 specific Rx queue rearm code Wenzhuo Lu
  2023-02-07  6:38 ` [PATCH] net/iavf: " Wenzhuo Lu
@ 2023-02-07  6:39 ` Wenzhuo Lu
  2023-02-08  2:59   ` [PATCH v2] " Wenzhuo Lu
  2023-02-07  6:39 ` [PATCH] net/i40e: " Wenzhuo Lu
  2 siblings, 1 reply; 8+ messages in thread
From: Wenzhuo Lu @ 2023-02-07  6:39 UTC (permalink / raw)
  To: dev; +Cc: Wenzhuo Lu

'ice_rxq_rearm' in the avx512 path was optimized to improve performance.
But after commit a2833ecc5ea4 ("mempool: fix get objects from mempool
with cache"), this avx512-specific optimization is no longer necessary.
This patch removes the unnecessary PMD-specific optimization to make the
code easier to maintain and to benefit from the enhancements in the
common mempool library.
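
Only the descriptor-filling strategy differs between the paths; the ring
bookkeeping at the end of the removed hunk below is what any rearm,
including the common helper, must still do. The same lines with the
reasoning spelled out:

	rxq->rxrearm_start += ICE_RXQ_REARM_THRESH;
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;	/* wrapped past ring end */

	rxq->rxrearm_nb -= ICE_RXQ_REARM_THRESH;

	/* Hardware tail must point at the last valid descriptor, hence
	 * the minus one; after a wrap that is the ring's final entry.
	 */
	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
			(rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
	ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);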

Reported-by: Haijun Chu <haijun.chu@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ice/ice_rxtx_vec_avx512.c | 115 +-------------------------
 1 file changed, 1 insertion(+), 114 deletions(-)

diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index 5bfd5152df..569d485c2c 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c
@@ -16,120 +16,7 @@
 static __rte_always_inline void
 ice_rxq_rearm(struct ice_rx_queue *rxq)
 {
-	int i;
-	uint16_t rx_id;
-	volatile union ice_rx_flex_desc *rxdp;
-	struct ice_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
-	struct rte_mempool_cache *cache = rte_mempool_default_cache(rxq->mp,
-			rte_lcore_id());
-
-	rxdp = rxq->rx_ring + rxq->rxrearm_start;
-
-	if (unlikely(!cache))
-		return ice_rxq_rearm_common(rxq, true);
-
-	/* We need to pull 'n' more MBUFs into the software ring */
-	if (cache->len < ICE_RXQ_REARM_THRESH) {
-		uint32_t req = ICE_RXQ_REARM_THRESH + (cache->size -
-				cache->len);
-
-		int ret = rte_mempool_ops_dequeue_bulk(rxq->mp,
-				&cache->objs[cache->len], req);
-		if (ret == 0) {
-			cache->len += req;
-		} else {
-			if (rxq->rxrearm_nb + ICE_RXQ_REARM_THRESH >=
-			    rxq->nb_rx_desc) {
-				__m128i dma_addr0;
-
-				dma_addr0 = _mm_setzero_si128();
-				for (i = 0; i < ICE_DESCS_PER_LOOP; i++) {
-					rxep[i].mbuf = &rxq->fake_mbuf;
-					_mm_store_si128
-						((__m128i *)&rxdp[i].read,
-							dma_addr0);
-				}
-			}
-			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
-				ICE_RXQ_REARM_THRESH;
-			return;
-		}
-	}
-
-	const __m512i iova_offsets =  _mm512_set1_epi64
-		(offsetof(struct rte_mbuf, buf_iova));
-	const __m512i headroom = _mm512_set1_epi64(RTE_PKTMBUF_HEADROOM);
-
-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
-	/* shuffle the iova into correct slots. Values 4-7 will contain
-	 * zeros, so use 7 for a zero-value.
-	 */
-	const __m512i permute_idx = _mm512_set_epi64(7, 7, 3, 1, 7, 7, 2, 0);
-#else
-	const __m512i permute_idx = _mm512_set_epi64(7, 3, 6, 2, 5, 1, 4, 0);
-#endif
-
-	/* fill up the rxd in vector, process 8 mbufs in one loop */
-	for (i = 0; i < ICE_RXQ_REARM_THRESH / 8; i++) {
-		const __m512i mbuf_ptrs = _mm512_loadu_si512
-			(&cache->objs[cache->len - 8]);
-		_mm512_store_si512(rxep, mbuf_ptrs);
-
-		/* gather iova of mbuf0-7 into one zmm reg */
-		const __m512i iova_base_addrs = _mm512_i64gather_epi64
-			(_mm512_add_epi64(mbuf_ptrs, iova_offsets),
-				0, /* base */
-				1  /* scale */);
-		const __m512i iova_addrs = _mm512_add_epi64(iova_base_addrs,
-				headroom);
-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
-		const __m512i iovas0 = _mm512_castsi256_si512
-			(_mm512_extracti64x4_epi64(iova_addrs, 0));
-		const __m512i iovas1 = _mm512_castsi256_si512
-			(_mm512_extracti64x4_epi64(iova_addrs, 1));
-
-		/* permute leaves iova 2-3 in hdr_addr of desc 0-1
-		 * but these are ignored by driver since header split not
-		 * enabled. Similarly for desc 4 & 5.
-		 */
-		const __m512i desc0_1 = _mm512_permutexvar_epi64
-			(permute_idx, iovas0);
-		const __m512i desc2_3 = _mm512_bsrli_epi128(desc0_1, 8);
-
-		const __m512i desc4_5 = _mm512_permutexvar_epi64
-			(permute_idx, iovas1);
-		const __m512i desc6_7 = _mm512_bsrli_epi128(desc4_5, 8);
-
-		_mm512_store_si512((void *)rxdp, desc0_1);
-		_mm512_store_si512((void *)(rxdp + 2), desc2_3);
-		_mm512_store_si512((void *)(rxdp + 4), desc4_5);
-		_mm512_store_si512((void *)(rxdp + 6), desc6_7);
-#else
-		/* permute leaves iova 4-7 in hdr_addr of desc 0-3
-		 * but these are ignored by driver since header split not
-		 * enabled.
-		 */
-		const __m512i desc0_3 = _mm512_permutexvar_epi64
-			(permute_idx, iova_addrs);
-		const __m512i desc4_7 = _mm512_bsrli_epi128(desc0_3, 8);
-
-		_mm512_store_si512((void *)rxdp, desc0_3);
-		_mm512_store_si512((void *)(rxdp + 4), desc4_7);
-#endif
-		rxep += 8, rxdp += 8, cache->len -= 8;
-	}
-
-	rxq->rxrearm_start += ICE_RXQ_REARM_THRESH;
-	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
-		rxq->rxrearm_start = 0;
-
-	rxq->rxrearm_nb -= ICE_RXQ_REARM_THRESH;
-
-	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
-			     (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
-
-	/* Update the tail pointer on the NIC */
-	ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
+	ice_rxq_rearm_common(rxq, true);
 }
 
 static inline __m256i
-- 
2.25.1



* [PATCH] net/i40e: remove avx512 specific Rx queue rearm code
  2023-02-07  6:38 [PATCH] net/i40e: remove avx512 specific Rx queue rearm code Wenzhuo Lu
  2023-02-07  6:38 ` [PATCH] net/iavf: " Wenzhuo Lu
  2023-02-07  6:39 ` [PATCH] net/ice: " Wenzhuo Lu
@ 2023-02-07  6:39 ` Wenzhuo Lu
  2023-02-08  3:01   ` Zhang, Qi Z
  2 siblings, 1 reply; 8+ messages in thread
From: Wenzhuo Lu @ 2023-02-07  6:39 UTC (permalink / raw)
  To: dev; +Cc: Wenzhuo Lu

'i40e_rxq_rearm' in the avx512 path was optimized to improve performance.
But after commit a2833ecc5ea4 ("mempool: fix get objects from mempool
with cache"), this avx512-specific optimization is no longer necessary.
This patch removes the unnecessary PMD-specific optimization to make the
code easier to maintain and to benefit from the enhancements in the
common mempool library.

Reported-by: Haijun Chu <haijun.chu@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/i40e/i40e_rxtx_vec_avx512.c | 125 +-----------------------
 1 file changed, 1 insertion(+), 124 deletions(-)

diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx512.c b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
index 60c97d5331..d3c7bfd121 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_avx512.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
@@ -24,130 +24,7 @@
 static __rte_always_inline void
 i40e_rxq_rearm(struct i40e_rx_queue *rxq)
 {
-	int i;
-	uint16_t rx_id;
-	volatile union i40e_rx_desc *rxdp;
-	struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
-	struct rte_mempool_cache *cache = rte_mempool_default_cache(rxq->mp,
-			rte_lcore_id());
-
-	rxdp = rxq->rx_ring + rxq->rxrearm_start;
-
-	if (unlikely(!cache))
-		return i40e_rxq_rearm_common(rxq, true);
-
-	/* We need to pull 'n' more MBUFs into the software ring from mempool
-	 * We inline the mempool function here, so we can vectorize the copy
-	 * from the cache into the shadow ring.
-	 */
-
-	if (cache->len < RTE_I40E_RXQ_REARM_THRESH) {
-		/* No. Backfill the cache first, and then fill from it */
-		uint32_t req = RTE_I40E_RXQ_REARM_THRESH + (cache->size -
-				cache->len);
-
-		/* How many do we require
-		 * i.e. number to fill the cache + the request
-		 */
-		int ret = rte_mempool_ops_dequeue_bulk(rxq->mp,
-				&cache->objs[cache->len], req);
-		if (ret == 0) {
-			cache->len += req;
-		} else {
-			if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
-					rxq->nb_rx_desc) {
-				__m128i dma_addr0;
-
-				dma_addr0 = _mm_setzero_si128();
-				for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
-					rxep[i].mbuf = &rxq->fake_mbuf;
-					_mm_store_si128
-						((__m128i *)&rxdp[i].read,
-							dma_addr0);
-				}
-			}
-			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
-					RTE_I40E_RXQ_REARM_THRESH;
-			return;
-		}
-	}
-
-	const __m512i iova_offsets =  _mm512_set1_epi64
-		(offsetof(struct rte_mbuf, buf_iova));
-	const __m512i headroom = _mm512_set1_epi64(RTE_PKTMBUF_HEADROOM);
-
-#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
-	/* to shuffle the addresses to correct slots. Values 4-7 will contain
-	 * zeros, so use 7 for a zero-value.
-	 */
-	const __m512i permute_idx = _mm512_set_epi64(7, 7, 3, 1, 7, 7, 2, 0);
-#else
-	const __m512i permute_idx = _mm512_set_epi64(7, 3, 6, 2, 5, 1, 4, 0);
-#endif
-
-	/* Initialize the mbufs in vector, process 8 mbufs in one loop, taking
-	 * from mempool cache and populating both shadow and HW rings
-	 */
-	for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH / 8; i++) {
-		const __m512i mbuf_ptrs = _mm512_loadu_si512
-			(&cache->objs[cache->len - 8]);
-		_mm512_store_si512(rxep, mbuf_ptrs);
-
-		/* gather iova of mbuf0-7 into one zmm reg */
-		const __m512i iova_base_addrs = _mm512_i64gather_epi64
-			(_mm512_add_epi64(mbuf_ptrs, iova_offsets),
-				0, /* base */
-				1 /* scale */);
-		const __m512i iova_addrs = _mm512_add_epi64(iova_base_addrs,
-				headroom);
-#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
-		const __m512i iovas0 = _mm512_castsi256_si512
-			(_mm512_extracti64x4_epi64(iova_addrs, 0));
-		const __m512i iovas1 = _mm512_castsi256_si512
-			(_mm512_extracti64x4_epi64(iova_addrs, 1));
-
-		/* permute leaves desc 2-3 addresses in header address slots 0-1
-		 * but these are ignored by driver since header split not
-		 * enabled. Similarly for desc 4 & 5.
-		 */
-		const __m512i desc_rd_0_1 = _mm512_permutexvar_epi64
-			(permute_idx, iovas0);
-		const __m512i desc_rd_2_3 = _mm512_bsrli_epi128(desc_rd_0_1, 8);
-
-		const __m512i desc_rd_4_5 = _mm512_permutexvar_epi64
-			(permute_idx, iovas1);
-		const __m512i desc_rd_6_7 = _mm512_bsrli_epi128(desc_rd_4_5, 8);
-
-		_mm512_store_si512((void *)rxdp, desc_rd_0_1);
-		_mm512_store_si512((void *)(rxdp + 2), desc_rd_2_3);
-		_mm512_store_si512((void *)(rxdp + 4), desc_rd_4_5);
-		_mm512_store_si512((void *)(rxdp + 6), desc_rd_6_7);
-#else
-		/* permute leaves desc 4-7 addresses in header address slots 0-3
-		 * but these are ignored by driver since header split not
-		 * enabled.
-		 */
-		const __m512i desc_rd_0_3 = _mm512_permutexvar_epi64
-			(permute_idx, iova_addrs);
-		const __m512i desc_rd_4_7 = _mm512_bsrli_epi128(desc_rd_0_3, 8);
-
-		_mm512_store_si512((void *)rxdp, desc_rd_0_3);
-		_mm512_store_si512((void *)(rxdp + 4), desc_rd_4_7);
-#endif
-		rxep += 8, rxdp += 8, cache->len -= 8;
-	}
-
-	rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
-	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
-		rxq->rxrearm_start = 0;
-
-	rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;
-
-	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
-			     (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
-
-	/* Update the tail pointer on the NIC */
-	I40E_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
+	return i40e_rxq_rearm_common(rxq, true);
 }
 
 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
-- 
2.25.1



* [PATCH v2] net/ice: remove avx512 specific Rx queue rearm code
  2023-02-07  6:39 ` [PATCH] net/ice: " Wenzhuo Lu
@ 2023-02-08  2:59   ` Wenzhuo Lu
  2023-02-08  3:42     ` Zhang, Qi Z
  0 siblings, 1 reply; 8+ messages in thread
From: Wenzhuo Lu @ 2023-02-08  2:59 UTC (permalink / raw)
  To: dev; +Cc: Wenzhuo Lu

'ice_rxq_rearm' in the avx512 path was optimized to improve performance.
But after commit a2833ecc5ea4 ("mempool: fix get objects from mempool
with cache"), this avx512-specific optimization is no longer necessary.
This patch removes the unnecessary PMD-specific optimization to make the
code easier to maintain and to benefit from the enhancements in the
common mempool library.

Reported-by: Haijun Chu <haijun.chu@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
v2:
 - Rebased on dpdk-next-net-intel
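
The rebase also picks up the RTE_IOVA_AS_PA build option, visible in the
hunk below: in IOVA-as-VA builds the descriptor address is derived from
buf_addr instead of buf_iova. A sketch of that selection ('mb' is a
hypothetical mbuf pointer; the common helper owns this logic now):

	#if RTE_IOVA_AS_PA
		uint64_t base = mb->buf_iova;            /* IOVA, often a PA */
	#else
		uint64_t base = (uintptr_t)mb->buf_addr; /* IOVA == VA build */
	#endif
		uint64_t dma_addr = base + RTE_PKTMBUF_HEADROOM;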

 drivers/net/ice/ice_rxtx_vec_avx512.c | 120 +-------------------------
 1 file changed, 1 insertion(+), 119 deletions(-)

diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index 7e388b7569..c3b087c52e 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c
@@ -16,125 +16,7 @@
 static __rte_always_inline void
 ice_rxq_rearm(struct ice_rx_queue *rxq)
 {
-	int i;
-	uint16_t rx_id;
-	volatile union ice_rx_flex_desc *rxdp;
-	struct ice_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
-	struct rte_mempool_cache *cache = rte_mempool_default_cache(rxq->mp,
-			rte_lcore_id());
-
-	rxdp = rxq->rx_ring + rxq->rxrearm_start;
-
-	if (unlikely(!cache))
-		return ice_rxq_rearm_common(rxq, true);
-
-	/* We need to pull 'n' more MBUFs into the software ring */
-	if (cache->len < ICE_RXQ_REARM_THRESH) {
-		uint32_t req = ICE_RXQ_REARM_THRESH + (cache->size -
-				cache->len);
-
-		int ret = rte_mempool_ops_dequeue_bulk(rxq->mp,
-				&cache->objs[cache->len], req);
-		if (ret == 0) {
-			cache->len += req;
-		} else {
-			if (rxq->rxrearm_nb + ICE_RXQ_REARM_THRESH >=
-			    rxq->nb_rx_desc) {
-				__m128i dma_addr0;
-
-				dma_addr0 = _mm_setzero_si128();
-				for (i = 0; i < ICE_DESCS_PER_LOOP; i++) {
-					rxep[i].mbuf = &rxq->fake_mbuf;
-					_mm_store_si128
-						((__m128i *)&rxdp[i].read,
-							dma_addr0);
-				}
-			}
-			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
-				ICE_RXQ_REARM_THRESH;
-			return;
-		}
-	}
-
-#if RTE_IOVA_AS_PA
-	const __m512i iova_offsets =  _mm512_set1_epi64
-		(offsetof(struct rte_mbuf, buf_iova));
-#else
-	const __m512i iova_offsets =  _mm512_set1_epi64
-		(offsetof(struct rte_mbuf, buf_addr));
-#endif
-	const __m512i headroom = _mm512_set1_epi64(RTE_PKTMBUF_HEADROOM);
-
-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
-	/* shuffle the iova into correct slots. Values 4-7 will contain
-	 * zeros, so use 7 for a zero-value.
-	 */
-	const __m512i permute_idx = _mm512_set_epi64(7, 7, 3, 1, 7, 7, 2, 0);
-#else
-	const __m512i permute_idx = _mm512_set_epi64(7, 3, 6, 2, 5, 1, 4, 0);
-#endif
-
-	/* fill up the rxd in vector, process 8 mbufs in one loop */
-	for (i = 0; i < ICE_RXQ_REARM_THRESH / 8; i++) {
-		const __m512i mbuf_ptrs = _mm512_loadu_si512
-			(&cache->objs[cache->len - 8]);
-		_mm512_store_si512(rxep, mbuf_ptrs);
-
-		/* gather iova of mbuf0-7 into one zmm reg */
-		const __m512i iova_base_addrs = _mm512_i64gather_epi64
-			(_mm512_add_epi64(mbuf_ptrs, iova_offsets),
-				0, /* base */
-				1  /* scale */);
-		const __m512i iova_addrs = _mm512_add_epi64(iova_base_addrs,
-				headroom);
-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
-		const __m512i iovas0 = _mm512_castsi256_si512
-			(_mm512_extracti64x4_epi64(iova_addrs, 0));
-		const __m512i iovas1 = _mm512_castsi256_si512
-			(_mm512_extracti64x4_epi64(iova_addrs, 1));
-
-		/* permute leaves iova 2-3 in hdr_addr of desc 0-1
-		 * but these are ignored by driver since header split not
-		 * enabled. Similarly for desc 4 & 5.
-		 */
-		const __m512i desc0_1 = _mm512_permutexvar_epi64
-			(permute_idx, iovas0);
-		const __m512i desc2_3 = _mm512_bsrli_epi128(desc0_1, 8);
-
-		const __m512i desc4_5 = _mm512_permutexvar_epi64
-			(permute_idx, iovas1);
-		const __m512i desc6_7 = _mm512_bsrli_epi128(desc4_5, 8);
-
-		_mm512_store_si512((void *)rxdp, desc0_1);
-		_mm512_store_si512((void *)(rxdp + 2), desc2_3);
-		_mm512_store_si512((void *)(rxdp + 4), desc4_5);
-		_mm512_store_si512((void *)(rxdp + 6), desc6_7);
-#else
-		/* permute leaves iova 4-7 in hdr_addr of desc 0-3
-		 * but these are ignored by driver since header split not
-		 * enabled.
-		 */
-		const __m512i desc0_3 = _mm512_permutexvar_epi64
-			(permute_idx, iova_addrs);
-		const __m512i desc4_7 = _mm512_bsrli_epi128(desc0_3, 8);
-
-		_mm512_store_si512((void *)rxdp, desc0_3);
-		_mm512_store_si512((void *)(rxdp + 4), desc4_7);
-#endif
-		rxep += 8, rxdp += 8, cache->len -= 8;
-	}
-
-	rxq->rxrearm_start += ICE_RXQ_REARM_THRESH;
-	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
-		rxq->rxrearm_start = 0;
-
-	rxq->rxrearm_nb -= ICE_RXQ_REARM_THRESH;
-
-	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
-			     (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
-
-	/* Update the tail pointer on the NIC */
-	ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
+	ice_rxq_rearm_common(rxq, true);
 }
 
 static inline __m256i
-- 
2.25.1



* RE: [PATCH] net/i40e: remove avx512 specific Rx queue rearm code
  2023-02-07  6:39 ` [PATCH] net/i40e: " Wenzhuo Lu
@ 2023-02-08  3:01   ` Zhang, Qi Z
  0 siblings, 0 replies; 8+ messages in thread
From: Zhang, Qi Z @ 2023-02-08  3:01 UTC (permalink / raw)
  To: Lu, Wenzhuo, dev; +Cc: Lu, Wenzhuo



> -----Original Message-----
> From: Wenzhuo Lu <wenzhuo.lu@intel.com>
> Sent: Tuesday, February 7, 2023 2:39 PM
> To: dev@dpdk.org
> Cc: Lu, Wenzhuo <wenzhuo.lu@intel.com>
> Subject: [PATCH] net/i40e: remove avx512 specific Rx queue rearm code
> 
> 'i40e_rxq_rearm' in the avx512 path was optimized to improve performance.
> But after commit a2833ecc5ea4 ("mempool: fix get objects from mempool
> with cache"), this avx512-specific optimization is no longer necessary.
> This patch removes the unnecessary PMD-specific optimization to make the
> code easier to maintain and to benefit from the enhancements in the
> common mempool library.
> 
> Reported-by: Haijun Chu <haijun.chu@intel.com>
> Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>

Acked-by: Qi Zhang <qi.z.zhang@intel.com>

Applied to dpdk-next-net-intel.

Thanks
Qi


* RE: [PATCH] net/iavf: remove avx512 specific Rx queue rearm code
  2023-02-07  6:38 ` [PATCH] net/iavf: " Wenzhuo Lu
@ 2023-02-08  3:14   ` Zhang, Qi Z
  0 siblings, 0 replies; 8+ messages in thread
From: Zhang, Qi Z @ 2023-02-08  3:14 UTC (permalink / raw)
  To: Lu, Wenzhuo, dev; +Cc: Lu, Wenzhuo



> -----Original Message-----
> From: Wenzhuo Lu <wenzhuo.lu@intel.com>
> Sent: Tuesday, February 7, 2023 2:39 PM
> To: dev@dpdk.org
> Cc: Lu, Wenzhuo <wenzhuo.lu@intel.com>
> Subject: [PATCH] net/iavf: remove avx512 specific Rx queue rearm code
> 
> 'iavf_rxq_rearm' in the avx512 path was optimized to improve performance.
> But after commit a2833ecc5ea4 ("mempool: fix get objects from mempool
> with cache"), this avx512-specific optimization is no longer necessary.
> This patch removes the unnecessary PMD-specific optimization to make the
> code easier to maintain and to benefit from the enhancements in the
> common mempool library.
> 
> Reported-by: Haijun Chu <haijun.chu@intel.com>
> Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>

Acked-by: Qi Zhang <qi.z.zhang@intel.com>

Applied to dpdk-next-net-intel.

Thanks
Qi


* RE: [PATCH v2] net/ice: remove avx512 specific Rx queue rearm code
  2023-02-08  2:59   ` [PATCH v2] " Wenzhuo Lu
@ 2023-02-08  3:42     ` Zhang, Qi Z
  0 siblings, 0 replies; 8+ messages in thread
From: Zhang, Qi Z @ 2023-02-08  3:42 UTC (permalink / raw)
  To: Lu, Wenzhuo, dev; +Cc: Lu, Wenzhuo



> -----Original Message-----
> From: Wenzhuo Lu <wenzhuo.lu@intel.com>
> Sent: Wednesday, February 8, 2023 10:59 AM
> To: dev@dpdk.org
> Cc: Lu, Wenzhuo <wenzhuo.lu@intel.com>
> Subject: [PATCH v2] net/ice: remove avx512 specific Rx queue rearm code
> 
> 'ice_rxq_rearm' in the avx512 path was optimized to improve performance.
> But after commit a2833ecc5ea4 ("mempool: fix get objects from mempool
> with cache"), this avx512-specific optimization is no longer necessary.
> This patch removes the unnecessary PMD-specific optimization to make the
> code easier to maintain and to benefit from the enhancements in the
> common mempool library.
> 
> Reported-by: Haijun Chu <haijun.chu@intel.com>
> Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>

Acked-by: Qi Zhang <qi.z.zhang@intel.com>

Applied to dpdk-next-net-intel.

Thanks
Qi


end of thread

Thread overview: 8+ messages
2023-02-07  6:38 [PATCH] net/i40e: remove avx512 specific Rx queue rearm code Wenzhuo Lu
2023-02-07  6:38 ` [PATCH] net/iavf: " Wenzhuo Lu
2023-02-08  3:14   ` Zhang, Qi Z
2023-02-07  6:39 ` [PATCH] net/ice: " Wenzhuo Lu
2023-02-08  2:59   ` [PATCH v2] " Wenzhuo Lu
2023-02-08  3:42     ` Zhang, Qi Z
2023-02-07  6:39 ` [PATCH] net/i40e: " Wenzhuo Lu
2023-02-08  3:01   ` Zhang, Qi Z
