DPDK patches and discussions
From: Shaiq Wani <shaiq.wani@intel.com>
To: dev@dpdk.org, bruce.richardson@intel.com, aman.deep.singh@intel.com
Subject: [PATCH v4 1/2] net/idpf: enable AVX2 for split queue Rx
Date: Tue, 30 Sep 2025 14:37:08 +0530
Message-ID: <20250930090709.2521114-2-shaiq.wani@intel.com>
In-Reply-To: <20250930090709.2521114-1-shaiq.wani@intel.com>

Some CPUs do not support AVX512; enable AVX2 on them to get
better per-core performance.

In the single queue model, the same descriptor queue is used by SW to
post descriptors to the device and by the device to report completed
descriptors back to SW. The split queue model separates these into
different queues, allowing parallel processing and improved performance.
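
As a rough illustration of the difference (a sketch only, not part of
this patch; the structure and field names below are invented for the
example and do not match the real idpf definitions):

  /* Single queue model: one descriptor ring serves both directions. */
  struct singleq_rx_model {
          void *ring;        /* SW posts buffer descriptors here and the
                              * device writes completions back into the
                              * same ring. */
  };

  /* Split queue model: buffer posting and completion reporting are
   * decoupled so they can proceed in parallel.
   */
  struct splitq_rx_model {
          void *bufq_ring;   /* SW -> device: buffer refill ring, rearmed
                              * by idpf_splitq_rearm_common() below. */
          void *complq_ring; /* device -> SW: completion ring polled by
                              * idpf_dp_splitq_recv_pkts_avx2(). */
  };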

Signed-off-by: Shaiq Wani <shaiq.wani@intel.com>
---
 drivers/net/intel/idpf/idpf_common_device.h   |   1 +
 drivers/net/intel/idpf/idpf_common_rxtx.c     |  64 +++++++
 drivers/net/intel/idpf/idpf_common_rxtx.h     |   5 +
 .../net/intel/idpf/idpf_common_rxtx_avx2.c    | 160 ++++++++++++++++++
 .../net/intel/idpf/idpf_common_rxtx_avx512.c  |  56 ------
 5 files changed, 230 insertions(+), 56 deletions(-)

diff --git a/drivers/net/intel/idpf/idpf_common_device.h b/drivers/net/intel/idpf/idpf_common_device.h
index 3b95d519c6..ed459e6f54 100644
--- a/drivers/net/intel/idpf/idpf_common_device.h
+++ b/drivers/net/intel/idpf/idpf_common_device.h
@@ -49,6 +49,7 @@ enum idpf_rx_func_type {
 	IDPF_RX_SINGLEQ,
 	IDPF_RX_SINGLEQ_SCATTERED,
 	IDPF_RX_SINGLEQ_AVX2,
+	IDPF_RX_AVX2,
 	IDPF_RX_AVX512,
 	IDPF_RX_SINGLQ_AVX512,
 	IDPF_RX_MAX
diff --git a/drivers/net/intel/idpf/idpf_common_rxtx.c b/drivers/net/intel/idpf/idpf_common_rxtx.c
index a2b8c372d6..b00bf69a25 100644
--- a/drivers/net/intel/idpf/idpf_common_rxtx.c
+++ b/drivers/net/intel/idpf/idpf_common_rxtx.c
@@ -250,6 +250,63 @@ idpf_qc_split_tx_complq_reset(struct ci_tx_queue *cq)
 	cq->expected_gen_id = 1;
 }
 
+RTE_EXPORT_INTERNAL_SYMBOL(idpf_splitq_rearm_common)
+void
+idpf_splitq_rearm_common(struct idpf_rx_queue *rx_bufq)
+{
+	struct rte_mbuf **rxp = &rx_bufq->sw_ring[rx_bufq->rxrearm_start];
+	volatile union virtchnl2_rx_buf_desc *rxdp = rx_bufq->rx_ring;
+	uint16_t rx_id;
+	int i;
+
+	rxdp += rx_bufq->rxrearm_start;
+
+	/* Pull 'n' more MBUFs into the software ring */
+	if (rte_mbuf_raw_alloc_bulk(rx_bufq->mp,
+				 (void *)rxp,
+				 IDPF_RXQ_REARM_THRESH) < 0) {
+		if (rx_bufq->rxrearm_nb + IDPF_RXQ_REARM_THRESH >=
+		    rx_bufq->nb_rx_desc) {
+			__m128i dma_addr0;
+
+			dma_addr0 = _mm_setzero_si128();
+			for (i = 0; i < IDPF_VPMD_DESCS_PER_LOOP; i++) {
+				rxp[i] = &rx_bufq->fake_mbuf;
+				_mm_store_si128(RTE_CAST_PTR(__m128i *, &rxdp[i]),
+						dma_addr0);
+			}
+		}
+		rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
+				IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
+		return;
+	}
+
+	/* Initialize the mbufs in vector, process 8 mbufs in one loop */
+	for (i = 0; i < IDPF_RXQ_REARM_THRESH;
+			i += 8, rxp += 8, rxdp += 8) {
+		rxdp[0].split_rd.pkt_addr = rxp[0]->buf_iova + RTE_PKTMBUF_HEADROOM;
+		rxdp[1].split_rd.pkt_addr = rxp[1]->buf_iova + RTE_PKTMBUF_HEADROOM;
+		rxdp[2].split_rd.pkt_addr = rxp[2]->buf_iova + RTE_PKTMBUF_HEADROOM;
+		rxdp[3].split_rd.pkt_addr = rxp[3]->buf_iova + RTE_PKTMBUF_HEADROOM;
+		rxdp[4].split_rd.pkt_addr = rxp[4]->buf_iova + RTE_PKTMBUF_HEADROOM;
+		rxdp[5].split_rd.pkt_addr = rxp[5]->buf_iova + RTE_PKTMBUF_HEADROOM;
+		rxdp[6].split_rd.pkt_addr = rxp[6]->buf_iova + RTE_PKTMBUF_HEADROOM;
+		rxdp[7].split_rd.pkt_addr = rxp[7]->buf_iova + RTE_PKTMBUF_HEADROOM;
+	}
+
+	rx_bufq->rxrearm_start += IDPF_RXQ_REARM_THRESH;
+	if (rx_bufq->rxrearm_start >= rx_bufq->nb_rx_desc)
+		rx_bufq->rxrearm_start = 0;
+
+	rx_bufq->rxrearm_nb -= IDPF_RXQ_REARM_THRESH;
+
+	rx_id = (uint16_t)((rx_bufq->rxrearm_start == 0) ?
+			     (rx_bufq->nb_rx_desc - 1) : (rx_bufq->rxrearm_start - 1));
+
+	/* Update the tail pointer on the NIC */
+	IDPF_PCI_REG_WRITE(rx_bufq->qrx_tail, rx_id);
+}
+
 RTE_EXPORT_INTERNAL_SYMBOL(idpf_qc_single_tx_queue_reset)
 void
 idpf_qc_single_tx_queue_reset(struct ci_tx_queue *txq)
@@ -1656,6 +1713,13 @@ const struct ci_rx_path_info idpf_rx_path_infos[] = {
 			.rx_offloads = IDPF_RX_VECTOR_OFFLOADS,
 			.simd_width = RTE_VECT_SIMD_256,
 			.extra.single_queue = true}},
+	[IDPF_RX_AVX2] = {
+		.pkt_burst = idpf_dp_splitq_recv_pkts_avx2,
+		.info = "Split AVX2 Vector",
+		.features = {
+			.rx_offloads = IDPF_RX_VECTOR_OFFLOADS,
+			.simd_width = RTE_VECT_SIMD_256,
+		}},
 #ifdef CC_AVX512_SUPPORT
 	[IDPF_RX_AVX512] = {
 		.pkt_burst = idpf_dp_splitq_recv_pkts_avx512,
diff --git a/drivers/net/intel/idpf/idpf_common_rxtx.h b/drivers/net/intel/idpf/idpf_common_rxtx.h
index 3bc3323af4..87f6895c4c 100644
--- a/drivers/net/intel/idpf/idpf_common_rxtx.h
+++ b/drivers/net/intel/idpf/idpf_common_rxtx.h
@@ -203,6 +203,8 @@ void idpf_qc_split_tx_descq_reset(struct ci_tx_queue *txq);
 __rte_internal
 void idpf_qc_split_tx_complq_reset(struct ci_tx_queue *cq);
 __rte_internal
+void idpf_splitq_rearm_common(struct idpf_rx_queue *rx_bufq);
+__rte_internal
 void idpf_qc_single_tx_queue_reset(struct ci_tx_queue *txq);
 __rte_internal
 void idpf_qc_rx_queue_release(void *rxq);
@@ -252,6 +254,9 @@ __rte_internal
 uint16_t idpf_dp_splitq_xmit_pkts_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 					 uint16_t nb_pkts);
 __rte_internal
+uint16_t idpf_dp_splitq_recv_pkts_avx2(void *rxq, struct rte_mbuf **rx_pkts,
+				     uint16_t nb_pkts);
+__rte_internal
 uint16_t idpf_dp_singleq_recv_scatter_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			  uint16_t nb_pkts);
 __rte_internal
diff --git a/drivers/net/intel/idpf/idpf_common_rxtx_avx2.c b/drivers/net/intel/idpf/idpf_common_rxtx_avx2.c
index 21c8f79254..986fffdbc1 100644
--- a/drivers/net/intel/idpf/idpf_common_rxtx_avx2.c
+++ b/drivers/net/intel/idpf/idpf_common_rxtx_avx2.c
@@ -482,6 +482,166 @@ idpf_dp_singleq_recv_pkts_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, uint16
 	return _idpf_singleq_recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts);
 }
 
+uint16_t
+idpf_dp_splitq_recv_pkts_avx2(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	struct idpf_rx_queue *queue = (struct idpf_rx_queue *)rxq;
+	const uint32_t *ptype_tbl = queue->adapter->ptype_tbl;
+	struct rte_mbuf **sw_ring = &queue->bufq2->sw_ring[queue->rx_tail];
+	volatile union virtchnl2_rx_desc *rxdp =
+		(volatile union virtchnl2_rx_desc *)queue->rx_ring + queue->rx_tail;
+	const __m256i mbuf_init = _mm256_set_epi64x(0, 0, 0, queue->mbuf_initializer);
+
+	rte_prefetch0(rxdp);
+	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, 4); /* 4 desc per AVX2 iteration */
+
+	if (queue->bufq2->rxrearm_nb > IDPF_RXQ_REARM_THRESH)
+		idpf_splitq_rearm_common(queue->bufq2);
+
+	/* head gen check */
+	uint64_t head_gen = rxdp->flex_adv_nic_3_wb.pktlen_gen_bufq_id;
+	if (((head_gen >> VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S) &
+		 VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M) != queue->expected_gen_id)
+		return 0;
+
+	uint16_t received = 0;
+
+	/* Shuffle mask: picks fields from each 16-byte descriptor pair into the
+	 * layout that will be merged into mbuf->rearm_data candidates.
+	 */
+	const __m256i shuf = _mm256_set_epi8(
+		/* high 128 bits (desc 3 then desc 2 lanes) */
+		(char)0xFF, (char)0xFF, (char)0xFF, (char)0xFF, 11, 10, 5, 4,
+		(char)0xFF, (char)0xFF, 5, 4, (char)0xFF, (char)0xFF, (char)0xFF, (char)0xFF,
+		/* low 128 bits (desc 1 then desc 0 lanes) */
+		(char)0xFF, (char)0xFF, (char)0xFF, (char)0xFF, 11, 10, 5, 4,
+		(char)0xFF, (char)0xFF, 5, 4, (char)0xFF, (char)0xFF, (char)0xFF, (char)0xFF
+	);
+
+	/* mask that clears the gen/bufq id bits in the packet length word */
+	const __m256i len_mask = _mm256_set_epi32(
+		0xffffffff, 0xffffffff, 0xffff3fff, 0xffffffff,
+		0xffffffff, 0xffffffff, 0xffff3fff, 0xffffffff
+	);
+
+	const __m256i ptype_mask = _mm256_set1_epi16(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M);
+
+	for (uint16_t i = 0; i < nb_pkts; i += 4, rxdp += 4) {
+		/* Step 1: copy 4 mbuf pointers (64-bit each) into rx_pkts[] */
+		__m128i ptrs_lo = _mm_loadu_si128((const __m128i *)&sw_ring[i]);
+		__m128i ptrs_hi = _mm_loadu_si128((const __m128i *)&sw_ring[i + 2]);
+		_mm_storeu_si128((__m128i *)&rx_pkts[i], ptrs_lo);
+		_mm_storeu_si128((__m128i *)&rx_pkts[i + 2], ptrs_hi);
+
+		/* Step 2: load four 128-bit descriptors */
+		__m128i d0 = _mm_load_si128(RTE_CAST_PTR(const __m128i *, &rxdp[0]));
+		rte_compiler_barrier();
+		__m128i d1 = _mm_load_si128(RTE_CAST_PTR(const __m128i *, &rxdp[1]));
+		rte_compiler_barrier();
+		__m128i d2 = _mm_load_si128(RTE_CAST_PTR(const __m128i *, &rxdp[2]));
+		rte_compiler_barrier();
+		__m128i d3 = _mm_load_si128(RTE_CAST_PTR(const __m128i *, &rxdp[3]));
+
+		/* Build 256-bit descriptor-pairs */
+		__m256i d01 = _mm256_set_m128i(d1, d0); /* low lane: d0, d1 */
+		__m256i d23 = _mm256_set_m128i(d3, d2); /* high lane: d2, d3 */
+
+		/* mask off high pkt_len bits  */
+		__m256i desc01 = _mm256_and_si256(d01, len_mask);
+		__m256i desc23 = _mm256_and_si256(d23, len_mask);
+
+		/* Step 3: shuffle relevant bytes into mbuf rearm candidates */
+		__m256i mb01 = _mm256_shuffle_epi8(desc01, shuf);
+		__m256i mb23 = _mm256_shuffle_epi8(desc23, shuf);
+
+		/* Step 4: extract ptypes from descriptors and translate via table */
+		__m256i pt01 = _mm256_and_si256(d01, ptype_mask);
+		__m256i pt23 = _mm256_and_si256(d23, ptype_mask);
+
+		uint16_t ptype0 = (uint16_t)_mm256_extract_epi16(pt01, 1);
+		uint16_t ptype1 = (uint16_t)_mm256_extract_epi16(pt01, 9);
+		uint16_t ptype2 = (uint16_t)_mm256_extract_epi16(pt23, 1);
+		uint16_t ptype3 = (uint16_t)_mm256_extract_epi16(pt23, 9);
+
+		mb01 = _mm256_insert_epi32(mb01, (int)ptype_tbl[ptype1], 2);
+		mb01 = _mm256_insert_epi32(mb01, (int)ptype_tbl[ptype0], 0);
+		mb23 = _mm256_insert_epi32(mb23, (int)ptype_tbl[ptype3], 2);
+		mb23 = _mm256_insert_epi32(mb23, (int)ptype_tbl[ptype2], 0);
+
+		/* Step 5: build rearm vectors */
+		__m128i mb01_lo = _mm256_castsi256_si128(mb01);
+		__m128i mb01_hi = _mm256_extracti128_si256(mb01, 1);
+		__m128i mb23_lo = _mm256_castsi256_si128(mb23);
+		__m128i mb23_hi = _mm256_extracti128_si256(mb23, 1);
+
+		__m256i rearm0 = _mm256_permute2f128_si256(mbuf_init, _mm256_set_m128i
+							(mb01_hi, mb01_lo), 0x20);
+		__m256i rearm1 = _mm256_blend_epi32(mbuf_init, _mm256_set_m128i
+							(mb01_hi, mb01_lo), 0xF0);
+		__m256i rearm2 = _mm256_permute2f128_si256(mbuf_init, _mm256_set_m128i
+							(mb23_hi, mb23_lo), 0x20);
+		__m256i rearm3 = _mm256_blend_epi32(mbuf_init, _mm256_set_m128i
+							(mb23_hi, mb23_lo), 0xF0);
+
+		/* Step 6: per-descriptor scalar validity checks */
+		bool valid0 = false, valid1 = false, valid2 = false, valid3 = false;
+		{
+			uint64_t g0 = rxdp[0].flex_adv_nic_3_wb.pktlen_gen_bufq_id;
+			uint64_t g1 = rxdp[1].flex_adv_nic_3_wb.pktlen_gen_bufq_id;
+			uint64_t g2 = rxdp[2].flex_adv_nic_3_wb.pktlen_gen_bufq_id;
+			uint64_t g3 = rxdp[3].flex_adv_nic_3_wb.pktlen_gen_bufq_id;
+
+			bool dd0 = (g0 & 1ULL) != 0ULL;
+			bool dd1 = (g1 & 1ULL) != 0ULL;
+			bool dd2 = (g2 & 1ULL) != 0ULL;
+			bool dd3 = (g3 & 1ULL) != 0ULL;
+
+			uint64_t gen0 = (g0 >> VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S) &
+						VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M;
+			uint64_t gen1 = (g1 >> VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S) &
+						VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M;
+			uint64_t gen2 = (g2 >> VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S) &
+						VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M;
+			uint64_t gen3 = (g3 >> VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S) &
+						VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M;
+
+			valid0 = dd0 && (gen0 == queue->expected_gen_id);
+			valid1 = dd1 && (gen1 == queue->expected_gen_id);
+			valid2 = dd2 && (gen2 == queue->expected_gen_id);
+			valid3 = dd3 && (gen3 == queue->expected_gen_id);
+		}
+
+		unsigned int mask =	(valid0 ? 1U : 0U) | (valid1 ? 2U : 0U)
+						| (valid2 ? 4U : 0U) | (valid3 ? 8U : 0U);
+		uint16_t burst = (uint16_t)__builtin_popcount(mask);
+
+		/* Step 7: store rearm_data only for validated descriptors */
+		if (valid0)
+			_mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data, rearm0);
+		if (valid1)
+			_mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data, rearm1);
+		if (valid2)
+			_mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data, rearm2);
+		if (valid3)
+			_mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data, rearm3);
+
+		received += burst;
+		if (burst != 4)
+			break;
+	}
+	queue->rx_tail += received;
+	queue->expected_gen_id ^= ((queue->rx_tail & queue->nb_rx_desc) != 0);
+	queue->rx_tail &= (queue->nb_rx_desc - 1);
+	if ((queue->rx_tail & 1) == 1 && received > 1) {
+		queue->rx_tail--;
+		received--;
+	}
+	queue->bufq2->rxrearm_nb += received;
+	return received;
+}
+
+RTE_EXPORT_INTERNAL_SYMBOL(idpf_dp_splitq_recv_pkts_avx2)
+
 static inline void
 idpf_singleq_vtx1(volatile struct idpf_base_tx_desc *txdp,
 		  struct rte_mbuf *pkt, uint64_t flags)
diff --git a/drivers/net/intel/idpf/idpf_common_rxtx_avx512.c b/drivers/net/intel/idpf/idpf_common_rxtx_avx512.c
index bc2cadd738..d3a161c763 100644
--- a/drivers/net/intel/idpf/idpf_common_rxtx_avx512.c
+++ b/drivers/net/intel/idpf/idpf_common_rxtx_avx512.c
@@ -540,62 +540,6 @@ idpf_dp_singleq_recv_pkts_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return _idpf_singleq_recv_raw_pkts_avx512(rx_queue, rx_pkts, nb_pkts);
 }
 
-static __rte_always_inline void
-idpf_splitq_rearm_common(struct idpf_rx_queue *rx_bufq)
-{
-	struct rte_mbuf **rxp = &rx_bufq->sw_ring[rx_bufq->rxrearm_start];
-	volatile union virtchnl2_rx_buf_desc *rxdp = rx_bufq->rx_ring;
-	uint16_t rx_id;
-	int i;
-
-	rxdp += rx_bufq->rxrearm_start;
-
-	/* Pull 'n' more MBUFs into the software ring */
-	if (rte_mbuf_raw_alloc_bulk(rx_bufq->mp,
-				 (void *)rxp,
-				 IDPF_RXQ_REARM_THRESH) < 0) {
-		if (rx_bufq->rxrearm_nb + IDPF_RXQ_REARM_THRESH >=
-		    rx_bufq->nb_rx_desc) {
-			__m128i dma_addr0;
-
-			dma_addr0 = _mm_setzero_si128();
-			for (i = 0; i < IDPF_VPMD_DESCS_PER_LOOP; i++) {
-				rxp[i] = &rx_bufq->fake_mbuf;
-				_mm_store_si128(RTE_CAST_PTR(__m128i *, &rxdp[i]),
-						dma_addr0);
-			}
-		}
-	rte_atomic_fetch_add_explicit(&rx_bufq->rx_stats.mbuf_alloc_failed,
-			   IDPF_RXQ_REARM_THRESH, rte_memory_order_relaxed);
-		return;
-	}
-
-	/* Initialize the mbufs in vector, process 8 mbufs in one loop */
-	for (i = 0; i < IDPF_RXQ_REARM_THRESH;
-			i += 8, rxp += 8, rxdp += 8) {
-		rxdp[0].split_rd.pkt_addr = rxp[0]->buf_iova + RTE_PKTMBUF_HEADROOM;
-		rxdp[1].split_rd.pkt_addr = rxp[1]->buf_iova + RTE_PKTMBUF_HEADROOM;
-		rxdp[2].split_rd.pkt_addr = rxp[2]->buf_iova + RTE_PKTMBUF_HEADROOM;
-		rxdp[3].split_rd.pkt_addr = rxp[3]->buf_iova + RTE_PKTMBUF_HEADROOM;
-		rxdp[4].split_rd.pkt_addr = rxp[4]->buf_iova + RTE_PKTMBUF_HEADROOM;
-		rxdp[5].split_rd.pkt_addr = rxp[5]->buf_iova + RTE_PKTMBUF_HEADROOM;
-		rxdp[6].split_rd.pkt_addr = rxp[6]->buf_iova + RTE_PKTMBUF_HEADROOM;
-		rxdp[7].split_rd.pkt_addr = rxp[7]->buf_iova + RTE_PKTMBUF_HEADROOM;
-	}
-
-	rx_bufq->rxrearm_start += IDPF_RXQ_REARM_THRESH;
-	if (rx_bufq->rxrearm_start >= rx_bufq->nb_rx_desc)
-		rx_bufq->rxrearm_start = 0;
-
-	rx_bufq->rxrearm_nb -= IDPF_RXQ_REARM_THRESH;
-
-	rx_id = (uint16_t)((rx_bufq->rxrearm_start == 0) ?
-			     (rx_bufq->nb_rx_desc - 1) : (rx_bufq->rxrearm_start - 1));
-
-	/* Update the tail pointer on the NIC */
-	IDPF_PCI_REG_WRITE(rx_bufq->qrx_tail, rx_id);
-}
-
 static __rte_always_inline void
 idpf_splitq_rearm(struct idpf_rx_queue *rx_bufq)
 {
-- 
2.34.1
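
For reference, the per-descriptor validity test in Step 6 of the AVX2
receive loop above reduces to the following scalar form (an illustrative
sketch only, not part of the patch; it reuses the descriptor field and
macros from the code above):

  /* Illustrative scalar equivalent of the Step 6 check (sketch only). */
  static inline bool
  splitq_desc_ready(uint64_t pktlen_gen_bufq_id, uint64_t expected_gen_id)
  {
          /* Low bit of the field, used as the "descriptor done" flag in
           * the loop above.
           */
          bool dd = (pktlen_gen_bufq_id & 1ULL) != 0ULL;
          /* Generation bit must match the queue's expected value, which
           * is toggled each time rx_tail wraps around the ring.
           */
          uint64_t gen = (pktlen_gen_bufq_id >> VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S) &
                          VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M;
          return dd && gen == expected_gen_id;
  }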



Thread overview: 15+ messages
     [not found] <20250917052658.582872-1-shaiq.wani@intel.com/>
2025-09-25  9:20 ` [PATCH v2 0/2] net/idpf: enable AVX2 for split queue Rx/Tx Shaiq Wani
2025-09-25  9:20   ` [PATCH v2 1/2] net/idpf: enable AVX2 for split queue Rx Shaiq Wani
2025-09-25 16:38     ` Bruce Richardson
2025-09-25  9:20   ` [PATCH v2 2/2] net/idpf: enable AVX2 for split queue Tx Shaiq Wani
2025-09-25 16:47     ` Bruce Richardson
2025-09-26  8:54 ` [PATCH v3 0/2] enable AVX2 for split queue Rx/Tx Shaiq Wani
2025-09-26  8:54   ` [PATCH v3 1/2] net/idpf: enable AVX2 for split queue Rx Shaiq Wani
2025-09-26 11:40     ` Bruce Richardson
2025-09-26 13:09     ` Burakov, Anatoly
2025-09-26  8:54   ` [PATCH v3 2/2] net/idpf: enable AVX2 for split queue Tx Shaiq Wani
2025-09-30  9:07 ` [PATCH v4 0/2] net/idpf: enable AVX2 for split queue Rx/Tx Shaiq Wani
2025-09-30  9:07   ` Shaiq Wani [this message]
2025-09-30 13:28     ` [PATCH v4 1/2] net/idpf: enable AVX2 for split queue Rx Burakov, Anatoly
2025-09-30  9:07   ` [PATCH v4 2/2] net/idpf: enable AVX2 for split queue Tx Shaiq Wani
2025-09-30 13:28     ` Burakov, Anatoly
