From: Anatoly Burakov <anatoly.burakov@intel.com>
To: dev@dpdk.org, Bruce Richardson <bruce.richardson@intel.com>
Subject: [PATCH v3 11/13] net/intel: support wider x86 vectors for Rx rearm
Date: Mon, 12 May 2025 13:54:37 +0100
Message-ID: <ee702becb1f88facce490c19ac8135e8a5e25d27.1747054471.git.anatoly.burakov@intel.com>
In-Reply-To: <afcd4013458bdb8b970e7ed57d6a1ede0ee654b8.1747054471.git.anatoly.burakov@intel.com>
Currently, for the 32-byte descriptor format, only the SSE instruction set is
supported. Add implementations for the AVX2 and AVX512 instruction sets. Like
the existing code paths, these implementations constant-propagate everything
at compile time and thus should not affect the performance of existing code.
To improve readability and reduce duplication when supporting different
descriptor sizes, the implementation is also refactored.
Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
---
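Note on the zero-cost claim above: each rearm function is __rte_always_inline
and takes desc_len as a const parameter, so values such as desc_per_reg and
desc_per_iter are compile-time constants and the if/else on descriptor size
folds away entirely. A minimal standalone sketch of the pattern (illustrative
only, hypothetical names, assumes GCC/Clang always-inline semantics):

#include <stdint.h>
#include <stddef.h>

static inline __attribute__((always_inline)) void
rearm_sketch(void *ring, const size_t desc_len)
{
	/* a 32-byte register holds two 16-byte or one 32-byte descriptor */
	const uint8_t desc_per_reg = 32 / desc_len;
	const uint8_t desc_per_iter = desc_per_reg * 2;

	(void)ring;
	if (desc_per_iter == 2) {
		/* 32-byte descriptor path; dead code when desc_len == 16 */
	} else {
		/* 16-byte descriptor path; dead code when desc_len == 32 */
	}
}

/* each wrapper compiles down to a single specialized path */
void rearm_16b(void *ring) { rearm_sketch(ring, 16); }
void rearm_32b(void *ring) { rearm_sketch(ring, 32); }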
drivers/net/intel/common/rx_vec_sse.h | 380 ++++++++++++++------------
1 file changed, 205 insertions(+), 175 deletions(-)
diff --git a/drivers/net/intel/common/rx_vec_sse.h b/drivers/net/intel/common/rx_vec_sse.h
index 6fe0baf38b..0aeaac3dc9 100644
--- a/drivers/net/intel/common/rx_vec_sse.h
+++ b/drivers/net/intel/common/rx_vec_sse.h
@@ -48,223 +48,258 @@ _ci_rxq_rearm_get_bufs(struct ci_rx_queue *rxq, const size_t desc_len)
return 0;
}
-/*
- * SSE code path can handle both 16-byte and 32-byte descriptors with one code
- * path, as we only ever write 16 bytes at a time.
- */
-static __rte_always_inline void
-_ci_rxq_rearm_sse(struct ci_rx_queue *rxq, const size_t desc_len)
+static __rte_always_inline __m128i
+_ci_rxq_rearm_desc_sse(const __m128i vaddr)
{
const __m128i hdr_room = _mm_set1_epi64x(RTE_PKTMBUF_HEADROOM);
const __m128i zero = _mm_setzero_si128();
+ __m128i reg;
+
+ /* add headroom to address values */
+ reg = _mm_add_epi64(vaddr, hdr_room);
+
+#if RTE_IOVA_IN_MBUF
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
+ offsetof(struct rte_mbuf, buf_addr) + 8);
+ /* move IOVA to Packet Buffer Address, erase Header Buffer Address */
+ reg = _mm_unpackhi_epi64(reg, zero);
+#else
+ /* erase Header Buffer Address */
+ reg = _mm_unpacklo_epi64(reg, zero);
+#endif
+ return reg;
+}
+
+static __rte_always_inline void
+_ci_rxq_rearm_sse(struct ci_rx_queue *rxq, const size_t desc_len)
+{
const uint16_t rearm_thresh = CI_VPMD_RX_REARM_THRESH;
struct ci_rx_entry *rxp = &rxq->sw_ring[rxq->rxrearm_start];
+ const uint8_t desc_per_reg = 1;
+ const uint8_t desc_per_iter = desc_per_reg * 2;
volatile void *rxdp;
int i;
rxdp = RTE_PTR_ADD(rxq->rx_ring, rxq->rxrearm_start * desc_len);
/* Initialize the mbufs in vector, process 2 mbufs in one loop */
- for (i = 0; i < rearm_thresh; i += 2, rxp += 2, rxdp = RTE_PTR_ADD(rxdp, 2 * desc_len)) {
+ for (i = 0; i < rearm_thresh;
+ i += desc_per_iter,
+ rxp += desc_per_iter,
+ rxdp = RTE_PTR_ADD(rxdp, desc_per_iter * desc_len)) {
volatile void *ptr0 = RTE_PTR_ADD(rxdp, 0);
- volatile void *ptr1 = RTE_PTR_ADD(rxdp, desc_len);
- __m128i vaddr0, vaddr1;
- __m128i dma_addr0, dma_addr1;
- struct rte_mbuf *mb0, *mb1;
+ volatile void *ptr1 = RTE_PTR_ADD(rxdp, desc_len * desc_per_reg);
+ const struct rte_mbuf *mb0 = rxp[0].mbuf;
+ const struct rte_mbuf *mb1 = rxp[1].mbuf;
- mb0 = rxp[0].mbuf;
- mb1 = rxp[1].mbuf;
+ const __m128i vaddr0 = _mm_loadu_si128((const __m128i *)&mb0->buf_addr);
+ const __m128i vaddr1 = _mm_loadu_si128((const __m128i *)&mb1->buf_addr);
-#if RTE_IOVA_IN_MBUF
- /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
- offsetof(struct rte_mbuf, buf_addr) + 8);
-#endif
- vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
- vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
-
- /* add headroom to address values */
- vaddr0 = _mm_add_epi64(vaddr0, hdr_room);
- vaddr1 = _mm_add_epi64(vaddr1, hdr_room);
-
-#if RTE_IOVA_IN_MBUF
- /* move IOVA to Packet Buffer Address, erase Header Buffer Address */
- dma_addr0 = _mm_unpackhi_epi64(vaddr0, zero);
- dma_addr1 = _mm_unpackhi_epi64(vaddr1, zero);
-#else
- /* erase Header Buffer Address */
- dma_addr0 = _mm_unpacklo_epi64(vaddr0, zero);
- dma_addr1 = _mm_unpacklo_epi64(vaddr1, zero);
-#endif
+ const __m128i reg0 = _ci_rxq_rearm_desc_sse(vaddr0);
+ const __m128i reg1 = _ci_rxq_rearm_desc_sse(vaddr1);
/* flush desc with pa dma_addr */
- _mm_store_si128(RTE_CAST_PTR(__m128i *, ptr0), dma_addr0);
- _mm_store_si128(RTE_CAST_PTR(__m128i *, ptr1), dma_addr1);
+ _mm_store_si128(RTE_CAST_PTR(__m128i *, ptr0), reg0);
+ _mm_store_si128(RTE_CAST_PTR(__m128i *, ptr1), reg1);
}
}
#ifdef __AVX2__
-/* AVX2 version for 16-byte descriptors, handles 4 buffers at a time */
-static __rte_always_inline void
-_ci_rxq_rearm_avx2(struct ci_rx_queue *rxq)
+static __rte_always_inline __m256i
+_ci_rxq_rearm_desc_avx2(const __m128i vaddr0, const __m128i vaddr1)
{
- struct ci_rx_entry *rxp = &rxq->sw_ring[rxq->rxrearm_start];
- const uint16_t rearm_thresh = CI_VPMD_RX_REARM_THRESH;
- const size_t desc_len = 16;
- volatile void *rxdp;
const __m256i hdr_room = _mm256_set1_epi64x(RTE_PKTMBUF_HEADROOM);
const __m256i zero = _mm256_setzero_si256();
+ __m256i reg;
+
+ /* merge by casting 0 to 256-bit and inserting 1 into the high lanes */
+ reg =
+ _mm256_inserti128_si256(_mm256_castsi128_si256(vaddr0),
+ vaddr1, 1);
+
+ /* add headroom to address values */
+ reg = _mm256_add_epi64(reg, hdr_room);
+
+#if RTE_IOVA_IN_MBUF
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
+ offsetof(struct rte_mbuf, buf_addr) + 8);
+ /* extract IOVA addr into Packet Buffer Address, erase Header Buffer Address */
+ reg = _mm256_unpackhi_epi64(reg, zero);
+#else
+ /* erase Header Buffer Address */
+ reg = _mm256_unpacklo_epi64(reg, zero);
+#endif
+ return reg;
+}
+
+static __rte_always_inline void
+_ci_rxq_rearm_avx2(struct ci_rx_queue *rxq, const size_t desc_len)
+{
+ struct ci_rx_entry *rxp = &rxq->sw_ring[rxq->rxrearm_start];
+ const uint16_t rearm_thresh = CI_VPMD_RX_REARM_THRESH;
+ /* how many descriptors can fit into a register */
+ const uint8_t desc_per_reg = sizeof(__m256i) / desc_len;
+ /* how many descriptors can fit into one loop iteration */
+ const uint8_t desc_per_iter = desc_per_reg * 2;
+ volatile void *rxdp;
int i;
rxdp = RTE_PTR_ADD(rxq->rx_ring, rxq->rxrearm_start * desc_len);
- /* Initialize the mbufs in vector, process 4 mbufs in one loop */
- for (i = 0; i < rearm_thresh; i += 4, rxp += 4, rxdp = RTE_PTR_ADD(rxdp, 4 * desc_len)) {
+ /* Initialize the mbufs in vector, process 2 or 4 mbufs in one loop */
+ for (i = 0; i < rearm_thresh;
+ i += desc_per_iter,
+ rxp += desc_per_iter,
+ rxdp = RTE_PTR_ADD(rxdp, desc_per_iter * desc_len)) {
volatile void *ptr0 = RTE_PTR_ADD(rxdp, 0);
- volatile void *ptr1 = RTE_PTR_ADD(rxdp, desc_len * 2);
- __m128i vaddr0, vaddr1, vaddr2, vaddr3;
- __m256i vaddr0_1, vaddr2_3;
- __m256i dma_addr0_1, dma_addr2_3;
- struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
+ volatile void *ptr1 = RTE_PTR_ADD(rxdp, desc_len * desc_per_reg);
+ __m256i reg0, reg1;
- mb0 = rxp[0].mbuf;
- mb1 = rxp[1].mbuf;
- mb2 = rxp[2].mbuf;
- mb3 = rxp[3].mbuf;
+ if (desc_per_iter == 2) {
+			/* 16-byte descriptor, 16-byte zero, times two */
+ const __m128i zero = _mm_setzero_si128();
+ const struct rte_mbuf *mb0 = rxp[0].mbuf;
+ const struct rte_mbuf *mb1 = rxp[1].mbuf;
-#if RTE_IOVA_IN_MBUF
- /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
- offsetof(struct rte_mbuf, buf_addr) + 8);
-#endif
- vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
- vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
- vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr);
- vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr);
+ const __m128i vaddr0 = _mm_loadu_si128((const __m128i *)&mb0->buf_addr);
+ const __m128i vaddr1 = _mm_loadu_si128((const __m128i *)&mb1->buf_addr);
- /**
- * merge 0 & 1, by casting 0 to 256-bit and inserting 1
- * into the high lanes. Similarly for 2 & 3
- */
- vaddr0_1 =
- _mm256_inserti128_si256(_mm256_castsi128_si256(vaddr0),
- vaddr1, 1);
- vaddr2_3 =
- _mm256_inserti128_si256(_mm256_castsi128_si256(vaddr2),
- vaddr3, 1);
+ reg0 = _ci_rxq_rearm_desc_avx2(vaddr0, zero);
+ reg1 = _ci_rxq_rearm_desc_avx2(vaddr1, zero);
+ } else {
+ /* 16 byte descriptor times four */
+ const struct rte_mbuf *mb0 = rxp[0].mbuf;
+ const struct rte_mbuf *mb1 = rxp[1].mbuf;
+ const struct rte_mbuf *mb2 = rxp[2].mbuf;
+ const struct rte_mbuf *mb3 = rxp[3].mbuf;
- /* add headroom to address values */
- vaddr0_1 = _mm256_add_epi64(vaddr0_1, hdr_room);
- vaddr0_1 = _mm256_add_epi64(vaddr0_1, hdr_room);
+ const __m128i vaddr0 = _mm_loadu_si128((const __m128i *)&mb0->buf_addr);
+ const __m128i vaddr1 = _mm_loadu_si128((const __m128i *)&mb1->buf_addr);
+ const __m128i vaddr2 = _mm_loadu_si128((const __m128i *)&mb2->buf_addr);
+ const __m128i vaddr3 = _mm_loadu_si128((const __m128i *)&mb3->buf_addr);
-#if RTE_IOVA_IN_MBUF
- /* extract IOVA addr into Packet Buffer Address, erase Header Buffer Address */
- dma_addr0_1 = _mm256_unpackhi_epi64(vaddr0_1, zero);
- dma_addr2_3 = _mm256_unpackhi_epi64(vaddr2_3, zero);
-#else
- /* erase Header Buffer Address */
- dma_addr0_1 = _mm256_unpacklo_epi64(vaddr0_1, zero);
- dma_addr2_3 = _mm256_unpacklo_epi64(vaddr2_3, zero);
-#endif
+ reg0 = _ci_rxq_rearm_desc_avx2(vaddr0, vaddr1);
+ reg1 = _ci_rxq_rearm_desc_avx2(vaddr2, vaddr3);
+ }
/* flush desc with pa dma_addr */
- _mm256_store_si256(RTE_CAST_PTR(__m256i *, ptr0), dma_addr0_1);
- _mm256_store_si256(RTE_CAST_PTR(__m256i *, ptr1), dma_addr2_3);
+ _mm256_store_si256(RTE_CAST_PTR(__m256i *, ptr0), reg0);
+ _mm256_store_si256(RTE_CAST_PTR(__m256i *, ptr1), reg1);
}
}
#endif /* __AVX2__ */
#ifdef __AVX512VL__
-/* AVX512 version for 16-byte descriptors, handles 8 buffers at a time */
+static __rte_always_inline __m512i
+_ci_rxq_rearm_desc_avx512(const __m128i vaddr0, const __m128i vaddr1,
+ const __m128i vaddr2, const __m128i vaddr3)
+{
+ const __m512i zero = _mm512_setzero_si512();
+ const __m512i hdr_room = _mm512_set1_epi64(RTE_PKTMBUF_HEADROOM);
+ __m256i vaddr0_1, vaddr2_3;
+ __m512i reg;
+
+ /**
+ * merge 0 & 1, by casting 0 to 256-bit and inserting 1 into the high
+ * lanes. Similarly for 2 & 3.
+ */
+ vaddr0_1 =
+ _mm256_inserti128_si256(_mm256_castsi128_si256(vaddr0),
+ vaddr1, 1);
+ vaddr2_3 =
+ _mm256_inserti128_si256(_mm256_castsi128_si256(vaddr2),
+ vaddr3, 1);
+ /*
+ * merge 0+1 & 2+3, by casting 0+1 to 512-bit and inserting 2+3 into the
+ * high lanes.
+ */
+ reg =
+ _mm512_inserti64x4(_mm512_castsi256_si512(vaddr0_1),
+ vaddr2_3, 1);
+
+ /* add headroom to address values */
+ reg = _mm512_add_epi64(reg, hdr_room);
+
+#if RTE_IOVA_IN_MBUF
+ /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
+ offsetof(struct rte_mbuf, buf_addr) + 8);
+ /* extract IOVA addr into Packet Buffer Address, erase Header Buffer Address */
+ reg = _mm512_unpackhi_epi64(reg, zero);
+#else
+ /* erase Header Buffer Address */
+ reg = _mm512_unpacklo_epi64(reg, zero);
+#endif
+ return reg;
+}
+
static __rte_always_inline void
-_ci_rxq_rearm_avx512(struct ci_rx_queue *rxq)
+_ci_rxq_rearm_avx512(struct ci_rx_queue *rxq, const size_t desc_len)
{
struct ci_rx_entry *rxp = &rxq->sw_ring[rxq->rxrearm_start];
const uint16_t rearm_thresh = CI_VPMD_RX_REARM_THRESH;
- const size_t desc_len = 16;
+ /* how many descriptors can fit into a register */
+ const uint8_t desc_per_reg = sizeof(__m512i) / desc_len;
+ /* how many descriptors can fit into one loop iteration */
+ const uint8_t desc_per_iter = desc_per_reg * 2;
volatile void *rxdp;
int i;
- struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
- struct rte_mbuf *mb4, *mb5, *mb6, *mb7;
- __m512i dma_addr0_3, dma_addr4_7;
- __m512i hdr_room = _mm512_set1_epi64(RTE_PKTMBUF_HEADROOM);
- __m512i zero = _mm512_setzero_si512();
rxdp = RTE_PTR_ADD(rxq->rx_ring, rxq->rxrearm_start * desc_len);
- /* Initialize the mbufs in vector, process 8 mbufs in one loop */
- for (i = 0; i < rearm_thresh; i += 8, rxp += 8, rxdp = RTE_PTR_ADD(rxdp, 8 * desc_len)) {
+ /* Initialize the mbufs in vector, process 4 or 8 mbufs in one loop */
+ for (i = 0; i < rearm_thresh;
+ i += desc_per_iter,
+ rxp += desc_per_iter,
+ rxdp = RTE_PTR_ADD(rxdp, desc_per_iter * desc_len)) {
volatile void *ptr0 = RTE_PTR_ADD(rxdp, 0);
- volatile void *ptr1 = RTE_PTR_ADD(rxdp, desc_len * 4);
- __m128i vaddr0, vaddr1, vaddr2, vaddr3;
- __m128i vaddr4, vaddr5, vaddr6, vaddr7;
- __m256i vaddr0_1, vaddr2_3;
- __m256i vaddr4_5, vaddr6_7;
- __m512i vaddr0_3, vaddr4_7;
+ volatile void *ptr1 = RTE_PTR_ADD(rxdp, desc_len * desc_per_reg);
+ __m512i reg0, reg1;
- mb0 = rxp[0].mbuf;
- mb1 = rxp[1].mbuf;
- mb2 = rxp[2].mbuf;
- mb3 = rxp[3].mbuf;
- mb4 = rxp[4].mbuf;
- mb5 = rxp[5].mbuf;
- mb6 = rxp[6].mbuf;
- mb7 = rxp[7].mbuf;
+ if (desc_per_iter == 4) {
+			/* 16-byte descriptor, 16-byte zero, times four */
+ const __m128i zero = _mm_setzero_si128();
+ const struct rte_mbuf *mb0 = rxp[0].mbuf;
+ const struct rte_mbuf *mb1 = rxp[1].mbuf;
+ const struct rte_mbuf *mb2 = rxp[2].mbuf;
+ const struct rte_mbuf *mb3 = rxp[3].mbuf;
-#if RTE_IOVA_IN_MBUF
- /* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
- offsetof(struct rte_mbuf, buf_addr) + 8);
-#endif
- vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
- vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
- vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr);
- vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr);
- vaddr4 = _mm_loadu_si128((__m128i *)&mb4->buf_addr);
- vaddr5 = _mm_loadu_si128((__m128i *)&mb5->buf_addr);
- vaddr6 = _mm_loadu_si128((__m128i *)&mb6->buf_addr);
- vaddr7 = _mm_loadu_si128((__m128i *)&mb7->buf_addr);
+ const __m128i vaddr0 = _mm_loadu_si128((const __m128i *)&mb0->buf_addr);
+ const __m128i vaddr1 = _mm_loadu_si128((const __m128i *)&mb1->buf_addr);
+ const __m128i vaddr2 = _mm_loadu_si128((const __m128i *)&mb2->buf_addr);
+ const __m128i vaddr3 = _mm_loadu_si128((const __m128i *)&mb3->buf_addr);
- /**
- * merge 0 & 1, by casting 0 to 256-bit and inserting 1
- * into the high lanes. Similarly for 2 & 3, and so on.
- */
- vaddr0_1 =
- _mm256_inserti128_si256(_mm256_castsi128_si256(vaddr0),
- vaddr1, 1);
- vaddr2_3 =
- _mm256_inserti128_si256(_mm256_castsi128_si256(vaddr2),
- vaddr3, 1);
- vaddr4_5 =
- _mm256_inserti128_si256(_mm256_castsi128_si256(vaddr4),
- vaddr5, 1);
- vaddr6_7 =
- _mm256_inserti128_si256(_mm256_castsi128_si256(vaddr6),
- vaddr7, 1);
- vaddr0_3 =
- _mm512_inserti64x4(_mm512_castsi256_si512(vaddr0_1),
- vaddr2_3, 1);
- vaddr4_7 =
- _mm512_inserti64x4(_mm512_castsi256_si512(vaddr4_5),
- vaddr6_7, 1);
+ reg0 = _ci_rxq_rearm_desc_avx512(vaddr0, zero, vaddr1, zero);
+ reg1 = _ci_rxq_rearm_desc_avx512(vaddr2, zero, vaddr3, zero);
+ } else {
+ /* 16-byte descriptor times eight */
+ const struct rte_mbuf *mb0 = rxp[0].mbuf;
+ const struct rte_mbuf *mb1 = rxp[1].mbuf;
+ const struct rte_mbuf *mb2 = rxp[2].mbuf;
+ const struct rte_mbuf *mb3 = rxp[3].mbuf;
+ const struct rte_mbuf *mb4 = rxp[4].mbuf;
+ const struct rte_mbuf *mb5 = rxp[5].mbuf;
+ const struct rte_mbuf *mb6 = rxp[6].mbuf;
+ const struct rte_mbuf *mb7 = rxp[7].mbuf;
- /* add headroom to address values */
- vaddr0_3 = _mm512_add_epi64(vaddr0_3, hdr_room);
- dma_addr4_7 = _mm512_add_epi64(dma_addr4_7, hdr_room);
+ const __m128i vaddr0 = _mm_loadu_si128((const __m128i *)&mb0->buf_addr);
+ const __m128i vaddr1 = _mm_loadu_si128((const __m128i *)&mb1->buf_addr);
+ const __m128i vaddr2 = _mm_loadu_si128((const __m128i *)&mb2->buf_addr);
+ const __m128i vaddr3 = _mm_loadu_si128((const __m128i *)&mb3->buf_addr);
+ const __m128i vaddr4 = _mm_loadu_si128((const __m128i *)&mb4->buf_addr);
+ const __m128i vaddr5 = _mm_loadu_si128((const __m128i *)&mb5->buf_addr);
+ const __m128i vaddr6 = _mm_loadu_si128((const __m128i *)&mb6->buf_addr);
+ const __m128i vaddr7 = _mm_loadu_si128((const __m128i *)&mb7->buf_addr);
-#if RTE_IOVA_IN_MBUF
- /* extract IOVA addr into Packet Buffer Address, erase Header Buffer Address */
- dma_addr0_3 = _mm512_unpackhi_epi64(vaddr0_3, zero);
- dma_addr4_7 = _mm512_unpackhi_epi64(vaddr4_7, zero);
-#else
- /* erase Header Buffer Address */
- dma_addr0_3 = _mm512_unpacklo_epi64(vaddr0_3, zero);
- dma_addr4_7 = _mm512_unpacklo_epi64(vaddr4_7, zero);
-#endif
+ reg0 = _ci_rxq_rearm_desc_avx512(vaddr0, vaddr1, vaddr2, vaddr3);
+ reg1 = _ci_rxq_rearm_desc_avx512(vaddr4, vaddr5, vaddr6, vaddr7);
+ }
/* flush desc with pa dma_addr */
- _mm512_store_si512(RTE_CAST_PTR(__m512i *, ptr0), dma_addr0_3);
- _mm512_store_si512(RTE_CAST_PTR(__m512i *, ptr1), dma_addr4_7);
+ _mm512_store_si512(RTE_CAST_PTR(__m512i *, ptr0), reg0);
+ _mm512_store_si512(RTE_CAST_PTR(__m512i *, ptr1), reg1);
}
}
#endif /* __AVX512VL__ */
@@ -280,31 +315,26 @@ ci_rxq_rearm(struct ci_rx_queue *rxq, const size_t desc_len,
if (_ci_rxq_rearm_get_bufs(rxq, desc_len) < 0)
return;
- if (desc_len == 16) {
- switch (vec_level) {
- case CI_RX_VEC_LEVEL_AVX512:
+ switch (vec_level) {
+ case CI_RX_VEC_LEVEL_AVX512:
#ifdef __AVX512VL__
- _ci_rxq_rearm_avx512(rxq);
- break;
+ _ci_rxq_rearm_avx512(rxq, desc_len);
+ break;
#else
- /* fall back to AVX2 unless requested not to */
- /* fall through */
+ /* fall back to AVX2 unless requested not to */
+ /* fall through */
#endif
- case CI_RX_VEC_LEVEL_AVX2:
+ case CI_RX_VEC_LEVEL_AVX2:
#ifdef __AVX2__
- _ci_rxq_rearm_avx2(rxq);
+ _ci_rxq_rearm_avx2(rxq, desc_len);
break;
#else
/* fall back to SSE if AVX2 isn't supported */
/* fall through */
#endif
- case CI_RX_VEC_LEVEL_SSE:
- _ci_rxq_rearm_sse(rxq, desc_len);
- break;
- }
- } else {
- /* for 32-byte descriptors only support SSE */
+ case CI_RX_VEC_LEVEL_SSE:
_ci_rxq_rearm_sse(rxq, desc_len);
+ break;
}
rxq->rxrearm_start += rearm_thresh;
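For reference, the RTE_IOVA_IN_MBUF branch in all three helpers relies on
buf_iova sitting exactly 8 bytes after buf_addr (enforced by the
RTE_BUILD_BUG_ON in the diff), so a single 16-byte load captures both
addresses, one add applies the headroom to both at once, and one unpack
selects the wanted address while zeroing the other descriptor half. A
standalone illustration (not part of the patch; toy struct mimicking just
that slice of rte_mbuf's layout):

#include <emmintrin.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct toy_mbuf {
	void *buf_addr;    /* virtual address, low 8 bytes */
	uint64_t buf_iova; /* IO address, next 8 bytes */
};

int main(void)
{
	struct toy_mbuf mb = { .buf_addr = (void *)0x1000, .buf_iova = 0x2000 };
	const __m128i hdr_room = _mm_set1_epi64x(128); /* stand-in for RTE_PKTMBUF_HEADROOM */
	const __m128i zero = _mm_setzero_si128();
	uint64_t out[2];

	/* one load picks up buf_addr (low qword) and buf_iova (high qword) */
	__m128i reg = _mm_loadu_si128((const __m128i *)&mb.buf_addr);
	/* headroom is added to both addresses in one instruction */
	reg = _mm_add_epi64(reg, hdr_room);

	/* IOVA mode: keep the high qword, zero the other descriptor half */
	_mm_storeu_si128((__m128i *)out, _mm_unpackhi_epi64(reg, zero));
	printf("iova desc: %#" PRIx64 " %#" PRIx64 "\n", out[0], out[1]); /* 0x2080 0 */

	/* VA mode: keep the low qword instead */
	_mm_storeu_si128((__m128i *)out, _mm_unpacklo_epi64(reg, zero));
	printf("va desc:   %#" PRIx64 " %#" PRIx64 "\n", out[0], out[1]); /* 0x1080 0 */
	return 0;
}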
--
2.47.1