From: Wenzhuo Lu <wenzhuo.lu@intel.com>
To: dev@dpdk.org
Cc: Wenzhuo Lu <wenzhuo.lu@intel.com>
Subject: [dpdk-dev] [PATCH v2 2/2] net/ice: add Rx AVX2 offload path
Date: Mon, 28 Jun 2021 15:53:04 +0800
Message-ID: <1624866784-2458-3-git-send-email-wenzhuo.lu@intel.com>
In-Reply-To: <1624866784-2458-1-git-send-email-wenzhuo.lu@intel.com>
Add a specific Rx AVX2 path that supports the HW offload
features: checksum, VLAN stripping and RSS hash.
This path is chosen automatically according to the configuration.
The common receive routine is marked 'inline', so the compiler
generates the duplicated code for each path.
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
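The pattern, in a minimal sketch (function names here are
illustrative, not the driver's actual symbols): the common receive
routine takes a constant 'offload' flag and is always-inline, so the
compiler emits one specialized copy per entry point, with the dead
branches removed.

#include <stdbool.h>
#include <stdint.h>
#include <rte_common.h>
#include <rte_mbuf.h>

/* Common body: 'offload' is a compile-time constant at every call
 * site, so the branch below is resolved by the compiler, not at
 * run time. */
static __rte_always_inline uint16_t
recv_raw(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
	 bool offload)
{
	uint16_t nb_rx = 0;

	RTE_SET_USED(rx_queue);
	RTE_SET_USED(rx_pkts);
	RTE_SET_USED(nb_pkts);
	/* ... descriptor parsing common to both paths ... */
	if (offload) {
		/* checksum/VLAN/RSS flag extraction exists only in
		 * the specialized offload copy */
	}
	return nb_rx;
}

/* Two exported entry points, each compiled as its own copy. */
uint16_t
recv_pkts(void *rxq, struct rte_mbuf **pkts, uint16_t n)
{
	return recv_raw(rxq, pkts, n, false);
}

uint16_t
recv_pkts_offload(void *rxq, struct rte_mbuf **pkts, uint16_t n)
{
	return recv_raw(rxq, pkts, n, true);
}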
doc/guides/rel_notes/release_21_08.rst | 6 +
drivers/net/ice/ice_rxtx.c | 50 ++++--
drivers/net/ice/ice_rxtx.h | 5 +
drivers/net/ice/ice_rxtx_vec_avx2.c | 296 +++++++++++++++++++--------------
4 files changed, 217 insertions(+), 140 deletions(-)
diff --git a/doc/guides/rel_notes/release_21_08.rst b/doc/guides/rel_notes/release_21_08.rst
index a6ecfdf..203b772 100644
--- a/doc/guides/rel_notes/release_21_08.rst
+++ b/doc/guides/rel_notes/release_21_08.rst
@@ -55,6 +55,12 @@ New Features
Also, make sure to start the actual text at the margin.
=======================================================
+* **Updated Intel ice driver.**
+
+ * Added new Rx and Tx AVX2 paths that use the HW offload
+ features. When the HW offload features are configured, the offload
+ paths are chosen automatically. In parallel, support for the HW
+ offload features was removed from the legacy AVX2 paths.
Removed Items
-------------
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 5d7ca60..27fd248 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -1999,7 +1999,9 @@
dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512_offload ||
#endif
dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
- dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2)
+ dev->rx_pkt_burst == ice_recv_pkts_vec_avx2_offload ||
+ dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2 ||
+ dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2_offload)
return ptypes;
#endif
@@ -3058,7 +3060,7 @@
#ifdef RTE_ARCH_X86
struct ice_rx_queue *rxq;
int i;
- int rx_check_ret;
+ int rx_check_ret = -1;
bool use_avx512 = false;
bool use_avx2 = false;
@@ -3113,14 +3115,25 @@
ice_recv_scattered_pkts_vec_avx512;
}
#endif
+ } else if (use_avx2) {
+ if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+ PMD_DRV_LOG(NOTICE,
+ "Using AVX2 OFFLOAD Vector Scattered Rx (port %d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst =
+ ice_recv_scattered_pkts_vec_avx2_offload;
+ } else {
+ PMD_DRV_LOG(NOTICE,
+ "Using AVX2 Vector Scattered Rx (port %d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst =
+ ice_recv_scattered_pkts_vec_avx2;
+ }
} else {
PMD_DRV_LOG(DEBUG,
- "Using %sVector Scattered Rx (port %d).",
- use_avx2 ? "avx2 " : "",
+ "Using Vector Scattered Rx (port %d).",
dev->data->port_id);
- dev->rx_pkt_burst = use_avx2 ?
- ice_recv_scattered_pkts_vec_avx2 :
- ice_recv_scattered_pkts_vec;
+ dev->rx_pkt_burst = ice_recv_scattered_pkts_vec;
}
} else {
if (use_avx512) {
@@ -3139,14 +3152,25 @@
ice_recv_pkts_vec_avx512;
}
#endif
+ } else if (use_avx2) {
+ if (rx_check_ret == ICE_VECTOR_OFFLOAD_PATH) {
+ PMD_DRV_LOG(NOTICE,
+ "Using AVX2 OFFLOAD Vector Rx (port %d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst =
+ ice_recv_pkts_vec_avx2_offload;
+ } else {
+ PMD_DRV_LOG(NOTICE,
+ "Using AVX2 Vector Rx (port %d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst =
+ ice_recv_pkts_vec_avx2;
+ }
} else {
PMD_DRV_LOG(DEBUG,
- "Using %sVector Rx (port %d).",
- use_avx2 ? "avx2 " : "",
+ "Using Vector Rx (port %d).",
dev->data->port_id);
- dev->rx_pkt_burst = use_avx2 ?
- ice_recv_pkts_vec_avx2 :
- ice_recv_pkts_vec;
+ dev->rx_pkt_burst = ice_recv_pkts_vec;
}
}
return;
@@ -3191,7 +3215,9 @@
{ ice_recv_pkts_vec_avx512_offload, "Offload Vector AVX512" },
#endif
{ ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
+ { ice_recv_scattered_pkts_vec_avx2_offload, "Offload Vector AVX2 Scattered" },
{ ice_recv_pkts_vec_avx2, "Vector AVX2" },
+ { ice_recv_pkts_vec_avx2_offload, "Offload Vector AVX2" },
{ ice_recv_scattered_pkts_vec, "Vector SSE Scattered" },
{ ice_recv_pkts_vec, "Vector SSE" },
#endif
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 595dc66..bd28a68 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -250,9 +250,14 @@ uint16_t ice_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
uint16_t ice_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t ice_recv_pkts_vec_avx2_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
uint16_t ice_recv_scattered_pkts_vec_avx2(void *rx_queue,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t ice_recv_scattered_pkts_vec_avx2_offload(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
uint16_t ice_xmit_pkts_vec_avx2_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c
index b83c1ac..2841597 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx2.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx2.c
@@ -16,7 +16,7 @@
return ice_rxq_rearm_common(rxq, false);
}
-static inline __m256i
+static __rte_always_inline __m256i
ice_flex_rxd_to_fdir_flags_vec_avx2(const __m256i fdir_id0_7)
{
#define FDID_MIS_MAGIC 0xFFFFFFFF
@@ -35,9 +35,10 @@
return fdir_flags;
}
-static inline uint16_t
+static __rte_always_inline uint16_t
_ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts, uint8_t *split_packet)
+ uint16_t nb_pkts, uint8_t *split_packet,
+ bool offload)
{
#define ICE_DESCS_PER_LOOP_AVX 8
@@ -385,39 +386,43 @@
*/
__m256i status0_7 = _mm256_unpacklo_epi64(status4_7,
status0_3);
+ __m256i mbuf_flags = _mm256_set1_epi32(0);
- /* now do flag manipulation */
+ if (offload) {
+ /* now do flag manipulation */
- /* get only flag/error bits we want */
- const __m256i flag_bits =
- _mm256_and_si256(status0_7, flags_mask);
- /**
- * l3_l4_error flags, shuffle, then shift to correct adjustment
- * of flags in flags_shuf, and finally mask out extra bits
- */
- __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
- _mm256_srli_epi32(flag_bits, 4));
- l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
-
- __m256i l4_outer_mask = _mm256_set1_epi32(0x6);
- __m256i l4_outer_flags =
- _mm256_and_si256(l3_l4_flags, l4_outer_mask);
- l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20);
-
- __m256i l3_l4_mask = _mm256_set1_epi32(~0x6);
- l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask);
- l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags);
- l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
- /* set rss and vlan flags */
- const __m256i rss_vlan_flag_bits =
- _mm256_srli_epi32(flag_bits, 12);
- const __m256i rss_vlan_flags =
- _mm256_shuffle_epi8(rss_vlan_flags_shuf,
- rss_vlan_flag_bits);
-
- /* merge flags */
- __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
- rss_vlan_flags);
+ /* get only flag/error bits we want */
+ const __m256i flag_bits =
+ _mm256_and_si256(status0_7, flags_mask);
+ /**
+ * l3_l4_error flags, shuffle, then shift to correct adjustment
+ * of flags in flags_shuf, and finally mask out extra bits
+ */
+ __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf,
+ _mm256_srli_epi32(flag_bits, 4));
+ l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
+
+ __m256i l4_outer_mask = _mm256_set1_epi32(0x6);
+ __m256i l4_outer_flags =
+ _mm256_and_si256(l3_l4_flags, l4_outer_mask);
+ l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20);
+
+ __m256i l3_l4_mask = _mm256_set1_epi32(~0x6);
+
+ l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask);
+ l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags);
+ l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
+ /* set rss and vlan flags */
+ const __m256i rss_vlan_flag_bits =
+ _mm256_srli_epi32(flag_bits, 12);
+ const __m256i rss_vlan_flags =
+ _mm256_shuffle_epi8(rss_vlan_flags_shuf,
+ rss_vlan_flag_bits);
+
+ /* merge flags */
+ mbuf_flags = _mm256_or_si256(l3_l4_flags,
+ rss_vlan_flags);
+ }
if (rxq->fdir_enabled) {
const __m256i fdir_id4_7 =
@@ -461,95 +466,97 @@
_mm256_extract_epi32(fdir_id0_7, 4);
} /* if() on fdir_enabled */
+ if (offload) {
#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
- /**
- * needs to load 2nd 16B of each desc for RSS hash parsing,
- * will cause performance drop to get into this context.
- */
- if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_RSS_HASH) {
- /* load bottom half of every 32B desc */
- const __m128i raw_desc_bh7 =
- _mm_load_si128
- ((void *)(&rxdp[7].wb.status_error1));
- rte_compiler_barrier();
- const __m128i raw_desc_bh6 =
- _mm_load_si128
- ((void *)(&rxdp[6].wb.status_error1));
- rte_compiler_barrier();
- const __m128i raw_desc_bh5 =
- _mm_load_si128
- ((void *)(&rxdp[5].wb.status_error1));
- rte_compiler_barrier();
- const __m128i raw_desc_bh4 =
- _mm_load_si128
- ((void *)(&rxdp[4].wb.status_error1));
- rte_compiler_barrier();
- const __m128i raw_desc_bh3 =
- _mm_load_si128
- ((void *)(&rxdp[3].wb.status_error1));
- rte_compiler_barrier();
- const __m128i raw_desc_bh2 =
- _mm_load_si128
- ((void *)(&rxdp[2].wb.status_error1));
- rte_compiler_barrier();
- const __m128i raw_desc_bh1 =
- _mm_load_si128
- ((void *)(&rxdp[1].wb.status_error1));
- rte_compiler_barrier();
- const __m128i raw_desc_bh0 =
- _mm_load_si128
- ((void *)(&rxdp[0].wb.status_error1));
-
- __m256i raw_desc_bh6_7 =
- _mm256_inserti128_si256
- (_mm256_castsi128_si256(raw_desc_bh6),
- raw_desc_bh7, 1);
- __m256i raw_desc_bh4_5 =
- _mm256_inserti128_si256
- (_mm256_castsi128_si256(raw_desc_bh4),
- raw_desc_bh5, 1);
- __m256i raw_desc_bh2_3 =
- _mm256_inserti128_si256
- (_mm256_castsi128_si256(raw_desc_bh2),
- raw_desc_bh3, 1);
- __m256i raw_desc_bh0_1 =
- _mm256_inserti128_si256
- (_mm256_castsi128_si256(raw_desc_bh0),
- raw_desc_bh1, 1);
-
/**
- * to shift the 32b RSS hash value to the
- * highest 32b of each 128b before mask
+ * needs to load 2nd 16B of each desc for RSS hash parsing,
+ * will cause performance drop to get into this context.
*/
- __m256i rss_hash6_7 =
- _mm256_slli_epi64(raw_desc_bh6_7, 32);
- __m256i rss_hash4_5 =
- _mm256_slli_epi64(raw_desc_bh4_5, 32);
- __m256i rss_hash2_3 =
- _mm256_slli_epi64(raw_desc_bh2_3, 32);
- __m256i rss_hash0_1 =
- _mm256_slli_epi64(raw_desc_bh0_1, 32);
-
- __m256i rss_hash_msk =
- _mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
- 0xFFFFFFFF, 0, 0, 0);
-
- rss_hash6_7 = _mm256_and_si256
- (rss_hash6_7, rss_hash_msk);
- rss_hash4_5 = _mm256_and_si256
- (rss_hash4_5, rss_hash_msk);
- rss_hash2_3 = _mm256_and_si256
- (rss_hash2_3, rss_hash_msk);
- rss_hash0_1 = _mm256_and_si256
- (rss_hash0_1, rss_hash_msk);
-
- mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
- mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
- mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
- mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
- } /* if() on RSS hash parsing */
+ if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_RSS_HASH) {
+ /* load bottom half of every 32B desc */
+ const __m128i raw_desc_bh7 =
+ _mm_load_si128
+ ((void *)(&rxdp[7].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh6 =
+ _mm_load_si128
+ ((void *)(&rxdp[6].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh5 =
+ _mm_load_si128
+ ((void *)(&rxdp[5].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh4 =
+ _mm_load_si128
+ ((void *)(&rxdp[4].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh3 =
+ _mm_load_si128
+ ((void *)(&rxdp[3].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh2 =
+ _mm_load_si128
+ ((void *)(&rxdp[2].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh1 =
+ _mm_load_si128
+ ((void *)(&rxdp[1].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh0 =
+ _mm_load_si128
+ ((void *)(&rxdp[0].wb.status_error1));
+
+ __m256i raw_desc_bh6_7 =
+ _mm256_inserti128_si256
+ (_mm256_castsi128_si256(raw_desc_bh6),
+ raw_desc_bh7, 1);
+ __m256i raw_desc_bh4_5 =
+ _mm256_inserti128_si256
+ (_mm256_castsi128_si256(raw_desc_bh4),
+ raw_desc_bh5, 1);
+ __m256i raw_desc_bh2_3 =
+ _mm256_inserti128_si256
+ (_mm256_castsi128_si256(raw_desc_bh2),
+ raw_desc_bh3, 1);
+ __m256i raw_desc_bh0_1 =
+ _mm256_inserti128_si256
+ (_mm256_castsi128_si256(raw_desc_bh0),
+ raw_desc_bh1, 1);
+
+ /**
+ * to shift the 32b RSS hash value to the
+ * highest 32b of each 128b before mask
+ */
+ __m256i rss_hash6_7 =
+ _mm256_slli_epi64(raw_desc_bh6_7, 32);
+ __m256i rss_hash4_5 =
+ _mm256_slli_epi64(raw_desc_bh4_5, 32);
+ __m256i rss_hash2_3 =
+ _mm256_slli_epi64(raw_desc_bh2_3, 32);
+ __m256i rss_hash0_1 =
+ _mm256_slli_epi64(raw_desc_bh0_1, 32);
+
+ __m256i rss_hash_msk =
+ _mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
+ 0xFFFFFFFF, 0, 0, 0);
+
+ rss_hash6_7 = _mm256_and_si256
+ (rss_hash6_7, rss_hash_msk);
+ rss_hash4_5 = _mm256_and_si256
+ (rss_hash4_5, rss_hash_msk);
+ rss_hash2_3 = _mm256_and_si256
+ (rss_hash2_3, rss_hash_msk);
+ rss_hash0_1 = _mm256_and_si256
+ (rss_hash0_1, rss_hash_msk);
+
+ mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
+ mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
+ mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
+ mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
+ } /* if() on RSS hash parsing */
#endif
+ }
/**
* At this point, we have the 8 sets of flags in the low 16-bits
@@ -701,7 +708,16 @@
ice_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
- return _ice_recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts, NULL);
+ return _ice_recv_raw_pkts_vec_avx2(rx_queue, rx_pkts,
+ nb_pkts, NULL, false);
+}
+
+uint16_t
+ice_recv_pkts_vec_avx2_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _ice_recv_raw_pkts_vec_avx2(rx_queue, rx_pkts,
+ nb_pkts, NULL, true);
}
/**
@@ -709,16 +725,16 @@
* Notice:
* - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
*/
-static uint16_t
+static __rte_always_inline uint16_t
ice_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+ uint16_t nb_pkts, bool offload)
{
struct ice_rx_queue *rxq = rx_queue;
uint8_t split_flags[ICE_VPMD_RX_BURST] = {0};
/* get some new buffers */
uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx2(rxq, rx_pkts, nb_pkts,
- split_flags);
+ split_flags, offload);
if (nb_bufs == 0)
return 0;
@@ -751,22 +767,46 @@
* Notice:
* - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
*/
-uint16_t
-ice_recv_scattered_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+static __rte_always_inline uint16_t
+ice_recv_scattered_pkts_vec_avx2_common(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts,
+ bool offload)
{
uint16_t retval = 0;
while (nb_pkts > ICE_VPMD_RX_BURST) {
uint16_t burst = ice_recv_scattered_burst_vec_avx2(rx_queue,
- rx_pkts + retval, ICE_VPMD_RX_BURST);
+ rx_pkts + retval, ICE_VPMD_RX_BURST, offload);
retval += burst;
nb_pkts -= burst;
if (burst < ICE_VPMD_RX_BURST)
return retval;
}
return retval + ice_recv_scattered_burst_vec_avx2(rx_queue,
- rx_pkts + retval, nb_pkts);
+ rx_pkts + retval, nb_pkts, offload);
+}
+
+uint16_t
+ice_recv_scattered_pkts_vec_avx2(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return ice_recv_scattered_pkts_vec_avx2_common(rx_queue,
+ rx_pkts,
+ nb_pkts,
+ false);
+}
+
+uint16_t
+ice_recv_scattered_pkts_vec_avx2_offload(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return ice_recv_scattered_pkts_vec_avx2_common(rx_queue,
+ rx_pkts,
+ nb_pkts,
+ true);
}
static __rte_always_inline void
--
1.8.3.1
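To see which Rx path was selected at run time, an application can
query the burst mode info that the driver registers (see the burst
info table updated in ice_rxtx.c above). A minimal sketch, assuming
port_id is valid and queue 0 is set up; the helper name is
illustrative and not part of the patch:

#include <stdio.h>
#include <rte_ethdev.h>

/* Illustrative helper: print the Rx burst mode string for queue 0,
 * e.g. "Offload Vector AVX2" when the new path is in use. */
static void
print_rx_burst_mode(uint16_t port_id)
{
	struct rte_eth_burst_mode mode;

	if (rte_eth_rx_burst_mode_get(port_id, 0, &mode) == 0)
		printf("port %u Rx burst mode: %s\n", port_id, mode.info);
}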