From: Jeff Guo <jia.guo@intel.com>
To: qiming.yang@intel.com, beilei.xing@intel.com, wei.zhao1@intel.com, qi.z.zhang@intel.com, jingjing.wu@intel.com
Cc: bruce.richardson@intel.com, dev@dpdk.org, jia.guo@intel.com, helin.zhang@intel.com, mb@smartsharesystems.com, ferruh.yigit@intel.com, haiyue.wang@intel.com, stephen@networkplumber.org, barbette@kth.se
Date: Wed, 9 Sep 2020 14:36:32 +0800
Message-Id: <20200909063636.60205-2-jia.guo@intel.com>
In-Reply-To: <20200909063636.60205-1-jia.guo@intel.com>
References: <20200827075452.1751-1-jia.guo@intel.com> <20200909063636.60205-1-jia.guo@intel.com>
Subject: [dpdk-dev] [PATCH v3 1/5] net/iavf: fix vector rx burst for iavf

Remove the limitation on burst size in the vector Rx path, since it
should retrieve as many received packets as possible. Also make the
scattered receive path use a wrapper function so that bursts are
maximized, and do some code cleanup in the vector Rx path.
Signed-off-by: Jeff Guo --- drivers/net/iavf/iavf_rxtx.h | 1 + drivers/net/iavf/iavf_rxtx_vec_avx2.c | 78 ++++++++--------- drivers/net/iavf/iavf_rxtx_vec_sse.c | 119 ++++++++++++++++++-------- 3 files changed, 121 insertions(+), 77 deletions(-) diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h index 59625a979..f71f9fbdb 100644 --- a/drivers/net/iavf/iavf_rxtx.h +++ b/drivers/net/iavf/iavf_rxtx.h @@ -21,6 +21,7 @@ #define IAVF_VPMD_TX_MAX_BURST 32 #define IAVF_RXQ_REARM_THRESH 32 #define IAVF_VPMD_DESCS_PER_LOOP 4 +#define IAVF_VPMD_DESCS_PER_LOOP_AVX 8 #define IAVF_VPMD_TX_MAX_FREE_BUF 64 #define IAVF_NO_VECTOR_FLAGS ( \ diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/iavf/iavf_rxtx_vec_avx2.c index e5e0fd309..9816adbaa 100644 --- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c +++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c @@ -29,7 +29,7 @@ iavf_rxq_rearm(struct iavf_rx_queue *rxq) __m128i dma_addr0; dma_addr0 = _mm_setzero_si128(); - for (i = 0; i < IAVF_VPMD_DESCS_PER_LOOP; i++) { + for (i = 0; i < IAVF_VPMD_DESCS_PER_LOOP_AVX; i++) { rxp[i] = &rxq->fake_mbuf; _mm_store_si128((__m128i *)&rxdp[i].read, dma_addr0); @@ -134,13 +134,19 @@ iavf_rxq_rearm(struct iavf_rx_queue *rxq) #define PKTLEN_SHIFT 10 +/** + * vPMD raw receive routine for flex RxD, + * only accept(nb_pkts >= IAVF_VPMD_DESCS_PER_LOOP_AVX) + * + * Notice: + * - nb_pkts < IAVF_VPMD_DESCS_PER_LOOP_AVX, just return no packet + * - floor align nb_pkts to a IAVF_VPMD_DESCS_PER_LOOP_AVX power-of-two + */ static inline uint16_t _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts, uint8_t *split_packet) { -#define IAVF_DESCS_PER_LOOP_AVX 8 - /* const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; */ const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl; @@ -153,8 +159,8 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq, rte_prefetch0(rxdp); - /* nb_pkts has to be floor-aligned to IAVF_DESCS_PER_LOOP_AVX */ - nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_DESCS_PER_LOOP_AVX); + /* nb_pkts has to be floor-aligned to IAVF_VPMD_DESCS_PER_LOOP_AVX */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_VPMD_DESCS_PER_LOOP_AVX); /* See if we need to rearm the RX queue - gives the prefetch a bit * of time to act @@ -297,8 +303,8 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq, uint16_t i, received; for (i = 0, received = 0; i < nb_pkts; - i += IAVF_DESCS_PER_LOOP_AVX, - rxdp += IAVF_DESCS_PER_LOOP_AVX) { + i += IAVF_VPMD_DESCS_PER_LOOP_AVX, + rxdp += IAVF_VPMD_DESCS_PER_LOOP_AVX) { /* step 1, copy over 8 mbuf pointers to rx_pkts array */ _mm256_storeu_si256((void *)&rx_pkts[i], _mm256_loadu_si256((void *)&sw_ring[i])); @@ -368,7 +374,7 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq, if (split_packet) { int j; - for (j = 0; j < IAVF_DESCS_PER_LOOP_AVX; j++) + for (j = 0; j < IAVF_VPMD_DESCS_PER_LOOP_AVX; j++) rte_mbuf_prefetch_part2(rx_pkts[i + j]); } @@ -583,7 +589,7 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq, split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle); *(uint64_t *)split_packet = _mm_cvtsi128_si64(split_bits); - split_packet += IAVF_DESCS_PER_LOOP_AVX; + split_packet += IAVF_VPMD_DESCS_PER_LOOP_AVX; } /* perform dd_check */ @@ -599,7 +605,7 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq, (_mm_cvtsi128_si64 (_mm256_castsi256_si128(status0_7))); received += burst; - if (burst != IAVF_DESCS_PER_LOOP_AVX) + if (burst != IAVF_VPMD_DESCS_PER_LOOP_AVX) break; } @@ -633,13 +639,19 @@ 
flex_rxd_to_fdir_flags_vec_avx2(const __m256i fdir_id0_7) return fdir_flags; } +/** + * vPMD raw receive routine, + * only accept(nb_pkts >= IAVF_VPMD_DESCS_PER_LOOP_AVX) + * + * Notice: + * - nb_pkts < IAVF_VPMD_DESCS_PER_LOOP_AVX, just return no packet + * - floor align nb_pkts to a IAVF_VPMD_DESCS_PER_LOOP_AVX power-of-two + */ static inline uint16_t _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts, uint8_t *split_packet) { -#define IAVF_DESCS_PER_LOOP_AVX 8 - const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl; const __m256i mbuf_init = _mm256_set_epi64x(0, 0, @@ -650,8 +662,8 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq, rte_prefetch0(rxdp); - /* nb_pkts has to be floor-aligned to IAVF_DESCS_PER_LOOP_AVX */ - nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_DESCS_PER_LOOP_AVX); + /* nb_pkts has to be floor-aligned to IAVF_VPMD_DESCS_PER_LOOP_AVX */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_VPMD_DESCS_PER_LOOP_AVX); /* See if we need to rearm the RX queue - gives the prefetch a bit * of time to act @@ -794,8 +806,8 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq, uint16_t i, received; for (i = 0, received = 0; i < nb_pkts; - i += IAVF_DESCS_PER_LOOP_AVX, - rxdp += IAVF_DESCS_PER_LOOP_AVX) { + i += IAVF_VPMD_DESCS_PER_LOOP_AVX, + rxdp += IAVF_VPMD_DESCS_PER_LOOP_AVX) { /* step 1, copy over 8 mbuf pointers to rx_pkts array */ _mm256_storeu_si256((void *)&rx_pkts[i], _mm256_loadu_si256((void *)&sw_ring[i])); @@ -851,7 +863,7 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq, if (split_packet) { int j; - for (j = 0; j < IAVF_DESCS_PER_LOOP_AVX; j++) + for (j = 0; j < IAVF_VPMD_DESCS_PER_LOOP_AVX; j++) rte_mbuf_prefetch_part2(rx_pkts[i + j]); } @@ -1193,7 +1205,7 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq, split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle); *(uint64_t *)split_packet = _mm_cvtsi128_si64(split_bits); - split_packet += IAVF_DESCS_PER_LOOP_AVX; + split_packet += IAVF_VPMD_DESCS_PER_LOOP_AVX; } /* perform dd_check */ @@ -1209,7 +1221,7 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq, (_mm_cvtsi128_si64 (_mm256_castsi256_si128(status0_7))); received += burst; - if (burst != IAVF_DESCS_PER_LOOP_AVX) + if (burst != IAVF_VPMD_DESCS_PER_LOOP_AVX) break; } @@ -1224,10 +1236,6 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq, return received; } -/** - * Notice: - * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet - */ uint16_t iavf_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) @@ -1235,10 +1243,6 @@ iavf_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, return _iavf_recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts, NULL); } -/** - * Notice: - * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet - */ uint16_t iavf_recv_pkts_vec_avx2_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) @@ -1249,8 +1253,6 @@ iavf_recv_pkts_vec_avx2_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts, /** * vPMD receive routine that reassembles single burst of 32 scattered packets - * Notice: - * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet */ static uint16_t iavf_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, @@ -1259,6 +1261,9 @@ iavf_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, struct iavf_rx_queue *rxq = rx_queue; uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0}; + /* split_flags only can 
support max of IAVF_VPMD_RX_MAX_BURST */ + nb_pkts = RTE_MIN(nb_pkts, IAVF_VPMD_RX_MAX_BURST); + /* get some new buffers */ uint16_t nb_bufs = _iavf_recv_raw_pkts_vec_avx2(rxq, rx_pkts, nb_pkts, split_flags); @@ -1290,9 +1295,6 @@ iavf_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, /** * vPMD receive routine that reassembles scattered packets. - * Main receive routine that can handle arbitrary burst sizes - * Notice: - * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet */ uint16_t iavf_recv_scattered_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, @@ -1313,10 +1315,8 @@ iavf_recv_scattered_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, } /** - * vPMD receive routine that reassembles single burst of - * 32 scattered packets for flex RxD - * Notice: - * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet + * vPMD receive routine that reassembles single burst of 32 scattered packets + * for flex RxD */ static uint16_t iavf_recv_scattered_burst_vec_avx2_flex_rxd(void *rx_queue, @@ -1326,6 +1326,9 @@ iavf_recv_scattered_burst_vec_avx2_flex_rxd(void *rx_queue, struct iavf_rx_queue *rxq = rx_queue; uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0}; + /* split_flags only can support max of IAVF_VPMD_RX_MAX_BURST */ + nb_pkts = RTE_MIN(nb_pkts, IAVF_VPMD_RX_MAX_BURST); + /* get some new buffers */ uint16_t nb_bufs = _iavf_recv_raw_pkts_vec_avx2_flex_rxd(rxq, rx_pkts, nb_pkts, split_flags); @@ -1357,9 +1360,6 @@ iavf_recv_scattered_burst_vec_avx2_flex_rxd(void *rx_queue, /** * vPMD receive routine that reassembles scattered packets for flex RxD. - * Main receive routine that can handle arbitrary burst sizes - * Notice: - * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet */ uint16_t iavf_recv_scattered_pkts_vec_avx2_flex_rxd(void *rx_queue, diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c index 85c5bd4af..b5362ecf3 100644 --- a/drivers/net/iavf/iavf_rxtx_vec_sse.c +++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c @@ -379,10 +379,12 @@ flex_desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts, rx_pkts[3]->packet_type = type_table[_mm_extract_epi16(ptype_all, 7)]; } -/* Notice: +/** + * vPMD raw receive routine, only accept(nb_pkts >= IAVF_VPMD_DESCS_PER_LOOP) + * + * Notice: * - nb_pkts < IAVF_VPMD_DESCS_PER_LOOP, just return no packet - * - nb_pkts > IAVF_VPMD_RX_MAX_BURST, only scan IAVF_VPMD_RX_MAX_BURST - * numbers of DD bits + * - floor align nb_pkts to a IAVF_VPMD_DESCS_PER_LOOP power-of-two */ static inline uint16_t _recv_raw_pkts_vec(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_pkts, @@ -413,9 +415,6 @@ _recv_raw_pkts_vec(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_pkts, offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8); __m128i dd_check, eop_check; - /* nb_pkts shall be less equal than IAVF_VPMD_RX_MAX_BURST */ - nb_pkts = RTE_MIN(nb_pkts, IAVF_VPMD_RX_MAX_BURST); - /* nb_pkts has to be floor-aligned to IAVF_VPMD_DESCS_PER_LOOP */ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_VPMD_DESCS_PER_LOOP); @@ -627,10 +626,13 @@ _recv_raw_pkts_vec(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_pkts, return nb_pkts_recd; } -/* Notice: +/** + * vPMD raw receive routine for flex RxD, + * only accept(nb_pkts >= IAVF_VPMD_DESCS_PER_LOOP) + * + * Notice: * - nb_pkts < IAVF_VPMD_DESCS_PER_LOOP, just return no packet - * - nb_pkts > IAVF_VPMD_RX_MAX_BURST, only scan IAVF_VPMD_RX_MAX_BURST - * numbers of DD bits + * - floor align nb_pkts to a IAVF_VPMD_DESCS_PER_LOOP power-of-two */ static inline uint16_t 
_recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq, @@ -688,9 +690,6 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq, const __m128i eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL); - /* nb_pkts shall be less equal than IAVF_VPMD_RX_MAX_BURST */ - nb_pkts = RTE_MIN(nb_pkts, IAVF_VPMD_RX_MAX_BURST); - /* nb_pkts has to be floor-aligned to IAVF_VPMD_DESCS_PER_LOOP */ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_VPMD_DESCS_PER_LOOP); @@ -921,11 +920,6 @@ _recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq, return nb_pkts_recd; } -/* Notice: - * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet - * - nb_pkts > IAVF_VPMD_RX_MAX_BURST, only scan IAVF_VPMD_RX_MAX_BURST - * numbers of DD bits - */ uint16_t iavf_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) @@ -933,11 +927,6 @@ iavf_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL); } -/* Notice: - * - nb_pkts < IAVF_DESCS_PER_LOOP, just return no packet - * - nb_pkts > IAVF_VPMD_RX_MAX_BURST, only scan IAVF_VPMD_RX_MAX_BURST - * numbers of DD bits - */ uint16_t iavf_recv_pkts_vec_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) @@ -945,20 +934,20 @@ iavf_recv_pkts_vec_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts, return _recv_raw_pkts_vec_flex_rxd(rx_queue, rx_pkts, nb_pkts, NULL); } -/* vPMD receive routine that reassembles scattered packets - * Notice: - * - nb_pkts < IAVF_VPMD_DESCS_PER_LOOP, just return no packet - * - nb_pkts > VPMD_RX_MAX_BURST, only scan IAVF_VPMD_RX_MAX_BURST - * numbers of DD bits +/** + * vPMD receive routine that reassembles single burst of 32 scattered packets */ -uint16_t -iavf_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, - uint16_t nb_pkts) +static uint16_t +iavf_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) { struct iavf_rx_queue *rxq = rx_queue; uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0}; unsigned int i = 0; + /* split_flags only can support max of IAVF_VPMD_RX_MAX_BURST */ + nb_pkts = RTE_MIN(nb_pkts, IAVF_VPMD_RX_MAX_BURST); + /* get some new buffers */ uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts, split_flags); @@ -986,21 +975,48 @@ iavf_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, &split_flags[i]); } -/* vPMD receive routine that reassembles scattered packets for flex RxD - * Notice: - * - nb_pkts < IAVF_VPMD_DESCS_PER_LOOP, just return no packet - * - nb_pkts > VPMD_RX_MAX_BURST, only scan IAVF_VPMD_RX_MAX_BURST - * numbers of DD bits +/** + * vPMD receive routine that reassembles scattered packets. 
*/ uint16_t -iavf_recv_scattered_pkts_vec_flex_rxd(void *rx_queue, - struct rte_mbuf **rx_pkts, - uint16_t nb_pkts) +iavf_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + uint16_t retval = 0; + + while (nb_pkts > IAVF_VPMD_RX_MAX_BURST) { + uint16_t burst; + + burst = iavf_recv_scattered_burst_vec(rx_queue, + rx_pkts + retval, + IAVF_VPMD_RX_MAX_BURST); + retval += burst; + nb_pkts -= burst; + if (burst < IAVF_VPMD_RX_MAX_BURST) + return retval; + } + + return retval + iavf_recv_scattered_burst_vec(rx_queue, + rx_pkts + retval, + nb_pkts); +} + +/** + * vPMD receive routine that reassembles single burst of 32 scattered packets + * for flex RxD + */ +static uint16_t +iavf_recv_scattered_burst_vec_flex_rxd(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) { struct iavf_rx_queue *rxq = rx_queue; uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0}; unsigned int i = 0; + /* split_flags only can support max of IAVF_VPMD_RX_MAX_BURST */ + nb_pkts = RTE_MIN(nb_pkts, IAVF_VPMD_RX_MAX_BURST); + /* get some new buffers */ uint16_t nb_bufs = _recv_raw_pkts_vec_flex_rxd(rxq, rx_pkts, nb_pkts, split_flags); @@ -1028,6 +1044,33 @@ iavf_recv_scattered_pkts_vec_flex_rxd(void *rx_queue, &split_flags[i]); } +/** + * vPMD receive routine that reassembles scattered packets for flex RxD + */ +uint16_t +iavf_recv_scattered_pkts_vec_flex_rxd(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + uint16_t retval = 0; + + while (nb_pkts > IAVF_VPMD_RX_MAX_BURST) { + uint16_t burst; + + burst = iavf_recv_scattered_burst_vec_flex_rxd(rx_queue, + rx_pkts + retval, + IAVF_VPMD_RX_MAX_BURST); + retval += burst; + nb_pkts -= burst; + if (burst < IAVF_VPMD_RX_MAX_BURST) + return retval; + } + + return retval + iavf_recv_scattered_burst_vec_flex_rxd(rx_queue, + rx_pkts + retval, + nb_pkts); +} + static inline void vtx1(volatile struct iavf_tx_desc *txdp, struct rte_mbuf *pkt, uint64_t flags) { -- 2.20.1
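
For reference, the core of this change is the burst-splitting wrapper now used
by the scattered Rx paths: the caller's request is served in chunks of at most
IAVF_VPMD_RX_MAX_BURST packets (the size of the split_flags array), stopping
early when a chunk comes back short. The snippet below is a minimal,
self-contained sketch of that pattern, not the driver code itself;
recv_burst() and MAX_BURST are stand-ins for iavf_recv_scattered_burst_vec()
and IAVF_VPMD_RX_MAX_BURST.

/*
 * Sketch of the burst-splitting wrapper pattern used by
 * iavf_recv_scattered_pkts_vec() and its flex-RxD variant.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_BURST 32  /* stands in for IAVF_VPMD_RX_MAX_BURST */

/* Stub burst routine: pretends the Rx ring currently holds 70 packets. */
static uint16_t
recv_burst(uint16_t nb_pkts)
{
	static uint16_t available = 70;
	uint16_t n = nb_pkts < available ? nb_pkts : available;

	available -= n;
	return n;
}

/*
 * Wrapper: keep issuing full-size bursts until the caller's request is
 * satisfied or a burst returns short (ring empty), mirroring the loop
 * added in iavf_rxtx_vec_sse.c and iavf_rxtx_vec_avx2.c.
 */
static uint16_t
recv_pkts(uint16_t nb_pkts)
{
	uint16_t retval = 0;

	while (nb_pkts > MAX_BURST) {
		uint16_t burst = recv_burst(MAX_BURST);

		retval += burst;
		nb_pkts -= burst;
		if (burst < MAX_BURST)
			return retval;
	}

	return retval + recv_burst(nb_pkts);
}

int
main(void)
{
	printf("received %u packets\n", recv_pkts(128)); /* prints 70 */
	return 0;
}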