* [dpdk-dev] [PATCH 1/8 v2] add vector PMD RX for FVL
From: Zhe Tao @ 2015-10-30 10:52 UTC
To: dev
The vPMD RX function uses multi-buffer handling and SSE instructions to
accelerate RX throughput. Packet type (pktype) parsing is not supported
by the vPMD RX path for now, because it would degrade performance heavily.
Signed-off-by: Zhe Tao <zhe.tao@intel.com>
---
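The heart of the vector RX path is that each 16-byte descriptor write-back
is loaded into an XMM register, and a single _mm_shuffle_epi8 then remaps
the descriptor fields into the mbuf's rx_descriptor_fields1 layout, four
descriptors per loop. Below is a minimal, self-contained sketch of just
that shuffle step; the byte offsets are invented for the demo and are not
the real i40e descriptor layout.

#include <stdint.h>
#include <stdio.h>
#include <tmmintrin.h>   /* SSSE3 _mm_shuffle_epi8; build with -mssse3 */

int main(void)
{
	/* Pretend this 16-byte block is one descriptor write-back where
	 * bytes 14..15 hold data_len and bytes 4..7 hold the RSS hash
	 * (offsets invented for the demo, not the i40e layout). */
	uint8_t desc[16] = { 0 };
	desc[14] = 0x40;                                  /* data_len = 64 */
	desc[4] = 0xEF; desc[5] = 0xBE; desc[6] = 0xAD; desc[7] = 0xDE;

	/* One selector per OUTPUT byte (args run from byte 15 down to 0);
	 * -1 (high bit set) zeroes that output byte. */
	const __m128i shuf = _mm_set_epi8(
		7, 6, 5, 4,        /* out 15..12 <- hash from desc bytes 7..4  */
		-1, -1,            /* out 11..10 <- zero                       */
		15, 14,            /* out  9..8  <- data_len from bytes 15..14 */
		-1, -1, -1, -1, -1, -1, -1, -1);  /* out 7..0 <- zero          */

	__m128i d = _mm_loadu_si128((const __m128i *)desc);
	__m128i out = _mm_shuffle_epi8(d, shuf);  /* all fields in one op */

	uint8_t r[16];
	_mm_storeu_si128((__m128i *)r, out);
	printf("data_len=%u hash=0x%08x\n",
	       (unsigned)(r[8] | (r[9] << 8)),
	       (unsigned)r[12] | ((unsigned)r[13] << 8) |
	       ((unsigned)r[14] << 16) | ((unsigned)r[15] << 24));
	return 0;   /* prints: data_len=64 hash=0xdeadbeef */
}

Because the shuffle mask both moves and zeroes bytes, one instruction
replaces a whole series of scalar shifts, masks and stores per packet.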
config/common_bsdapp | 2 +
config/common_linuxapp | 2 +
drivers/net/i40e/Makefile | 1 +
drivers/net/i40e/base/i40e_type.h | 3 +
drivers/net/i40e/i40e_rxtx.c | 28 ++-
drivers/net/i40e/i40e_rxtx.h | 20 +-
drivers/net/i40e/i40e_rxtx_vec.c | 484 ++++++++++++++++++++++++++++++++++++++
7 files changed, 535 insertions(+), 5 deletions(-)
create mode 100644 drivers/net/i40e/i40e_rxtx_vec.c
diff --git a/config/common_bsdapp b/config/common_bsdapp
index b37dcf4..3003da5 100644
--- a/config/common_bsdapp
+++ b/config/common_bsdapp
@@ -186,6 +186,8 @@ CONFIG_RTE_LIBRTE_I40E_DEBUG_TX=n
CONFIG_RTE_LIBRTE_I40E_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_I40E_DEBUG_DRIVER=n
CONFIG_RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC=y
+CONFIG_RTE_LIBRTE_I40E_INC_VECTOR=y
+CONFIG_RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE=y
CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n
CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF=4
CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM=4
diff --git a/config/common_linuxapp b/config/common_linuxapp
index 0de43d5..dadba4d 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -184,6 +184,8 @@ CONFIG_RTE_LIBRTE_I40E_DEBUG_TX=n
CONFIG_RTE_LIBRTE_I40E_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_I40E_DEBUG_DRIVER=n
CONFIG_RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC=y
+CONFIG_RTE_LIBRTE_I40E_INC_VECTOR=y
+CONFIG_RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE=y
CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n
CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF=4
CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM=4
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 55b7d31..d4695cb 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -95,6 +95,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_dcb.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c
diff --git a/drivers/net/i40e/base/i40e_type.h b/drivers/net/i40e/base/i40e_type.h
index 6ee398e..2720177 100644
--- a/drivers/net/i40e/base/i40e_type.h
+++ b/drivers/net/i40e/base/i40e_type.h
@@ -969,6 +969,9 @@ enum i40e_tx_desc_cmd_bits {
#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \
I40E_TXD_QW1_OFFSET_SHIFT)
+#define I40E_TD_CMD (I40E_TX_DESC_CMD_ICRC |\
+ I40E_TX_DESC_CMD_EOP)
+
enum i40e_tx_desc_length_fields {
/* Note: These are predefined bit offsets */
I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index fd656d5..dfdc7d5 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1788,9 +1788,6 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
return txq->tx_rs_thresh;
}
-#define I40E_TD_CMD (I40E_TX_DESC_CMD_ICRC |\
- I40E_TX_DESC_CMD_EOP)
-
/* Populate 4 descriptors with data from 4 mbufs */
static inline void
tx4(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
@@ -2625,6 +2622,9 @@ i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
rxq->nb_rx_hold = 0;
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
+
+ rxq->rxrearm_start = 0;
+ rxq->rxrearm_nb = 0;
}
void
@@ -3063,3 +3063,25 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
return I40E_SUCCESS;
}
+
+/* Stubs needed for linkage when CONFIG_RTE_LIBRTE_I40E_INC_VECTOR is set to 'n' */
+uint16_t __attribute__((weak))
+i40e_recv_pkts_vec(
+ void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+int __attribute__((weak))
+i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
+{
+ return -1;
+}
+
+void __attribute__((weak))
+i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused*rxq)
+{
+ return;
+}
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 4385142..61d9a0e 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -44,9 +44,15 @@
#define I40E_TX_FLAG_INSERT_VLAN ((uint32_t)(1 << 1))
#define I40E_TX_FLAG_TSYN ((uint32_t)(1 << 2))
-#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
#define RTE_PMD_I40E_RX_MAX_BURST 32
-#endif
+#define RTE_PMD_I40E_TX_MAX_BURST 32
+
+#define RTE_I40E_VPMD_RX_BURST 32
+#define RTE_I40E_VPMD_TX_BURST 32
+#define RTE_I40E_RXQ_REARM_THRESH 32
+#define RTE_I40E_MAX_RX_BURST RTE_I40E_RXQ_REARM_THRESH
+#define RTE_I40E_TX_MAX_FREE_BUF_SZ 64
+#define RTE_I40E_DESCS_PER_LOOP 4
#define I40E_RXBUF_SZ_1024 1024
#define I40E_RXBUF_SZ_2048 2048
@@ -100,6 +106,11 @@ struct i40e_rx_queue {
struct rte_mbuf fake_mbuf; /**< dummy mbuf */
struct rte_mbuf *rx_stage[RTE_PMD_I40E_RX_MAX_BURST * 2];
#endif
+
+ uint16_t rxrearm_nb; /**< number of remaining to be re-armed */
+ uint16_t rxrearm_start; /**< the idx we start the re-arming from */
+ uint64_t mbuf_initializer; /**< value to init mbufs */
+
uint8_t port_id; /**< device port ID */
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise */
uint16_t queue_id; /**< RX queue index */
@@ -210,4 +221,9 @@ uint32_t i40e_dev_rx_queue_count(struct rte_eth_dev *dev,
uint16_t rx_queue_id);
int i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq);
+void i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq);
+
#endif /* _I40E_RXTX_H_ */
diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c
new file mode 100644
index 0000000..470e6fa
--- /dev/null
+++ b/drivers/net/i40e/i40e_rxtx_vec.c
@@ -0,0 +1,484 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "base/i40e_prototype.h"
+#include "base/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+
+#include <tmmintrin.h>
+
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+static inline void
+i40e_rxq_rearm(struct i40e_rx_queue *rxq)
+{
+ int i;
+ uint16_t rx_id;
+
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+ struct rte_mbuf *mb0, *mb1;
+ __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
+ RTE_PKTMBUF_HEADROOM);
+ __m128i dma_addr0, dma_addr1;
+
+ rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+ /* Pull 'n' more MBUFs into the software ring */
+ if (rte_mempool_get_bulk(rxq->mp,
+ (void *)rxep,
+ RTE_I40E_RXQ_REARM_THRESH) < 0) {
+ if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
+ rxq->nb_rx_desc) {
+ dma_addr0 = _mm_setzero_si128();
+ for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
+ rxep[i].mbuf = &rxq->fake_mbuf;
+ _mm_store_si128((__m128i *)&rxdp[i].read,
+ dma_addr0);
+ }
+ }
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ RTE_I40E_RXQ_REARM_THRESH;
+ return;
+ }
+
+ /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+ for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
+ __m128i vaddr0, vaddr1;
+ uintptr_t p0, p1;
+
+ mb0 = rxep[0].mbuf;
+ mb1 = rxep[1].mbuf;
+
+ /* Flush mbuf with pkt template.
+ * Data to be rearmed is 6 bytes long.
+ * Though, RX will overwrite ol_flags that are coming next
+ * anyway. So overwrite whole 8 bytes with one load:
+ * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
+ */
+ p0 = (uintptr_t)&mb0->rearm_data;
+ *(uint64_t *)p0 = rxq->mbuf_initializer;
+ p1 = (uintptr_t)&mb1->rearm_data;
+ *(uint64_t *)p1 = rxq->mbuf_initializer;
+
+ /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
+ vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
+
+ /* convert pa to dma_addr hdr/data */
+ dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
+ dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
+
+ /* add headroom to pa values */
+ dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
+ dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);
+
+ /* flush desc with pa dma_addr */
+ _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
+ _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
+ }
+
+ rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;
+
+ rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+ /* Update the tail pointer on the NIC */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+}
+
+/* Handling the offload flags (olflags) field takes computation
+ * time when receiving packets. Therefore we provide a flag to disable
+ * the processing of the olflags field when they are not needed. This
+ * gives improved performance, at the cost of losing the offload info
+ * in the received packet
+ */
+#ifdef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
+
+static inline void
+desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+{
+ __m128i vlan0, vlan1, rss;
+ union {
+ uint16_t e[4];
+ uint64_t dword;
+ } vol;
+
+ /* mask everything except rss and vlan flags
+ * bit 2 is for vlan tag, bits 13:12 for rss
+ */
+ const __m128i rss_vlan_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x3004, 0x3004, 0x3004, 0x3004);
+
+ /* map rss and vlan type to rss hash and vlan flag */
+ const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, PKT_RX_VLAN_PKT,
+ 0, 0, 0, 0);
+
+ const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PKT_RX_FDIR, 0, PKT_RX_RSS_HASH, 0);
+
+ vlan0 = _mm_unpackhi_epi16(descs[0], descs[1]);
+ vlan1 = _mm_unpackhi_epi16(descs[2], descs[3]);
+ vlan0 = _mm_unpacklo_epi32(vlan0, vlan1);
+
+ vlan1 = _mm_and_si128(vlan0, rss_vlan_msk);
+ vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1);
+
+ rss = _mm_srli_epi16(vlan1, 12);
+ rss = _mm_shuffle_epi8(rss_flags, rss);
+
+ vlan0 = _mm_or_si128(vlan0, rss);
+ vol.dword = _mm_cvtsi128_si64(vlan0);
+
+ rx_pkts[0]->ol_flags = vol.e[0];
+ rx_pkts[1]->ol_flags = vol.e[1];
+ rx_pkts[2]->ol_flags = vol.e[2];
+ rx_pkts[3]->ol_flags = vol.e[3];
+}
+#else
+#define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
+#endif
+
+#define PKTLEN_SHIFT (6)
+#define PKTLEN_MASK (0x3FFF)
+/* The pkt_len field in the descriptor is not byte-aligned, so a
+ * shift is needed to align it
+ */
+static inline void
+desc_pktlen_align(__m128i descs[4])
+{
+ __m128i pktlen0, pktlen1, zero;
+ union {
+ uint16_t e[4];
+ uint64_t dword;
+ } vol;
+
+ /* mask everything except pktlen field*/
+ const __m128i pktlen_msk = _mm_set_epi32(PKTLEN_MASK, PKTLEN_MASK,
+ PKTLEN_MASK, PKTLEN_MASK);
+
+ pktlen0 = _mm_unpackhi_epi32(descs[0], descs[2]);
+ pktlen1 = _mm_unpackhi_epi32(descs[1], descs[3]);
+ pktlen0 = _mm_unpackhi_epi32(pktlen0, pktlen1);
+
+ zero = _mm_xor_si128(pktlen0, pktlen0);
+
+ pktlen0 = _mm_srli_epi32(pktlen0, PKTLEN_SHIFT);
+ pktlen0 = _mm_and_si128(pktlen0, pktlen_msk);
+
+ pktlen0 = _mm_packs_epi32(pktlen0, zero);
+ vol.dword = _mm_cvtsi128_si64(pktlen0);
+ /* let the descriptor byte 15-14 store the pkt len */
+ *((uint16_t *)&descs[0]+7) = vol.e[0];
+ *((uint16_t *)&descs[1]+7) = vol.e[1];
+ *((uint16_t *)&descs[2]+7) = vol.e[2];
+ *((uint16_t *)&descs[3]+7) = vol.e[3];
+}
+
+ /* vPMD receive routine, only accepts (nb_pkts == RTE_I40E_VPMD_RX_BURST)
+ * in one loop
+ *
+ * Notice:
+ * - nb_pkts < RTE_I40E_VPMD_RX_BURST, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ * numbers of DD bits
+ */
+static inline uint16_t
+_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *sw_ring;
+ uint16_t nb_pkts_recd;
+ int pos;
+ uint64_t var;
+ __m128i shuf_msk;
+
+ __m128i crc_adjust = _mm_set_epi16(
+ 0, 0, 0, /* ignore non-length fields */
+ -rxq->crc_len, /* sub crc on data_len */
+ 0, /* ignore high-16bits of pkt_len */
+ -rxq->crc_len, /* sub crc on pkt_len */
+ 0, 0 /* ignore pkt_type field */
+ );
+ __m128i dd_check, eop_check;
+
+ /* nb_pkts shall be less than or equal to RTE_I40E_MAX_RX_BURST */
+ nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);
+
+ /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);
+
+ /* Just the act of getting into the function from the application is
+ * going to cost about 7 cycles
+ */
+ rxdp = rxq->rx_ring + rxq->rx_tail;
+
+ _mm_prefetch((const void *)rxdp, _MM_HINT_T0);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
+ i40e_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->wb.qword1.status_error_len &
+ rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ return 0;
+
+ /* 4 packets DD mask */
+ dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);
+
+ /* 4 packets EOP mask */
+ eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);
+
+ /* mask to shuffle from desc. to mbuf */
+ shuf_msk = _mm_set_epi8(
+ 7, 6, 5, 4, /* octet 4~7, 32bits rss */
+ 3, 2, /* octet 2~3, low 16 bits vlan_macip */
+ 15, 14, /* octet 15~14, 16 bits data_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 15, 14, /* octet 15~14, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 0xFF, 0xFF /* pkt_type set as unknown */
+ );
+
+ /* Cache is empty -> need to scan the buffer rings, but first move
+ * the next 'n' mbufs into the cache
+ */
+ sw_ring = &rxq->sw_ring[rxq->rx_tail];
+
+ /* A. load 4 packet in one loop
+ * [A*. mask out 4 unused dirty field in desc]
+ * B. copy 4 mbuf point from swring to rx_pkts
+ * C. calc the number of DD bits among the 4 packets
+ * [C*. extract the end-of-packet bit, if requested]
+ * D. fill info. from desc to mbuf
+ */
+
+ for (pos = 0, nb_pkts_recd = 0; pos < RTE_I40E_VPMD_RX_BURST;
+ pos += RTE_I40E_DESCS_PER_LOOP,
+ rxdp += RTE_I40E_DESCS_PER_LOOP) {
+ __m128i descs[RTE_I40E_DESCS_PER_LOOP];
+ __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+ __m128i zero, staterr, sterr_tmp1, sterr_tmp2;
+ __m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
+
+ if (split_packet) {
+ rte_prefetch0(&rx_pkts[pos]->cacheline1);
+ rte_prefetch0(&rx_pkts[pos + 1]->cacheline1);
+ rte_prefetch0(&rx_pkts[pos + 2]->cacheline1);
+ rte_prefetch0(&rx_pkts[pos + 3]->cacheline1);
+ }
+
+ /* B.1 load 1 mbuf point */
+ mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
+ /* Read desc statuses backwards to avoid race condition */
+ /* A.1 load 4 pkts desc */
+ descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
+
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
+
+ /* B.1 load 1 mbuf point */
+ mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
+
+ descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
+ /* B.1 load 2 mbuf point */
+ descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
+ descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
+
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+
+ /* shift the pktlen field */
+ desc_pktlen_align(descs);
+
+ /* avoid compiler reorder optimization */
+ rte_compiler_barrier();
+
+ /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+ pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
+ pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);
+
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
+
+ desc_to_olflags_v(descs, &rx_pkts[pos]);
+
+ /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+ pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
+ pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
+
+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+ pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
+ pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);
+
+ /* C.2 get 4 pkts staterr value */
+ zero = _mm_xor_si128(dd_check, dd_check);
+ staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
+
+ /* D.3 copy final 3,4 data to rx_pkts */
+ _mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1,
+ pkt_mb4);
+ _mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1,
+ pkt_mb3);
+
+ /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
+ pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
+ pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
+
+ /* C* extract and record EOP bit */
+ if (split_packet) {
+ __m128i eop_shuf_mask = _mm_set_epi8(
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x04, 0x0C, 0x00, 0x08
+ );
+
+ /* and with mask to extract bits, flipping 1-0 */
+ __m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
+ /* the staterr values are not in order, as the count
+ * of dd bits doesn't care. However, for end of
+ * packet tracking, we do care, so shuffle. This also
+ * compresses the 32-bit values to 8-bit
+ */
+ eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
+ /* store the resulting 32-bit value */
+ *(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
+ split_packet += RTE_I40E_DESCS_PER_LOOP;
+
+ /* zero-out next pointers */
+ rx_pkts[pos]->next = NULL;
+ rx_pkts[pos + 1]->next = NULL;
+ rx_pkts[pos + 2]->next = NULL;
+ rx_pkts[pos + 3]->next = NULL;
+ }
+
+ /* C.3 calc available number of desc */
+ staterr = _mm_and_si128(staterr, dd_check);
+ staterr = _mm_packs_epi32(staterr, zero);
+
+ /* D.3 copy final 1,2 data to rx_pkts */
+ _mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1,
+ pkt_mb2);
+ _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+ pkt_mb1);
+ /* C.4 calc available number of desc */
+ var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
+ nb_pkts_recd += var;
+ if (likely(var != RTE_I40E_DESCS_PER_LOOP))
+ break;
+ }
+
+ /* Update our internal tail pointer */
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+ rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+ return nb_pkts_recd;
+}
+
+ /* vPMD receive routine, only accepts (nb_pkts == RTE_I40E_VPMD_RX_BURST)
+ * in one loop
+ *
+ * Notice:
+ * - nb_pkts < RTE_I40E_VPMD_RX_BURST, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ * numbers of DD bits
+ */
+uint16_t
+i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+void __attribute__((cold))
+i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
+{
+ const unsigned mask = rxq->nb_rx_desc - 1;
+ unsigned i;
+
+ if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
+ return;
+
+ /* free all mbufs that are valid in the ring */
+ for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ rxq->rxrearm_nb = rxq->nb_rx_desc;
+
+ /* set all entries to NULL */
+ memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+}
+
+int __attribute__((cold))
+i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
+{
+ uintptr_t p;
+ struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.port = rxq->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ rxq->mbuf_initializer = *(uint64_t *)p;
+ return 0;
+}
--
1.9.3
* [dpdk-dev] [PATCH 2/8 v2] add vector PMD TX for FVL
From: Zhe Tao @ 2015-10-30 10:52 UTC
To: dev
The vPMD TX path increases performance by using a faster mbuf release
method than the one used by the scalar TX path.
Signed-off-by: Zhe Tao <zhe.tao@intel.com>
---
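The fast release works because mbufs completing on a TX queue almost
always come from the same mempool, so they can be returned with one
rte_mempool_put_bulk() call instead of one put per mbuf. A minimal sketch
of that batching pattern, using a stand-in pool API rather than the real
DPDK one:

#include <stdio.h>

struct pool { const char *name; };
struct buf  { struct pool *pool; };   /* each buffer remembers its pool */

/* stand-in for rte_mempool_put_bulk(): one call returns many buffers */
static void pool_put_bulk(struct pool *p, struct buf **bufs, unsigned n)
{
	(void)bufs;
	printf("bulk put of %u bufs to pool %s\n", n, p->name);
}

/* Accumulate consecutive buffers from the same pool and return each
 * run with a single bulk call, instead of one put per buffer as a
 * scalar free loop would do. 64 mirrors RTE_I40E_TX_MAX_FREE_BUF_SZ. */
static void free_bufs_batched(struct buf **done, unsigned n)
{
	struct buf *batch[64];
	unsigned i, nb = 0;

	for (i = 0; i < n; i++) {
		if (nb > 0 && done[i]->pool != batch[0]->pool) {
			pool_put_bulk(batch[0]->pool, batch, nb); /* pool changed */
			nb = 0;
		}
		batch[nb++] = done[i];
		if (nb == 64) {                           /* batch full */
			pool_put_bulk(batch[0]->pool, batch, nb);
			nb = 0;
		}
	}
	if (nb > 0)
		pool_put_bulk(batch[0]->pool, batch, nb);
}

int main(void)
{
	struct pool a = {"A"}, b = {"B"};
	struct buf bufs[6] = { {&a}, {&a}, {&a}, {&b}, {&b}, {&a} };
	struct buf *done[6];
	unsigned i;

	for (i = 0; i < 6; i++)
		done[i] = &bufs[i];
	free_bufs_batched(done, 6);  /* -> 3 bulk calls: A(3), B(2), A(1) */
	return 0;
}

The demo input spans two pools on purpose: the batch is flushed whenever
the pool changes, so the common single-pool case collapses to a single
bulk call per tx_rs_thresh descriptors.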
drivers/net/i40e/i40e_rxtx.c | 8 ++
drivers/net/i40e/i40e_rxtx.h | 3 +
drivers/net/i40e/i40e_rxtx_vec.c | 162 +++++++++++++++++++++++++++++++++++++++
3 files changed, 173 insertions(+)
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index dfdc7d5..8fac220 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -3085,3 +3085,11 @@ i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused*rxq)
{
return;
}
+
+uint16_t __attribute__((weak))
+i40e_xmit_pkts_vec(void __rte_unused *tx_queue,
+ struct rte_mbuf __rte_unused **tx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 61d9a0e..3b0af9a 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -224,6 +224,9 @@ int i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq);
+int i40e_txq_vec_setup(struct i40e_tx_queue *txq);
void i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq);
+uint16_t i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
#endif /* _I40E_RXTX_H_ */
diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c
index 470e6fa..4ae334d 100644
--- a/drivers/net/i40e/i40e_rxtx_vec.c
+++ b/drivers/net/i40e/i40e_rxtx_vec.c
@@ -447,6 +447,162 @@ i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}
+static inline void
+vtx1(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf *pkt, uint64_t flags)
+{
+ uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
+ ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
+
+ __m128i descriptor = _mm_set_epi64x(high_qw,
+ pkt->buf_physaddr + pkt->data_off);
+ _mm_store_si128((__m128i *)txdp, descriptor);
+}
+
+static inline void
+vtx(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+ int i;
+
+ for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+ vtx1(txdp, *pkt, flags);
+}
+
+static inline int __attribute__((always_inline))
+i40e_tx_free_bufs(struct i40e_tx_queue *txq)
+{
+ struct i40e_tx_entry *txep;
+ uint32_t n;
+ uint32_t i;
+ int nb_free = 0;
+ struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];
+ /* check DD bits on threshold descriptor */
+ if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
+ rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+ return 0;
+
+ n = txq->tx_rs_thresh;
+
+ /* first buffer to free from S/W ring is at index
+ * tx_next_dd - (tx_rs_thresh-1)
+ */
+ txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
+ m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
+ if (likely(m != NULL)) {
+ free[0] = m;
+ nb_free = 1;
+ for (i = 1; i < n; i++) {
+ m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (likely(m != NULL)) {
+ if (likely(m->pool == free[0]->pool)) {
+ free[nb_free++] = m;
+ } else {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void *)free,
+ nb_free);
+ free[0] = m;
+ nb_free = 1;
+ }
+ }
+ }
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+ } else {
+ for (i = 1; i < n; i++) {
+ m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (m != NULL)
+ rte_mempool_put(m->pool, m);
+ }
+ }
+
+ /* buffers were freed, update counters */
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+ txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+ if (txq->tx_next_dd >= txq->nb_tx_desc)
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ return txq->tx_rs_thresh;
+}
+
+static inline void __attribute__((always_inline))
+tx_backlog_entry(struct i40e_tx_entry *txep,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i;
+
+ for (i = 0; i < (int)nb_pkts; ++i)
+ txep[i].mbuf = tx_pkts[i];
+}
+
+uint16_t
+i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+ volatile struct i40e_tx_desc *txdp;
+ struct i40e_tx_entry *txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = I40E_TD_CMD;
+ uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
+ int i;
+
+ /* crossing the tx_rs_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ i40e_tx_free_bufs(txq);
+
+ nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ tx_id = txq->tx_tail;
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_tx_desc - tx_id);
+ if (nb_commit >= n) {
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+ vtx1(txdp, *tx_pkts, flags);
+
+ vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ /* avoid reaching the end of the ring */
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+ }
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->tx_next_rs) {
+ txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ txq->tx_next_rs =
+ (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
+ }
+
+ txq->tx_tail = tx_id;
+
+ I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+ return nb_pkts;
+}
+
void __attribute__((cold))
i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
@@ -482,3 +638,9 @@ i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
rxq->mbuf_initializer = *(uint64_t *)p;
return 0;
}
+
+int __attribute__((cold))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
+{
+ return 0;
+}
--
1.9.3
* [dpdk-dev] [PATCH 3/8 v2] add vector PMD scatter RX for FVL
From: Zhe Tao @ 2015-10-30 10:52 UTC
To: dev
Support multiple segments per packet, for the case where a received
packet exceeds the size of a single buffer.
Signed-off-by: Zhe Tao <zhe.tao@intel.com>
---
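Conceptually, reassembly is driven by the per-buffer split flags the RX
loop extracts from the EOP bits: a flag of 1 means more segments of the
same packet follow. Stripped of the CRC accounting and mbuf bookkeeping,
the chaining logic looks roughly like this sketch (plain C, stand-in
types):

#include <stdio.h>

struct seg { int id; struct seg *next; };

/* Chain segments into packets: split[i] == 1 means more segments of
 * the same packet follow buffer i; 0 marks the last segment. */
static unsigned
reassemble(struct seg **bufs, const unsigned char *split, unsigned n,
	   struct seg **pkts)
{
	struct seg *start = NULL, *end = NULL;
	unsigned i, out = 0;

	for (i = 0; i < n; i++) {
		if (start == NULL) {
			start = end = bufs[i];   /* first segment of a packet */
			if (!split[i]) {         /* single-segment packet */
				pkts[out++] = start;
				start = NULL;
			}
			continue;
		}
		end->next = bufs[i];             /* append this segment */
		end = bufs[i];
		if (!split[i]) {                 /* last segment: emit packet */
			pkts[out++] = start;
			start = NULL;
		}
	}
	/* a still-open chain (start != NULL) would be carried over to the
	 * next burst, as the driver does with pkt_first_seg/pkt_last_seg */
	return out;
}

int main(void)
{
	struct seg s[5] = { {0, NULL}, {1, NULL}, {2, NULL}, {3, NULL}, {4, NULL} };
	struct seg *bufs[5] = { &s[0], &s[1], &s[2], &s[3], &s[4] };
	unsigned char split[5] = { 1, 1, 0, 0, 0 };  /* pkt 1 = segs 0..2 */
	struct seg *pkts[5];

	printf("%u packets reassembled\n", reassemble(bufs, split, 5, pkts));
	return 0;   /* prints: 3 packets reassembled */
}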
drivers/net/i40e/i40e_rxtx.c | 9 ++++
drivers/net/i40e/i40e_rxtx.h | 3 ++
drivers/net/i40e/i40e_rxtx_vec.c | 103 +++++++++++++++++++++++++++++++++++++++
3 files changed, 115 insertions(+)
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 8fac220..d37cbde 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -3074,6 +3074,15 @@ i40e_recv_pkts_vec(
return 0;
}
+uint16_t __attribute__((weak))
+i40e_recv_scattered_pkts_vec(
+ void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
int __attribute__((weak))
i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
{
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 3b0af9a..8d4f53a 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -223,6 +223,9 @@ int i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t i40e_recv_scattered_pkts_vec(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq);
int i40e_txq_vec_setup(struct i40e_tx_queue *txq);
void i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq);
diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c
index 4ae334d..86a84af 100644
--- a/drivers/net/i40e/i40e_rxtx_vec.c
+++ b/drivers/net/i40e/i40e_rxtx_vec.c
@@ -447,6 +447,109 @@ i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}
+static inline uint16_t
+reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
+ uint16_t nb_bufs, uint8_t *split_flags)
+{
+ struct rte_mbuf *pkts[RTE_I40E_VPMD_RX_BURST]; /*finished pkts*/
+ struct rte_mbuf *start = rxq->pkt_first_seg;
+ struct rte_mbuf *end = rxq->pkt_last_seg;
+ unsigned pkt_idx, buf_idx;
+
+ for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
+ if (end != NULL) {
+ /* processing a split packet */
+ end->next = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+
+ start->nb_segs++;
+ start->pkt_len += rx_bufs[buf_idx]->data_len;
+ end = end->next;
+
+ if (!split_flags[buf_idx]) {
+ /* it's the last packet of the set */
+ start->hash = end->hash;
+ start->ol_flags = end->ol_flags;
+ /* we need to strip crc for the whole packet */
+ start->pkt_len -= rxq->crc_len;
+ if (end->data_len > rxq->crc_len) {
+ end->data_len -= rxq->crc_len;
+ } else {
+ /* free up last mbuf */
+ struct rte_mbuf *secondlast = start;
+
+ while (secondlast->next != end)
+ secondlast = secondlast->next;
+ secondlast->data_len -= (rxq->crc_len -
+ end->data_len);
+ secondlast->next = NULL;
+ rte_pktmbuf_free_seg(end);
+ end = secondlast;
+ }
+ pkts[pkt_idx++] = start;
+ start = end = NULL;
+ }
+ } else {
+ /* not processing a split packet */
+ if (!split_flags[buf_idx]) {
+ /* not a split packet, save and skip */
+ pkts[pkt_idx++] = rx_bufs[buf_idx];
+ continue;
+ }
+ end = start = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+ rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
+ }
+ }
+
+ /* save the partial packet for next time */
+ rxq->pkt_first_seg = start;
+ rxq->pkt_last_seg = end;
+ memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
+ return pkt_idx;
+}
+
+ /* vPMD receive routine that reassembles scattered packets
+ *
+ * Notice:
+ * - only accepts (nb_pkts == RTE_I40E_VPMD_RX_BURST)
+ */
+uint16_t
+i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+
+ struct i40e_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+ if (rxq->pkt_first_seg == NULL &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+ /* reassemble any packets that need reassembly */
+ unsigned i = 0;
+
+ if (rxq->pkt_first_seg == NULL) {
+ /* find the first split flag, and only reassemble then*/
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ }
+ return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
static inline void
vtx1(volatile struct i40e_tx_desc *txdp,
struct rte_mbuf *pkt, uint64_t flags)
--
1.9.3
* [dpdk-dev] [PATCH 4/8 v2] add RX and TX selection function for FVL
From: Zhe Tao @ 2015-10-30 10:52 UTC
To: dev
Let the FVL PMD select which RX and TX functions should be used
according to the queue configuration.
Signed-off-by: Zhe Tao <zhe.tao@intel.com>
---
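The selection boils down to picking a burst function pointer once at
configure time, so the datapath itself carries no per-packet branches for
these decisions. A stripped-down sketch of the pattern, with stand-in
types rather than the real ethdev structures:

#include <stdio.h>
#include <stdint.h>

struct pkt;   /* opaque packet stand-in */
typedef uint16_t (*rx_burst_t)(void *rxq, struct pkt **pkts, uint16_t n);

static uint16_t rx_vec(void *q, struct pkt **p, uint16_t n)
{ (void)q; (void)p; return n; }   /* vector path stand-in */
static uint16_t rx_bulk(void *q, struct pkt **p, uint16_t n)
{ (void)q; (void)p; return n; }   /* bulk-alloc path stand-in */
static uint16_t rx_scalar(void *q, struct pkt **p, uint16_t n)
{ (void)q; (void)p; return n; }   /* single-alloc fallback stand-in */

struct dev {
	int vec_allowed;        /* all queues met the vector preconditions */
	int bulk_alloc_allowed; /* all queues met the bulk-alloc preconditions */
	rx_burst_t rx_pkt_burst;
};

/* Decide once, at configure time; the datapath just calls the pointer. */
static void set_rx_function(struct dev *d)
{
	if (d->vec_allowed)
		d->rx_pkt_burst = rx_vec;
	else if (d->bulk_alloc_allowed)
		d->rx_pkt_burst = rx_bulk;
	else
		d->rx_pkt_burst = rx_scalar;
}

int main(void)
{
	struct dev d = { 0, 1, NULL };   /* vector disallowed, bulk allowed */

	set_rx_function(&d);
	printf("selected %s path\n",
	       d.rx_pkt_burst == rx_bulk ? "bulk-alloc" : "other");
	return 0;
}

i40e_set_rx_function() in this patch is the same idea with more
candidates (vector scattered, scalar scattered, vector, bulk-alloc,
single-alloc) plus per-queue precondition checks.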
drivers/net/i40e/base/i40e_type.h | 10 +++
drivers/net/i40e/i40e_ethdev.c | 19 +++-
drivers/net/i40e/i40e_ethdev_vf.c | 27 ++++--
drivers/net/i40e/i40e_rxtx.c | 177 +++++++++++++++++++++++++++++++++++---
drivers/net/i40e/i40e_rxtx.h | 6 ++
drivers/net/i40e/i40e_rxtx_vec.c | 32 +++++++
6 files changed, 251 insertions(+), 20 deletions(-)
diff --git a/drivers/net/i40e/base/i40e_type.h b/drivers/net/i40e/base/i40e_type.h
index 2720177..9491f94 100644
--- a/drivers/net/i40e/base/i40e_type.h
+++ b/drivers/net/i40e/base/i40e_type.h
@@ -113,6 +113,11 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
#define I40E_HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF))
#define I40E_LO_BYTE(x) ((u8)((x) & 0xFF))
+#undef container_of
+#define container_of(ptr, type, member) ({ \
+ typeof(((type *)0)->member)(*__mptr) = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); })
+
/* Number of Transmit Descriptors must be a multiple of 8. */
#define I40E_REQ_TX_DESCRIPTOR_MULTIPLE 8
/* Number of Receive Descriptors must be a multiple of 32 if
@@ -563,6 +568,11 @@ struct i40e_hw {
/* debug mask */
u32 debug_mask;
+
+ bool rx_bulk_alloc_allowed;
+ bool rx_vec_allowed;
+ bool tx_simple_allowed;
+ bool tx_vec_allowed;
};
static inline bool i40e_is_vf(struct i40e_hw *hw)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 2dd9fdc..e241f66 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -403,8 +403,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
* has already done this work. Only check we don't need a different
* RX function */
if (rte_eal_process_type() != RTE_PROC_PRIMARY){
- if (dev->data->scattered_rx)
- dev->rx_pkt_burst = i40e_recv_scattered_pkts;
+ i40e_set_rx_function(dev);
+ i40e_set_tx_function(dev);
return 0;
}
pci_dev = dev->pci_dev;
@@ -671,9 +671,18 @@ static int
i40e_dev_configure(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
int ret;
+ /* Initialize to TRUE. If any of Rx queues doesn't meet the
+ * bulk allocation or vector Rx preconditions we will reset it.
+ */
+ hw->rx_bulk_alloc_allowed = true;
+ hw->rx_vec_allowed = true;
+ hw->tx_simple_allowed = true;
+ hw->tx_vec_allowed = true;
+
if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
ret = i40e_fdir_setup(pf);
if (ret != I40E_SUCCESS) {
@@ -3707,6 +3716,9 @@ i40e_dev_tx_init(struct i40e_pf *pf)
if (ret != I40E_SUCCESS)
break;
}
+ if (ret == I40E_SUCCESS)
+ i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
+ ->eth_dev);
return ret;
}
@@ -3733,6 +3745,9 @@ i40e_dev_rx_init(struct i40e_pf *pf)
break;
}
}
+ if (ret == I40E_SUCCESS)
+ i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
+ ->eth_dev);
return ret;
}
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index b694400..bdea8f0 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1182,8 +1182,8 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
* has already done this work.
*/
if (rte_eal_process_type() != RTE_PROC_PRIMARY){
- if (eth_dev->data->scattered_rx)
- eth_dev->rx_pkt_burst = i40e_recv_scattered_pkts;
+ i40e_set_rx_function(eth_dev);
+ i40e_set_tx_function(eth_dev);
return 0;
}
@@ -1277,6 +1277,16 @@ PMD_REGISTER_DRIVER(rte_i40evf_driver);
static int
i40evf_dev_configure(struct rte_eth_dev *dev)
{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Initialize to TRUE. If any of Rx queues doesn't meet the bulk
+ * allocation or vector Rx preconditions we will reset it.
+ */
+ hw->rx_bulk_alloc_allowed = true;
+ hw->rx_vec_allowed = true;
+ hw->tx_simple_allowed = true;
+ hw->tx_vec_allowed = true;
+
return i40evf_init_vlan(dev);
}
@@ -1508,7 +1518,6 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
if (dev_data->dev_conf.rxmode.enable_scatter ||
(rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
dev_data->scattered_rx = 1;
- dev->rx_pkt_burst = i40e_recv_scattered_pkts;
}
return 0;
@@ -1519,6 +1528,7 @@ i40evf_rx_init(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
uint16_t i;
+ int ret = I40E_SUCCESS;
struct i40e_rx_queue **rxq =
(struct i40e_rx_queue **)dev->data->rx_queues;
@@ -1526,11 +1536,14 @@ i40evf_rx_init(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
if (!rxq[i] || !rxq[i]->q_set)
continue;
- if (i40evf_rxq_init(dev, rxq[i]) < 0)
- return -EFAULT;
+ ret = i40evf_rxq_init(dev, rxq[i]);
+ if (ret != I40E_SUCCESS)
+ break;
}
+ if (ret == I40E_SUCCESS)
+ i40e_set_rx_function(dev);
- return 0;
+ return ret;
}
static void
@@ -1543,6 +1556,8 @@ i40evf_tx_init(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_tx_queues; i++)
txq[i]->qtx_tail = hw->hw_addr + I40E_QTX_TAIL1(i);
+
+ i40e_set_tx_function(dev);
}
static inline void
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index d37cbde..71f601f 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -2213,13 +2213,12 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
- if (!use_def_burst_func && !dev->data->scattered_rx) {
+ if (!use_def_burst_func) {
#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
"satisfied. Rx Burst Bulk Alloc function will be "
"used on port=%d, queue=%d.",
rxq->port_id, rxq->queue_id);
- dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
} else {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -2227,6 +2226,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
"or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
"not enabled on port=%d, queue=%d.",
rxq->port_id, rxq->queue_id);
+ hw->rx_bulk_alloc_allowed = false;
}
return 0;
@@ -2488,14 +2488,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
dev->data->tx_queues[queue_idx] = txq;
/* Use a simple TX queue without offloads or multi segs if possible */
- if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS) &&
- (txq->tx_rs_thresh >= I40E_TX_MAX_BURST)) {
- PMD_INIT_LOG(INFO, "Using simple tx path");
- dev->tx_pkt_burst = i40e_xmit_pkts_simple;
- } else {
- PMD_INIT_LOG(INFO, "Using full-featured tx path");
- dev->tx_pkt_burst = i40e_xmit_pkts;
- }
+ i40e_set_tx_function_flag(dev, txq);
return 0;
}
@@ -2564,6 +2557,12 @@ i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq)
{
uint16_t i;
+ /* SSE Vector driver has a different way of releasing mbufs. */
+ if (rxq->rx_using_sse) {
+ i40e_rx_queue_release_mbufs_vec(rxq);
+ return;
+ }
+
if (!rxq || !rxq->sw_ring) {
PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
return;
@@ -2837,7 +2836,6 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
int err = I40E_SUCCESS;
struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(rxq->vsi);
- struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
uint16_t pf_q = rxq->reg_idx;
uint16_t buf_size;
struct i40e_hmc_obj_rxq rx_ctx;
@@ -2893,7 +2891,6 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
/* Check if scattered RX needs to be used. */
if ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
dev_data->scattered_rx = 1;
- dev->rx_pkt_burst = i40e_recv_scattered_pkts;
}
/* Init the RX tail register. */
@@ -3064,7 +3061,156 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
return I40E_SUCCESS;
}
+void __attribute__((cold))
+i40e_set_rx_function(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint16_t rx_using_sse, i;
+ /* In order to allow Vector Rx there are a few configuration
+ * conditions to be met and Rx Bulk Allocation should be allowed.
+ */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ if (i40e_rx_vec_dev_conf_condition_check(dev) ||
+ !hw->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet"
+ " Vector Rx preconditions",
+ dev->data->port_id);
+
+ hw->rx_vec_allowed = false;
+ }
+ if (hw->rx_vec_allowed) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct i40e_rx_queue *rxq =
+ dev->data->rx_queues[i];
+
+ if (i40e_rxq_vec_setup(rxq)) {
+ hw->rx_vec_allowed = false;
+ break;
+ }
+ }
+ }
+ }
+
+ if (dev->data->scattered_rx) {
+ /* Set the non-LRO scattered callback: there are Vector and
+ * single allocation versions.
+ */
+ if (hw->rx_vec_allowed) {
+ PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
+ "callback (port=%d).",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_scattered_pkts_vec;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
+ "allocation callback (port=%d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst = i40e_recv_scattered_pkts;
+ }
+ /* If parameters allow we are going to choose between the following
+ * callbacks:
+ * - Vector
+ * - Bulk Allocation
+ * - Single buffer allocation (the simplest one)
+ */
+ } else if (hw->rx_vec_allowed) {
+ PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
+ "burst size no less than %d (port=%d).",
+ RTE_I40E_DESCS_PER_LOOP,
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_pkts_vec;
+ } else if (hw->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+ "satisfied. Rx Burst Bulk Alloc function "
+ "will be used on port=%d.",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
+ "satisfied, or Scattered Rx is requested "
+ "(port=%d).",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_pkts;
+ }
+
+ /* Propagate information about RX function choice through all queues. */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ rx_using_sse =
+ (dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
+ dev->rx_pkt_burst == i40e_recv_pkts_vec);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
+
+ rxq->rx_using_sse = rx_using_sse;
+ }
+ }
+}
+
+void __attribute__((cold))
+i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Use a simple Tx queue (no offloads, no multi segs) if possible */
+ if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS)
+ && (txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST)) {
+ if (txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ) {
+ PMD_INIT_LOG(DEBUG, "Vector tx"
+ " can be enabled on this txq.");
+
+ } else {
+ hw->tx_vec_allowed = false;
+ }
+ } else {
+ hw->tx_simple_allowed = false;
+ }
+}
+
+void __attribute__((cold))
+i40e_set_tx_function(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int i;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ if (hw->tx_vec_allowed) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct i40e_tx_queue *txq =
+ dev->data->tx_queues[i];
+
+ if (i40e_txq_vec_setup(txq)) {
+ hw->tx_vec_allowed = false;
+ break;
+ }
+ }
+ }
+ }
+
+ if (hw->tx_simple_allowed) {
+ if (hw->tx_vec_allowed) {
+ PMD_INIT_LOG(DEBUG, "Vector tx finally be used.");
+ dev->tx_pkt_burst = i40e_xmit_pkts_vec;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
+ dev->tx_pkt_burst = i40e_xmit_pkts_simple;
+ }
+ } else {
+ PMD_INIT_LOG(DEBUG, "Xmit tx finally be used.");
+ dev->tx_pkt_burst = i40e_xmit_pkts;
+ }
+}
+
/* Stubs needed for linkage when CONFIG_RTE_LIBRTE_I40E_INC_VECTOR is set to 'n' */
+int __attribute__((weak))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
+{
+ return -1;
+}
+
uint16_t __attribute__((weak))
i40e_recv_pkts_vec(
void __rte_unused *rx_queue,
@@ -3089,6 +3235,12 @@ i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
return -1;
}
+int __attribute__((weak))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
+{
+ return -1;
+}
+
void __attribute__((weak))
i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused*rxq)
{
@@ -3102,3 +3254,4 @@ i40e_xmit_pkts_vec(void __rte_unused *tx_queue,
{
return 0;
}
+
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 8d4f53a..dc575fd 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -124,6 +124,7 @@ struct i40e_rx_queue {
uint8_t hs_mode; /* Header Split mode */
bool q_set; /**< indicate if rx queue has been configured */
bool rx_deferred_start; /**< don't start this queue in dev start */
+ uint16_t rx_using_sse; /**< flag indicating the use of vPMD for rx */
};
struct i40e_tx_entry {
@@ -226,10 +227,15 @@ uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t i40e_recv_scattered_pkts_vec(void *rx_queue,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+int i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq);
int i40e_txq_vec_setup(struct i40e_tx_queue *txq);
void i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq);
uint16_t i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+void i40e_set_rx_function(struct rte_eth_dev *dev);
+void i40e_set_tx_function_flag(struct rte_eth_dev *dev,
+ struct i40e_tx_queue *txq);
+void i40e_set_tx_function(struct rte_eth_dev *dev);
#endif /* _I40E_RXTX_H_ */
diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c
index 86a84af..b2993e9 100644
--- a/drivers/net/i40e/i40e_rxtx_vec.c
+++ b/drivers/net/i40e/i40e_rxtx_vec.c
@@ -747,3 +747,35 @@ i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
{
return 0;
}
+
+int __attribute__((cold))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
+{
+#ifndef RTE_LIBRTE_IEEE1588
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+
+#ifndef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
+ /* without rx ol_flags, no VP flag report */
+ if (rxmode->hw_vlan_strip != 0 ||
+ rxmode->hw_vlan_extend != 0)
+ return -1;
+#endif
+
+ /* no fdir support */
+ if (fconf->mode != RTE_FDIR_MODE_NONE)
+ return -1;
+
+ /* - no csum error report support
+ * - no header split support
+ */
+ if (rxmode->hw_ip_checksum == 1 ||
+ rxmode->header_split == 1)
+ return -1;
+
+ return 0;
+#else
+ RTE_SET_USED(dev);
+ return -1;
+#endif
+}
--
1.9.3
* [dpdk-dev] [PATCH 5/8 v2] edit the comments
From: Zhe Tao @ 2015-10-30 10:52 UTC
To: dev
Edit the comments describing the minimum packet count supported by the
vector RX path.
Signed-off-by: Zhe Tao <zhe.tao@intel.com>
---
drivers/net/i40e/i40e_rxtx_vec.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c
index b2993e9..6f89002 100644
--- a/drivers/net/i40e/i40e_rxtx_vec.c
+++ b/drivers/net/i40e/i40e_rxtx_vec.c
@@ -225,7 +225,7 @@ desc_pktlen_align(__m128i descs[4])
* in one loop
*
* Notice:
- * - nb_pkts < RTE_I40E_VPMD_RX_BURST, just return no packet
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
* - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
* numbers of DD bits
--
1.9.3
* [dpdk-dev] [PATCH 6/8 v2] change the position of data prefetch for split packets
From: Zhe Tao @ 2015-10-30 10:52 UTC
To: dev
Following the ixgbe vector implementation, move the mbuf data prefetch
so that it is issued after the mbuf pointers have been copied into
rx_pkts.
Signed-off-by: Zhe Tao <zhe.tao@intel.com>
---
drivers/net/i40e/i40e_rxtx_vec.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c
index 6f89002..f5b2db6 100644
--- a/drivers/net/i40e/i40e_rxtx_vec.c
+++ b/drivers/net/i40e/i40e_rxtx_vec.c
@@ -314,13 +314,6 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
__m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
- if (split_packet) {
- rte_prefetch0(&rx_pkts[pos]->cacheline1);
- rte_prefetch0(&rx_pkts[pos + 1]->cacheline1);
- rte_prefetch0(&rx_pkts[pos + 2]->cacheline1);
- rte_prefetch0(&rx_pkts[pos + 3]->cacheline1);
- }
-
/* B.1 load 1 mbuf point */
mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
/* Read desc statuses backwards to avoid race condition */
@@ -341,6 +334,13 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* B.2 copy 2 mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+ if (split_packet) {
+ rte_prefetch0(&rx_pkts[pos]->cacheline1);
+ rte_prefetch0(&rx_pkts[pos + 1]->cacheline1);
+ rte_prefetch0(&rx_pkts[pos + 2]->cacheline1);
+ rte_prefetch0(&rx_pkts[pos + 3]->cacheline1);
+ }
+
/* shift the pktlen field */
desc_pktlen_align(descs);
--
1.9.3
* [dpdk-dev] [PATCH 7/8 v2] move all the extra definitions out of shared code
From: Zhe Tao @ 2015-10-30 10:52 UTC
To: dev
Move all the extra definitions out of the shared base driver code.
Signed-off-by: Zhe Tao <zhe.tao@intel.com>
---
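One of the definitions being moved is the container_of() macro, which
the series uses to get from an embedded struct i40e_pf back to its
enclosing struct i40e_adapter. A self-contained demo of the pattern
(simplified: the real macro also uses typeof for extra type checking):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pf { int id; };                              /* embedded member  */
struct adapter { const char *name; struct pf pf; }; /* enclosing struct */

int main(void)
{
	struct adapter ad = { "port0", { 7 } };
	struct pf *p = &ad.pf;          /* code only holds the member... */

	/* ...and recovers the enclosing struct by subtracting the
	 * member's offset, e.g. i40e_set_tx_function(container_of(pf,
	 * struct i40e_adapter, pf)->eth_dev) earlier in this series. */
	struct adapter *back = container_of(p, struct adapter, pf);

	printf("%s %d\n", back->name, back->pf.id);   /* prints: port0 7 */
	return 0;
}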
drivers/net/i40e/base/i40e_type.h | 13 -------------
drivers/net/i40e/i40e_ethdev.c | 11 ++++++-----
drivers/net/i40e/i40e_ethdev.h | 6 ++++++
drivers/net/i40e/i40e_ethdev_vf.c | 11 ++++++-----
drivers/net/i40e/i40e_rxtx.c | 39 ++++++++++++++++++++++-----------------
drivers/net/i40e/i40e_rxtx.h | 8 ++++++++
6 files changed, 48 insertions(+), 40 deletions(-)
diff --git a/drivers/net/i40e/base/i40e_type.h b/drivers/net/i40e/base/i40e_type.h
index 9491f94..6ee398e 100644
--- a/drivers/net/i40e/base/i40e_type.h
+++ b/drivers/net/i40e/base/i40e_type.h
@@ -113,11 +113,6 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
#define I40E_HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF))
#define I40E_LO_BYTE(x) ((u8)((x) & 0xFF))
-#undef container_of
-#define container_of(ptr, type, member) ({ \
- typeof(((type *)0)->member)(*__mptr) = (ptr); \
- (type *)((char *)__mptr - offsetof(type, member)); })
-
/* Number of Transmit Descriptors must be a multiple of 8. */
#define I40E_REQ_TX_DESCRIPTOR_MULTIPLE 8
/* Number of Receive Descriptors must be a multiple of 32 if
@@ -568,11 +563,6 @@ struct i40e_hw {
/* debug mask */
u32 debug_mask;
-
- bool rx_bulk_alloc_allowed;
- bool rx_vec_allowed;
- bool tx_simple_allowed;
- bool tx_vec_allowed;
};
static inline bool i40e_is_vf(struct i40e_hw *hw)
@@ -979,9 +969,6 @@ enum i40e_tx_desc_cmd_bits {
#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \
I40E_TXD_QW1_OFFSET_SHIFT)
-#define I40E_TD_CMD (I40E_TX_DESC_CMD_ICRC |\
- I40E_TX_DESC_CMD_EOP)
-
enum i40e_tx_desc_length_fields {
/* Note: These are predefined bit offsets */
I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index e241f66..153be45 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -670,18 +670,19 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
static int
i40e_dev_configure(struct rte_eth_dev *dev)
{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
int ret;
/* Initialize to TRUE. If any of Rx queues doesn't meet the
* bulk allocation or vector Rx preconditions we will reset it.
*/
- hw->rx_bulk_alloc_allowed = true;
- hw->rx_vec_allowed = true;
- hw->tx_simple_allowed = true;
- hw->tx_vec_allowed = true;
+ ad->rx_bulk_alloc_allowed = true;
+ ad->rx_vec_allowed = true;
+ ad->tx_simple_allowed = true;
+ ad->tx_vec_allowed = true;
if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
ret = i40e_fdir_setup(pf);
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 6185657..ab78c54 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -462,6 +462,12 @@ struct i40e_adapter {
struct i40e_pf pf;
struct i40e_vf vf;
};
+
+ /* for vector PMD */
+ bool rx_bulk_alloc_allowed;
+ bool rx_vec_allowed;
+ bool tx_simple_allowed;
+ bool tx_vec_allowed;
};
int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index bdea8f0..b8ebacb 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1277,15 +1277,16 @@ PMD_REGISTER_DRIVER(rte_i40evf_driver);
static int
i40evf_dev_configure(struct rte_eth_dev *dev)
{
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
/* Initialize to TRUE. If any of Rx queues doesn't meet the bulk
* allocation or vector Rx preconditions we will reset it.
*/
- hw->rx_bulk_alloc_allowed = true;
- hw->rx_vec_allowed = true;
- hw->tx_simple_allowed = true;
- hw->tx_vec_allowed = true;
+ ad->rx_bulk_alloc_allowed = true;
+ ad->rx_vec_allowed = true;
+ ad->tx_simple_allowed = true;
+ ad->tx_vec_allowed = true;
return i40evf_init_vlan(dev);
}
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 71f601f..8731712 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -2105,6 +2105,8 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
struct i40e_vsi *vsi;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct i40e_rx_queue *rxq;
const struct rte_memzone *rz;
uint32_t ring_size;
@@ -2226,7 +2228,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
"or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
"not enabled on port=%d, queue=%d.",
rxq->port_id, rxq->queue_id);
- hw->rx_bulk_alloc_allowed = false;
+ ad->rx_bulk_alloc_allowed = false;
}
return 0;
@@ -3064,27 +3066,28 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
void __attribute__((cold))
i40e_set_rx_function(struct rte_eth_dev *dev)
{
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
uint16_t rx_using_sse, i;
/* In order to allow Vector Rx there are a few configuration
* conditions to be met and Rx Bulk Allocation should be allowed.
*/
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
if (i40e_rx_vec_dev_conf_condition_check(dev) ||
- !hw->rx_bulk_alloc_allowed) {
+ !ad->rx_bulk_alloc_allowed) {
PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet"
" Vector Rx preconditions",
dev->data->port_id);
- hw->rx_vec_allowed = false;
+ ad->rx_vec_allowed = false;
}
- if (hw->rx_vec_allowed) {
+ if (ad->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct i40e_rx_queue *rxq =
dev->data->rx_queues[i];
if (i40e_rxq_vec_setup(rxq)) {
- hw->rx_vec_allowed = false;
+ ad->rx_vec_allowed = false;
break;
}
}
@@ -3095,7 +3098,7 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
/* Set the non-LRO scattered callback: there are Vector and
* single allocation versions.
*/
- if (hw->rx_vec_allowed) {
+ if (ad->rx_vec_allowed) {
PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
"callback (port=%d).",
dev->data->port_id);
@@ -3113,14 +3116,14 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
* - Bulk Allocation
* - Single buffer allocation (the simplest one)
*/
- } else if (hw->rx_vec_allowed) {
+ } else if (ad->rx_vec_allowed) {
PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
"burst size no less than %d (port=%d).",
RTE_I40E_DESCS_PER_LOOP,
dev->data->port_id);
dev->rx_pkt_burst = i40e_recv_pkts_vec;
- } else if (hw->rx_bulk_alloc_allowed) {
+ } else if (ad->rx_bulk_alloc_allowed) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
"satisfied. Rx Burst Bulk Alloc function "
"will be used on port=%d.",
@@ -3153,7 +3156,8 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
void __attribute__((cold))
i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
{
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS)
@@ -3163,35 +3167,36 @@ i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
" can be enabled on this txq.");
} else {
- hw->tx_vec_allowed = false;
+ ad->tx_vec_allowed = false;
}
} else {
- hw->tx_simple_allowed = false;
+ ad->tx_simple_allowed = false;
}
}
void __attribute__((cold))
i40e_set_tx_function(struct rte_eth_dev *dev)
{
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
int i;
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- if (hw->tx_vec_allowed) {
+ if (ad->tx_vec_allowed) {
for (i = 0; i < dev->data->nb_tx_queues; i++) {
struct i40e_tx_queue *txq =
dev->data->tx_queues[i];
if (i40e_txq_vec_setup(txq)) {
- hw->tx_vec_allowed = false;
+ ad->tx_vec_allowed = false;
break;
}
}
}
}
- if (hw->tx_simple_allowed) {
- if (hw->tx_vec_allowed) {
+ if (ad->tx_simple_allowed) {
+ if (ad->tx_vec_allowed) {
PMD_INIT_LOG(DEBUG, "Vector tx finally be used.");
dev->tx_pkt_burst = i40e_xmit_pkts_vec;
} else {
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index dc575fd..39cb95a 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -57,6 +57,14 @@
#define I40E_RXBUF_SZ_1024 1024
#define I40E_RXBUF_SZ_2048 2048
+#undef container_of
+#define container_of(ptr, type, member) ({ \
+ typeof(((type *)0)->member)(*__mptr) = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); })
+
+#define I40E_TD_CMD (I40E_TX_DESC_CMD_ICRC |\
+ I40E_TX_DESC_CMD_EOP)
+
enum i40e_header_split_mode {
i40e_header_split_none = 0,
i40e_header_split_enabled = 1,
--
1.9.3
* [dpdk-dev] [PATCH 8/8 v2] update the release note
2015-10-30 10:52 ` [dpdk-dev] [PATCH 0/8 v2] i40e: add vector PMD support " Zhe Tao
` (6 preceding siblings ...)
2015-10-30 10:52 ` [dpdk-dev] [PATCH 7/8 v2] move all the extra definition out of share code Zhe Tao
@ 2015-10-30 10:52 ` Zhe Tao
2015-10-30 13:01 ` [dpdk-dev] [PATCH 0/4 v3] i40e: add vector PMD support for FVL Zhe Tao
8 siblings, 0 replies; 36+ messages in thread
From: Zhe Tao @ 2015-10-30 10:52 UTC (permalink / raw)
To: dev
update the release note
Signed-off-by: Zhe Tao <zhe.tao@intel.com>
---
doc/guides/rel_notes/release_2_2.rst | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/doc/guides/rel_notes/release_2_2.rst b/doc/guides/rel_notes/release_2_2.rst
index bc9b00f..c78ceca 100644
--- a/doc/guides/rel_notes/release_2_2.rst
+++ b/doc/guides/rel_notes/release_2_2.rst
@@ -39,6 +39,10 @@ Drivers
Fixed i40e issue that occurred when a DPDK application didn't initialize
ports if memory wasn't available on socket 0.
+* **i40e: Add vector PMD support for FVL.**
+
+ Add vector RX/TX support for FVL.
+
* **vhost: Fixed Qemu shutdown.**
Fixed issue with libvirt ``virsh destroy`` not killing the VM.
--
1.9.3
* [dpdk-dev] [PATCH 0/4 v3] i40e: add vector PMD support for FVL
2015-10-30 10:52 ` [dpdk-dev] [PATCH 0/8 v2] i40e: add vector PMD support " Zhe Tao
` (7 preceding siblings ...)
2015-10-30 10:52 ` [dpdk-dev] [PATCH 8/8 v2] update the release note Zhe Tao
@ 2015-10-30 13:01 ` Zhe Tao
2015-10-30 13:01 ` [dpdk-dev] [PATCH 1/4 v3] add vector PMD RX " Zhe Tao
` (4 more replies)
8 siblings, 5 replies; 36+ messages in thread
From: Zhe Tao @ 2015-10-30 13:01 UTC (permalink / raw)
To: dev
This patch set adds vector PMD support for FVL.
The FVL vPMD works the same way as the ixgbe one.
All the functionality has been tested.
PATCH v1: add the vector PMD support for FVL
PATCH v2: move the extra definitions out of the shared code
change the prefetch position to support split packets
update the release note
PATCH v3: rework the patch set
Zhe Tao (4):
add vector PMD RX for FVL
add vector PMD TX for FVL
add vector PMD scatter RX for FVL
add RX and TX selection function for FVL
config/common_bsdapp | 2 +
config/common_linuxapp | 2 +
doc/guides/rel_notes/release_2_2.rst | 4 +
drivers/net/i40e/Makefile | 1 +
drivers/net/i40e/i40e_ethdev.c | 20 +-
drivers/net/i40e/i40e_ethdev.h | 6 +
drivers/net/i40e/i40e_ethdev_vf.c | 28 +-
drivers/net/i40e/i40e_rxtx.c | 227 +++++++++-
drivers/net/i40e/i40e_rxtx.h | 40 +-
drivers/net/i40e/i40e_rxtx_vec.c | 781 +++++++++++++++++++++++++++++++++++
10 files changed, 1086 insertions(+), 25 deletions(-)
create mode 100644 drivers/net/i40e/i40e_rxtx_vec.c
--
1.9.3
* [dpdk-dev] [PATCH 1/4 v3] add vector PMD RX for FVL
2015-10-30 13:01 ` [dpdk-dev] [PATCH 0/4 v3] i40e: add vector PMD support for FVL Zhe Tao
@ 2015-10-30 13:01 ` Zhe Tao
2015-10-30 13:40 ` Liang, Cunming
2015-10-30 13:01 ` [dpdk-dev] [PATCH 2/4 v3] add vector PMD TX " Zhe Tao
` (3 subsequent siblings)
4 siblings, 1 reply; 36+ messages in thread
From: Zhe Tao @ 2015-10-30 13:01 UTC (permalink / raw)
To: dev
The vPMD RX function uses multi-buffer and SSE instructions to
accelerate the RX path. Packet type (pktype) parsing is not supported by
the vPMD RX for now, because it would decrease performance heavily.
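As a rough illustration of the approach (a sketch only, not the patch
code), the heart of the vector RX loop is to gather the status words of
four descriptors into one XMM register and popcount their DD (descriptor
done) bits, so the burst stops at the first descriptor the hardware has
not completed:

#include <stdint.h>
#include <emmintrin.h>

/* Sketch: count how many of 4 gathered descriptors have the DD bit
 * set, mirroring the staterr/popcount step in _recv_raw_pkts_vec(). */
static inline unsigned
dd_count(__m128i staterr)
{
	/* DD is bit 0 of each 32-bit status word */
	const __m128i dd_check =
		_mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);
	__m128i dd = _mm_and_si128(staterr, dd_check);

	/* compress the four 32-bit lanes into the low 64 bits */
	dd = _mm_packs_epi32(dd, _mm_setzero_si128());
	return __builtin_popcountll(_mm_cvtsi128_si64(dd));
}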
Signed-off-by: Zhe Tao <zhe.tao@intel.com>
---
config/common_bsdapp | 2 +
config/common_linuxapp | 2 +
drivers/net/i40e/Makefile | 1 +
drivers/net/i40e/i40e_rxtx.c | 28 ++-
drivers/net/i40e/i40e_rxtx.h | 28 ++-
drivers/net/i40e/i40e_rxtx_vec.c | 484 +++++++++++++++++++++++++++++++++++++++
6 files changed, 540 insertions(+), 5 deletions(-)
create mode 100644 drivers/net/i40e/i40e_rxtx_vec.c
diff --git a/config/common_bsdapp b/config/common_bsdapp
index b37dcf4..3003da5 100644
--- a/config/common_bsdapp
+++ b/config/common_bsdapp
@@ -186,6 +186,8 @@ CONFIG_RTE_LIBRTE_I40E_DEBUG_TX=n
CONFIG_RTE_LIBRTE_I40E_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_I40E_DEBUG_DRIVER=n
CONFIG_RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC=y
+CONFIG_RTE_LIBRTE_I40E_INC_VECTOR=y
+CONFIG_RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE=y
CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n
CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF=4
CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM=4
diff --git a/config/common_linuxapp b/config/common_linuxapp
index 0de43d5..dadba4d 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -184,6 +184,8 @@ CONFIG_RTE_LIBRTE_I40E_DEBUG_TX=n
CONFIG_RTE_LIBRTE_I40E_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_I40E_DEBUG_DRIVER=n
CONFIG_RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC=y
+CONFIG_RTE_LIBRTE_I40E_INC_VECTOR=y
+CONFIG_RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE=y
CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n
CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF=4
CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM=4
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 55b7d31..d4695cb 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -95,6 +95,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_dcb.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index fd656d5..dfdc7d5 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1788,9 +1788,6 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
return txq->tx_rs_thresh;
}
-#define I40E_TD_CMD (I40E_TX_DESC_CMD_ICRC |\
- I40E_TX_DESC_CMD_EOP)
-
/* Populate 4 descriptors with data from 4 mbufs */
static inline void
tx4(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
@@ -2625,6 +2622,9 @@ i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
rxq->nb_rx_hold = 0;
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
+
+ rxq->rxrearm_start = 0;
+ rxq->rxrearm_nb = 0;
}
void
@@ -3063,3 +3063,25 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
return I40E_SUCCESS;
}
+
+/* Stubs needed for linkage when CONFIG_RTE_LIBRTE_I40E_INC_VECTOR is set to 'n' */
+uint16_t __attribute__((weak))
+i40e_recv_pkts_vec(
+ void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+int __attribute__((weak))
+i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
+{
+ return -1;
+}
+
+void __attribute__((weak))
+i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused*rxq)
+{
+ return;
+}
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 4385142..961a415 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -44,13 +44,27 @@
#define I40E_TX_FLAG_INSERT_VLAN ((uint32_t)(1 << 1))
#define I40E_TX_FLAG_TSYN ((uint32_t)(1 << 2))
-#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
#define RTE_PMD_I40E_RX_MAX_BURST 32
-#endif
+#define RTE_PMD_I40E_TX_MAX_BURST 32
+
+#define RTE_I40E_VPMD_RX_BURST 32
+#define RTE_I40E_VPMD_TX_BURST 32
+#define RTE_I40E_RXQ_REARM_THRESH 32
+#define RTE_I40E_MAX_RX_BURST RTE_I40E_RXQ_REARM_THRESH
+#define RTE_I40E_TX_MAX_FREE_BUF_SZ 64
+#define RTE_I40E_DESCS_PER_LOOP 4
#define I40E_RXBUF_SZ_1024 1024
#define I40E_RXBUF_SZ_2048 2048
+#undef container_of
+#define container_of(ptr, type, member) ({ \
+ typeof(((type *)0)->member)(*__mptr) = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); })
+
+#define I40E_TD_CMD (I40E_TX_DESC_CMD_ICRC |\
+ I40E_TX_DESC_CMD_EOP)
+
enum i40e_header_split_mode {
i40e_header_split_none = 0,
i40e_header_split_enabled = 1,
@@ -100,6 +114,11 @@ struct i40e_rx_queue {
struct rte_mbuf fake_mbuf; /**< dummy mbuf */
struct rte_mbuf *rx_stage[RTE_PMD_I40E_RX_MAX_BURST * 2];
#endif
+
+ uint16_t rxrearm_nb; /**< number of remaining to be re-armed */
+ uint16_t rxrearm_start; /**< the idx we start the re-arming from */
+ uint64_t mbuf_initializer; /**< value to init mbufs */
+
uint8_t port_id; /**< device port ID */
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise */
uint16_t queue_id; /**< RX queue index */
@@ -210,4 +229,9 @@ uint32_t i40e_dev_rx_queue_count(struct rte_eth_dev *dev,
uint16_t rx_queue_id);
int i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq);
+void i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq);
+
#endif /* _I40E_RXTX_H_ */
diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c
new file mode 100644
index 0000000..a95916b
--- /dev/null
+++ b/drivers/net/i40e/i40e_rxtx_vec.c
@@ -0,0 +1,484 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "base/i40e_prototype.h"
+#include "base/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+
+#include <tmmintrin.h>
+
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+static inline void
+i40e_rxq_rearm(struct i40e_rx_queue *rxq)
+{
+ int i;
+ uint16_t rx_id;
+
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+ struct rte_mbuf *mb0, *mb1;
+ __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
+ RTE_PKTMBUF_HEADROOM);
+ __m128i dma_addr0, dma_addr1;
+
+ rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+ /* Pull 'n' more MBUFs into the software ring */
+ if (rte_mempool_get_bulk(rxq->mp,
+ (void *)rxep,
+ RTE_I40E_RXQ_REARM_THRESH) < 0) {
+ if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
+ rxq->nb_rx_desc) {
+ dma_addr0 = _mm_setzero_si128();
+ for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
+ rxep[i].mbuf = &rxq->fake_mbuf;
+ _mm_store_si128((__m128i *)&rxdp[i].read,
+ dma_addr0);
+ }
+ }
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ RTE_I40E_RXQ_REARM_THRESH;
+ return;
+ }
+
+ /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+ for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
+ __m128i vaddr0, vaddr1;
+ uintptr_t p0, p1;
+
+ mb0 = rxep[0].mbuf;
+ mb1 = rxep[1].mbuf;
+
+ /* Flush mbuf with pkt template.
+ * Data to be rearmed is 6 bytes long.
+ * Though, RX will overwrite ol_flags that are coming next
+ * anyway. So overwrite whole 8 bytes with one load:
+ * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
+ */
+ p0 = (uintptr_t)&mb0->rearm_data;
+ *(uint64_t *)p0 = rxq->mbuf_initializer;
+ p1 = (uintptr_t)&mb1->rearm_data;
+ *(uint64_t *)p1 = rxq->mbuf_initializer;
+
+ /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
+ vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
+
+ /* convert pa to dma_addr hdr/data */
+ dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
+ dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
+
+ /* add headroom to pa values */
+ dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
+ dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);
+
+ /* flush desc with pa dma_addr */
+ _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
+ _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
+ }
+
+ rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;
+
+ rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+ /* Update the tail pointer on the NIC */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+}
+
+/* Handling the offload flags (olflags) field takes computation
+ * time when receiving packets. Therefore we provide a flag to disable
+ * the processing of the olflags field when they are not needed. This
+ * gives improved performance, at the cost of losing the offload info
+ * in the received packet
+ */
+#ifdef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
+
+static inline void
+desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+{
+ __m128i vlan0, vlan1, rss;
+ union {
+ uint16_t e[4];
+ uint64_t dword;
+ } vol;
+
+ /* mask everything except rss and vlan flags
+ * bit 2 is for vlan tag, bits 13:12 for rss
+ */
+ const __m128i rss_vlan_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x3004, 0x3004, 0x3004, 0x3004);
+
+ /* map rss and vlan type to rss hash and vlan flag */
+ const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, PKT_RX_VLAN_PKT,
+ 0, 0, 0, 0);
+
+ const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PKT_RX_FDIR, 0, PKT_RX_RSS_HASH, 0);
+
+ vlan0 = _mm_unpackhi_epi16(descs[0], descs[1]);
+ vlan1 = _mm_unpackhi_epi16(descs[2], descs[3]);
+ vlan0 = _mm_unpacklo_epi32(vlan0, vlan1);
+
+ vlan1 = _mm_and_si128(vlan0, rss_vlan_msk);
+ vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1);
+
+ rss = _mm_srli_epi16(vlan1, 12);
+ rss = _mm_shuffle_epi8(rss_flags, rss);
+
+ vlan0 = _mm_or_si128(vlan0, rss);
+ vol.dword = _mm_cvtsi128_si64(vlan0);
+
+ rx_pkts[0]->ol_flags = vol.e[0];
+ rx_pkts[1]->ol_flags = vol.e[1];
+ rx_pkts[2]->ol_flags = vol.e[2];
+ rx_pkts[3]->ol_flags = vol.e[3];
+}
+#else
+#define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
+#endif
+
+#define PKTLEN_SHIFT (6)
+#define PKTLEN_MASK (0x3FFF)
+/* The pkt len field in the descriptor is not byte-aligned, so a shift
+ * is needed to realign it
+ */
+static inline void
+desc_pktlen_align(__m128i descs[4])
+{
+ __m128i pktlen0, pktlen1, zero;
+ union {
+ uint16_t e[4];
+ uint64_t dword;
+ } vol;
+
+ /* mask everything except pktlen field*/
+ const __m128i pktlen_msk = _mm_set_epi32(PKTLEN_MASK, PKTLEN_MASK,
+ PKTLEN_MASK, PKTLEN_MASK);
+
+ pktlen0 = _mm_unpackhi_epi32(descs[0], descs[2]);
+ pktlen1 = _mm_unpackhi_epi32(descs[1], descs[3]);
+ pktlen0 = _mm_unpackhi_epi32(pktlen0, pktlen1);
+
+ zero = _mm_xor_si128(pktlen0, pktlen0);
+
+ pktlen0 = _mm_srli_epi32(pktlen0, PKTLEN_SHIFT);
+ pktlen0 = _mm_and_si128(pktlen0, pktlen_msk);
+
+ pktlen0 = _mm_packs_epi32(pktlen0, zero);
+ vol.dword = _mm_cvtsi128_si64(pktlen0);
+ /* let the descriptor byte 15-14 store the pkt len */
+ *((uint16_t *)&descs[0]+7) = vol.e[0];
+ *((uint16_t *)&descs[1]+7) = vol.e[1];
+ *((uint16_t *)&descs[2]+7) = vol.e[2];
+ *((uint16_t *)&descs[3]+7) = vol.e[3];
+}
+
+ /* vPMD receive routine, now only accept (nb_pkts == RTE_I40E_VPMD_RX_BURST)
+ * in one loop
+ *
+ * Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ * numbers of DD bits
+
+ */
+static inline uint16_t
+_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *sw_ring;
+ uint16_t nb_pkts_recd;
+ int pos;
+ uint64_t var;
+ __m128i shuf_msk;
+
+ __m128i crc_adjust = _mm_set_epi16(
+ 0, 0, 0, /* ignore non-length fields */
+ -rxq->crc_len, /* sub crc on data_len */
+ 0, /* ignore high-16bits of pkt_len */
+ -rxq->crc_len, /* sub crc on pkt_len */
+ 0, 0 /* ignore pkt_type field */
+ );
+ __m128i dd_check, eop_check;
+
+ /* nb_pkts shall be less than or equal to RTE_I40E_MAX_RX_BURST */
+ nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);
+
+ /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);
+
+ /* Just the act of getting into the function from the application is
+ * going to cost about 7 cycles
+ */
+ rxdp = rxq->rx_ring + rxq->rx_tail;
+
+ _mm_prefetch((const void *)rxdp, _MM_HINT_T0);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
+ i40e_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->wb.qword1.status_error_len &
+ rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ return 0;
+
+ /* 4 packets DD mask */
+ dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);
+
+ /* 4 packets EOP mask */
+ eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);
+
+ /* mask to shuffle from desc. to mbuf */
+ shuf_msk = _mm_set_epi8(
+ 7, 6, 5, 4, /* octet 4~7, 32bits rss */
+ 3, 2, /* octet 2~3, low 16 bits vlan_macip */
+ 15, 14, /* octet 15~14, 16 bits data_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 15, 14, /* octet 15~14, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 0xFF, 0xFF /*pkt_type set as unknown */
+ );
+
+ /* Cache is empty -> need to scan the buffer rings, but first move
+ * the next 'n' mbufs into the cache
+ */
+ sw_ring = &rxq->sw_ring[rxq->rx_tail];
+
+ /* A. load 4 packet in one loop
+ * [A*. mask out 4 unused dirty field in desc]
+ * B. copy 4 mbuf point from swring to rx_pkts
+ * C. calc the number of DD bits among the 4 packets
+ * [C*. extract the end-of-packet bit, if requested]
+ * D. fill info. from desc to mbuf
+ */
+
+ for (pos = 0, nb_pkts_recd = 0; pos < RTE_I40E_VPMD_RX_BURST;
+ pos += RTE_I40E_DESCS_PER_LOOP,
+ rxdp += RTE_I40E_DESCS_PER_LOOP) {
+ __m128i descs[RTE_I40E_DESCS_PER_LOOP];
+ __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+ __m128i zero, staterr, sterr_tmp1, sterr_tmp2;
+ __m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
+
+ /* B.1 load 1 mbuf point */
+ mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
+ /* Read desc statuses backwards to avoid race condition */
+ /* A.1 load 4 pkts desc */
+ descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
+
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
+
+ /* B.1 load 1 mbuf point */
+ mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
+
+ descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
+ /* A.1 load 2 pkts desc */
+ descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
+ descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
+
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+
+ if (split_packet) {
+ rte_prefetch0(&rx_pkts[pos]->cacheline1);
+ rte_prefetch0(&rx_pkts[pos + 1]->cacheline1);
+ rte_prefetch0(&rx_pkts[pos + 2]->cacheline1);
+ rte_prefetch0(&rx_pkts[pos + 3]->cacheline1);
+ }
+
+ /*shift the pktlen field*/
+ desc_pktlen_align(descs);
+
+ /* avoid compiler reorder optimization */
+ rte_compiler_barrier();
+
+ /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+ pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
+ pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);
+
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
+
+ desc_to_olflags_v(descs, &rx_pkts[pos]);
+
+ /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+ pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
+ pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
+
+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+ pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
+ pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);
+
+ /* C.2 get 4 pkts staterr value */
+ zero = _mm_xor_si128(dd_check, dd_check);
+ staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
+
+ /* D.3 copy final 3,4 data to rx_pkts */
+ _mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1,
+ pkt_mb4);
+ _mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1,
+ pkt_mb3);
+
+ /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
+ pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
+ pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
+
+ /* C* extract and record EOP bit */
+ if (split_packet) {
+ __m128i eop_shuf_mask = _mm_set_epi8(
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x04, 0x0C, 0x00, 0x08
+ );
+
+ /* and with mask to extract bits, flipping 1-0 */
+ __m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
+ /* the staterr values are not in order, as the count
+ * count of dd bits doesn't care. However, for end of
+ * packet tracking, we do care, so shuffle. This also
+ * compresses the 32-bit values to 8-bit
+ */
+ eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
+ /* store the resulting 32-bit value */
+ *(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
+ split_packet += RTE_I40E_DESCS_PER_LOOP;
+
+ /* zero-out next pointers */
+ rx_pkts[pos]->next = NULL;
+ rx_pkts[pos + 1]->next = NULL;
+ rx_pkts[pos + 2]->next = NULL;
+ rx_pkts[pos + 3]->next = NULL;
+ }
+
+ /* C.3 calc available number of desc */
+ staterr = _mm_and_si128(staterr, dd_check);
+ staterr = _mm_packs_epi32(staterr, zero);
+
+ /* D.3 copy final 1,2 data to rx_pkts */
+ _mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1,
+ pkt_mb2);
+ _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+ pkt_mb1);
+ /* C.4 calc available number of desc */
+ var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
+ nb_pkts_recd += var;
+ if (likely(var != RTE_I40E_DESCS_PER_LOOP))
+ break;
+ }
+
+ /* Update our internal tail pointer */
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+ rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+ return nb_pkts_recd;
+}
+
+ /* vPMD receive routine, now only accept (nb_pkts == RTE_I40E_VPMD_RX_BURST)
+ * in one loop
+ *
+ * Notice:
+ * - nb_pkts < RTE_I40E_VPMD_RX_BURST, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ * number of DD bits
+ */
+uint16_t
+i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+void __attribute__((cold))
+i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
+{
+ const unsigned mask = rxq->nb_rx_desc - 1;
+ unsigned i;
+
+ if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
+ return;
+
+ /* free all mbufs that are valid in the ring */
+ for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ rxq->rxrearm_nb = rxq->nb_rx_desc;
+
+ /* set all entries to NULL */
+ memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+}
+
+int __attribute__((cold))
+i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
+{
+ uintptr_t p;
+ struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.port = rxq->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ rxq->mbuf_initializer = *(uint64_t *)p;
+ return 0;
+}
--
1.9.3
* Re: [dpdk-dev] [PATCH 1/4 v3] add vector PMD RX for FVL
2015-10-30 13:01 ` [dpdk-dev] [PATCH 1/4 v3] add vector PMD RX " Zhe Tao
@ 2015-10-30 13:40 ` Liang, Cunming
0 siblings, 0 replies; 36+ messages in thread
From: Liang, Cunming @ 2015-10-30 13:40 UTC (permalink / raw)
To: Tao, Zhe, dev
Hi,
> -----Original Message-----
> From: Tao, Zhe
> Sent: Friday, October 30, 2015 9:02 PM
> To: dev@dpdk.org
> Cc: Tao, Zhe; Liang, Cunming
> Subject: [dpdk-dev][PATCH 1/4 v3] add vector PMD RX for FVL
>
> The vPMD RX function uses multi-buffer and SSE instructions to
> accelerate the RX path. Packet type (pktype) parsing is not supported
> by the vPMD RX for now, because it would decrease performance heavily.
>
> Signed-off-by: Zhe Tao <zhe.tao@intel.com>
> ---
> config/common_bsdapp | 2 +
> config/common_linuxapp | 2 +
> drivers/net/i40e/Makefile | 1 +
> drivers/net/i40e/i40e_rxtx.c | 28 ++-
> drivers/net/i40e/i40e_rxtx.h | 28 ++-
> drivers/net/i40e/i40e_rxtx_vec.c | 484
> +++++++++++++++++++++++++++++++++++++++
> 6 files changed, 540 insertions(+), 5 deletions(-)
> create mode 100644 drivers/net/i40e/i40e_rxtx_vec.c
>
[...]
> +void __attribute__((weak))
> +i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused*rxq)
> +{
> + return;
> +}
> diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
> index 4385142..961a415 100644
> --- a/drivers/net/i40e/i40e_rxtx.h
> +++ b/drivers/net/i40e/i40e_rxtx.h
> @@ -44,13 +44,27 @@
> #define I40E_TX_FLAG_INSERT_VLAN ((uint32_t)(1 << 1))
> #define I40E_TX_FLAG_TSYN ((uint32_t)(1 << 2))
>
> -#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
> #define RTE_PMD_I40E_RX_MAX_BURST 32
> -#endif
> +#define RTE_PMD_I40E_TX_MAX_BURST 32
> +
> +#define RTE_I40E_VPMD_RX_BURST 32
> +#define RTE_I40E_VPMD_TX_BURST 32
> +#define RTE_I40E_RXQ_REARM_THRESH 32
> +#define RTE_I40E_MAX_RX_BURST RTE_I40E_RXQ_REARM_THRESH
> +#define RTE_I40E_TX_MAX_FREE_BUF_SZ 64
> +#define RTE_I40E_DESCS_PER_LOOP 4
>
> #define I40E_RXBUF_SZ_1024 1024
> #define I40E_RXBUF_SZ_2048 2048
>
> +#undef container_of
> +#define container_of(ptr, type, member) ({ \
> + typeof(((type *)0)->member)(*__mptr) = (ptr); \
> + (type *)((char *)__mptr - offsetof(type, member)); })
> +
> +#define I40E_TD_CMD (I40E_TX_DESC_CMD_ICRC |\
> + I40E_TX_DESC_CMD_EOP)
> +
> enum i40e_header_split_mode {
> i40e_header_split_none = 0,
> i40e_header_split_enabled = 1,
> @@ -100,6 +114,11 @@ struct i40e_rx_queue {
> struct rte_mbuf fake_mbuf; /**< dummy mbuf */
> struct rte_mbuf *rx_stage[RTE_PMD_I40E_RX_MAX_BURST * 2];
> #endif
> +
> + uint16_t rxrearm_nb; /**< number of remaining to be re-armed */
> + uint16_t rxrearm_start; /**< the idx we start the re-arming from */
> + uint64_t mbuf_initializer; /**< value to init mbufs */
> +
> uint8_t port_id; /**< device port ID */
> uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise */
> uint16_t queue_id; /**< RX queue index */
> @@ -210,4 +229,9 @@ uint32_t i40e_dev_rx_queue_count(struct rte_eth_dev
> *dev,
> uint16_t rx_queue_id);
> int i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
>
> +uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
> + uint16_t nb_pkts);
> +int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq);
> +void i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq);
> +
> #endif /* _I40E_RXTX_H_ */
> diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c
> new file mode 100644
> index 0000000..a95916b
> --- /dev/null
> +++ b/drivers/net/i40e/i40e_rxtx_vec.c
> @@ -0,0 +1,484 @@
[...]
> +
> +#include <tmmintrin.h>
> +
> +#ifndef __INTEL_COMPILER
> +#pragma GCC diagnostic ignored "-Wcast-qual"
> +#endif
> +
> +static inline void
> +i40e_rxq_rearm(struct i40e_rx_queue *rxq)
> +{
> + int i;
> + uint16_t rx_id;
> +
Tiny nit: it's not necessary to leave a blank line between these two definitions.
> + volatile union i40e_rx_desc *rxdp;
> + struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
> + struct rte_mbuf *mb0, *mb1;
> + __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
> + RTE_PKTMBUF_HEADROOM);
> + __m128i dma_addr0, dma_addr1;
> +
[...]
> +
> + /* vPMD receive routine, now only accept (nb_pkts ==
> RTE_I40E_VPMD_RX_BURST)
> + * in one loop
> + *
> + * Notice:
> + * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
> + * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan
> RTE_I40E_VPMD_RX_BURST
> + * numbers of DD bits
> +
Remove the blank line.
> + */
> +static inline uint16_t
> +_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
> + uint16_t nb_pkts, uint8_t *split_packet)
> +{
[...]
* [dpdk-dev] [PATCH 2/4 v3] add vector PMD TX for FVL
2015-10-30 13:01 ` [dpdk-dev] [PATCH 0/4 v3] i40e: add vector PMD support for FVL Zhe Tao
2015-10-30 13:01 ` [dpdk-dev] [PATCH 1/4 v3] add vector PMD RX " Zhe Tao
@ 2015-10-30 13:01 ` Zhe Tao
2015-10-30 13:29 ` Liang, Cunming
2015-11-03 21:20 ` Stephen Hemminger
2015-10-30 13:01 ` [dpdk-dev] [PATCH 3/4 v3] add vector PMD scatter RX " Zhe Tao
` (2 subsequent siblings)
4 siblings, 2 replies; 36+ messages in thread
From: Zhe Tao @ 2015-10-30 13:01 UTC (permalink / raw)
To: dev
The vPMD TX path increases performance by using a faster mbuf release
method than the scalar TX path.
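A rough sketch of that idea (illustrative only, not the patch code; the
batch size mirrors RTE_I40E_TX_MAX_FREE_BUF_SZ): mbufs completed by the
hardware are handed back to their mempool in one bulk put whenever
consecutive mbufs share a pool, instead of one put per mbuf:

#include <rte_mbuf.h>
#include <rte_mempool.h>

#define BATCH_SZ 64	/* mirrors RTE_I40E_TX_MAX_FREE_BUF_SZ */

static inline void
bulk_release(struct rte_mbuf **mbufs, unsigned int n)
{
	struct rte_mbuf *batch[BATCH_SZ], *m;
	unsigned int i, nb = 0;

	for (i = 0; i < n; i++) {
		m = __rte_pktmbuf_prefree_seg(mbufs[i]);
		if (m == NULL)
			continue;
		/* flush when the pool changes or the batch is full */
		if (nb == BATCH_SZ || (nb > 0 && m->pool != batch[0]->pool)) {
			rte_mempool_put_bulk(batch[0]->pool,
					     (void **)batch, nb);
			nb = 0;
		}
		batch[nb++] = m;
	}
	if (nb > 0)
		rte_mempool_put_bulk(batch[0]->pool, (void **)batch, nb);
}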
Signed-off-by: Zhe Tao <zhe.tao@intel.com>
---
drivers/net/i40e/i40e_rxtx.c | 8 ++
drivers/net/i40e/i40e_rxtx.h | 3 +
drivers/net/i40e/i40e_rxtx_vec.c | 162 +++++++++++++++++++++++++++++++++++++++
3 files changed, 173 insertions(+)
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index dfdc7d5..8fac220 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -3085,3 +3085,11 @@ i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused*rxq)
{
return;
}
+
+uint16_t __attribute__((weak))
+i40e_xmit_pkts_vec(void __rte_unused *tx_queue,
+ struct rte_mbuf __rte_unused **tx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 961a415..6dea402 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -232,6 +232,9 @@ int i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq);
+int i40e_txq_vec_setup(struct i40e_tx_queue *txq);
void i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq);
+uint16_t i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
#endif /* _I40E_RXTX_H_ */
diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c
index a95916b..ba7987d 100644
--- a/drivers/net/i40e/i40e_rxtx_vec.c
+++ b/drivers/net/i40e/i40e_rxtx_vec.c
@@ -447,6 +447,162 @@ i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}
+static inline void
+vtx1(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf *pkt, uint64_t flags)
+{
+ uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
+ ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
+
+ __m128i descriptor = _mm_set_epi64x(high_qw,
+ pkt->buf_physaddr + pkt->data_off);
+ _mm_store_si128((__m128i *)txdp, descriptor);
+}
+
+static inline void
+vtx(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+ int i;
+
+ for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+ vtx1(txdp, *pkt, flags);
+}
+
+static inline int __attribute__((always_inline))
+i40e_tx_free_bufs(struct i40e_tx_queue *txq)
+{
+ struct i40e_tx_entry *txep;
+ uint32_t n;
+ uint32_t i;
+ int nb_free = 0;
+ struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];
+ /* check DD bits on threshold descriptor */
+ if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
+ rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+ return 0;
+
+ n = txq->tx_rs_thresh;
+
+ /* first buffer to free from S/W ring is at index
+ * tx_next_dd - (tx_rs_thresh-1)
+ */
+ txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
+ m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
+ if (likely(m != NULL)) {
+ free[0] = m;
+ nb_free = 1;
+ for (i = 1; i < n; i++) {
+ m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (likely(m != NULL)) {
+ if (likely(m->pool == free[0]->pool)) {
+ free[nb_free++] = m;
+ } else {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void *)free,
+ nb_free);
+ free[0] = m;
+ nb_free = 1;
+ }
+ }
+ }
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+ } else {
+ for (i = 1; i < n; i++) {
+ m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (m != NULL)
+ rte_mempool_put(m->pool, m);
+ }
+ }
+
+ /* buffers were freed, update counters */
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+ txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+ if (txq->tx_next_dd >= txq->nb_tx_desc)
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ return txq->tx_rs_thresh;
+}
+
+static inline void __attribute__((always_inline))
+tx_backlog_entry(struct i40e_tx_entry *txep,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i;
+
+ for (i = 0; i < (int)nb_pkts; ++i)
+ txep[i].mbuf = tx_pkts[i];
+}
+
+uint16_t
+i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+ volatile struct i40e_tx_desc *txdp;
+ struct i40e_tx_entry *txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = I40E_TD_CMD;
+ uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
+ int i;
+
+ /* crossing the tx_rs_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ i40e_tx_free_bufs(txq);
+
+ nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ tx_id = txq->tx_tail;
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_tx_desc - tx_id);
+ if (nb_commit >= n) {
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+ vtx1(txdp, *tx_pkts, flags);
+
+ vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ /* avoid reaching the end of the ring */
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+ }
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->tx_next_rs) {
+ txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ txq->tx_next_rs =
+ (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
+ }
+
+ txq->tx_tail = tx_id;
+
+ I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+ return nb_pkts;
+}
+
void __attribute__((cold))
i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
@@ -482,3 +638,9 @@ i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
rxq->mbuf_initializer = *(uint64_t *)p;
return 0;
}
+
+int __attribute__((cold))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
+{
+ return 0;
+}
--
1.9.3
* Re: [dpdk-dev] [PATCH 2/4 v3] add vector PMD TX for FVL
2015-10-30 13:01 ` [dpdk-dev] [PATCH 2/4 v3] add vector PMD TX " Zhe Tao
@ 2015-10-30 13:29 ` Liang, Cunming
2015-11-03 21:20 ` Stephen Hemminger
1 sibling, 0 replies; 36+ messages in thread
From: Liang, Cunming @ 2015-10-30 13:29 UTC (permalink / raw)
To: Tao, Zhe, dev
Hi,
> -----Original Message-----
> From: Tao, Zhe
> Sent: Friday, October 30, 2015 9:02 PM
> To: dev@dpdk.org
> Cc: Tao, Zhe; Liang, Cunming
> Subject: [dpdk-dev][PATCH 2/4 v3] add vector PMD TX for FVL
>
> The vPMD TX path increases performance by using a faster mbuf release
> method than the scalar TX path.
>
> Signed-off-by: Zhe Tao <zhe.tao@intel.com>
> ---
> drivers/net/i40e/i40e_rxtx.c | 8 ++
> drivers/net/i40e/i40e_rxtx.h | 3 +
> drivers/net/i40e/i40e_rxtx_vec.c | 162
> +++++++++++++++++++++++++++++++++++++++
> 3 files changed, 173 insertions(+)
> +
[...]
> +static inline int __attribute__((always_inline))
> +i40e_tx_free_bufs(struct i40e_tx_queue *txq)
> +{
> + struct i40e_tx_entry *txep;
> + uint32_t n;
> + uint32_t i;
> + int nb_free = 0;
> + struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];
Tiny format issue: a blank line is required here.
> + /* check DD bits on threshold descriptor */
> + if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
> + rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
> + rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
> + return 0;
> +
> + n = txq->tx_rs_thresh;
> +
[...]
* Re: [dpdk-dev] [PATCH 2/4 v3] add vector PMD TX for FVL
2015-10-30 13:01 ` [dpdk-dev] [PATCH 2/4 v3] add vector PMD TX " Zhe Tao
2015-10-30 13:29 ` Liang, Cunming
@ 2015-11-03 21:20 ` Stephen Hemminger
1 sibling, 0 replies; 36+ messages in thread
From: Stephen Hemminger @ 2015-11-03 21:20 UTC (permalink / raw)
To: Zhe Tao; +Cc: dev
On Fri, 30 Oct 2015 21:01:53 +0800
Zhe Tao <zhe.tao@intel.com> wrote:
> static inline int __attribute__((always_inline))
> +i40e_tx_free_bufs(struct i40e_tx_queue *txq)
> +{
> + struct i40e_tx_entry *txep;
> + uint32_t n;
> + uint32_t i;
> + int nb_free = 0;
> + struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];
Although C variable and function namespaces are different, it is best
not to name variables the same as standard library functions, to avoid
typo errors.
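A tiny illustration of the hazard (illustrative example, not from the
thread):

#include <stdlib.h>

static void
shadow_example(void)
{
	void *free[4];	/* hides stdlib free() in this scope */

	free[0] = malloc(16);	/* leaks; illustration only */
	/* free(free[0]) would no longer compile here, since "free" now
	 * names the array; readers and tools can also confuse the two.
	 * A name such as free_list[] or to_free[] avoids this. */
}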
* [dpdk-dev] [PATCH 3/4 v3] add vector PMD scatter RX for FVL
2015-10-30 13:01 ` [dpdk-dev] [PATCH 0/4 v3] i40e: add vector PMD support for FVL Zhe Tao
2015-10-30 13:01 ` [dpdk-dev] [PATCH 1/4 v3] add vector PMD RX " Zhe Tao
2015-10-30 13:01 ` [dpdk-dev] [PATCH 2/4 v3] add vector PMD TX " Zhe Tao
@ 2015-10-30 13:01 ` Zhe Tao
2015-10-30 13:46 ` Liang, Cunming
2015-10-30 13:01 ` [dpdk-dev] [PATCH 4/4 v3] add RX and TX selection function " Zhe Tao
2015-10-30 14:16 ` [dpdk-dev] [PATCH 0/4 v4] i40e: add vector PMD support " Zhe Tao
4 siblings, 1 reply; 36+ messages in thread
From: Zhe Tao @ 2015-10-30 13:01 UTC (permalink / raw)
To: dev
Add support for multiple segments per packet, needed when a received
packet exceeds the size of a single buffer.
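A rough sketch of the reassembly step (illustrative only; the real
logic, including CRC handling, lives in reassemble_packets() in this
patch): segments of a split packet are chained through mbuf->next while
the head mbuf accumulates the totals:

#include <rte_mbuf.h>

static inline void
chain_segment(struct rte_mbuf *first, struct rte_mbuf **last,
	      struct rte_mbuf *seg)
{
	(*last)->next = seg;		/* append the new segment */
	*last = seg;
	first->nb_segs++;		/* the head mbuf tracks totals */
	first->pkt_len += seg->data_len;
}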
Signed-off-by: Zhe Tao <zhe.tao@intel.com>
---
drivers/net/i40e/i40e_rxtx.c | 9 ++++
drivers/net/i40e/i40e_rxtx.h | 3 ++
drivers/net/i40e/i40e_rxtx_vec.c | 103 +++++++++++++++++++++++++++++++++++++++
3 files changed, 115 insertions(+)
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 8fac220..d37cbde 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -3074,6 +3074,15 @@ i40e_recv_pkts_vec(
return 0;
}
+uint16_t __attribute__((weak))
+i40e_recv_scattered_pkts_vec(
+ void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
int __attribute__((weak))
i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
{
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 6dea402..98d6e90 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -231,6 +231,9 @@ int i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t i40e_recv_scattered_pkts_vec(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq);
int i40e_txq_vec_setup(struct i40e_tx_queue *txq);
void i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq);
diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c
index ba7987d..50eafef 100644
--- a/drivers/net/i40e/i40e_rxtx_vec.c
+++ b/drivers/net/i40e/i40e_rxtx_vec.c
@@ -447,6 +447,109 @@ i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}
+static inline uint16_t
+reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
+ uint16_t nb_bufs, uint8_t *split_flags)
+{
+ struct rte_mbuf *pkts[RTE_I40E_VPMD_RX_BURST]; /*finished pkts*/
+ struct rte_mbuf *start = rxq->pkt_first_seg;
+ struct rte_mbuf *end = rxq->pkt_last_seg;
+ unsigned pkt_idx, buf_idx;
+
+ for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
+ if (end != NULL) {
+ /* processing a split packet */
+ end->next = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+
+ start->nb_segs++;
+ start->pkt_len += rx_bufs[buf_idx]->data_len;
+ end = end->next;
+
+ if (!split_flags[buf_idx]) {
+ /* it's the last packet of the set */
+ start->hash = end->hash;
+ start->ol_flags = end->ol_flags;
+ /* we need to strip crc for the whole packet */
+ start->pkt_len -= rxq->crc_len;
+ if (end->data_len > rxq->crc_len) {
+ end->data_len -= rxq->crc_len;
+ } else {
+ /* free up last mbuf */
+ struct rte_mbuf *secondlast = start;
+
+ while (secondlast->next != end)
+ secondlast = secondlast->next;
+ secondlast->data_len -= (rxq->crc_len -
+ end->data_len);
+ secondlast->next = NULL;
+ rte_pktmbuf_free_seg(end);
+ end = secondlast;
+ }
+ pkts[pkt_idx++] = start;
+ start = end = NULL;
+ }
+ } else {
+ /* not processing a split packet */
+ if (!split_flags[buf_idx]) {
+ /* not a split packet, save and skip */
+ pkts[pkt_idx++] = rx_bufs[buf_idx];
+ continue;
+ }
+ end = start = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+ rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
+ }
+ }
+
+ /* save the partial packet for next time */
+ rxq->pkt_first_seg = start;
+ rxq->pkt_last_seg = end;
+ memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
+ return pkt_idx;
+}
+
+ /* vPMD receive routine that reassembles scattered packets
+ *
+ * Notice:
+ * - now only accept (nb_pkts == RTE_I40E_VPMD_RX_BURST)
+ */
+uint16_t
+i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+
+ struct i40e_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+ if (rxq->pkt_first_seg == NULL &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+ /* reassemble any packets that need reassembly*/
+ unsigned i = 0;
+
+ if (rxq->pkt_first_seg == NULL) {
+ /* find the first split flag, and only reassemble then*/
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ }
+ return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
static inline void
vtx1(volatile struct i40e_tx_desc *txdp,
struct rte_mbuf *pkt, uint64_t flags)
--
1.9.3
* Re: [dpdk-dev] [PATCH 3/4 v3] add vector PMD scatter RX for FVL
2015-10-30 13:01 ` [dpdk-dev] [PATCH 3/4 v3] add vector PMD scatter RX " Zhe Tao
@ 2015-10-30 13:46 ` Liang, Cunming
0 siblings, 0 replies; 36+ messages in thread
From: Liang, Cunming @ 2015-10-30 13:46 UTC (permalink / raw)
To: Tao, Zhe, dev
Hi,
> -----Original Message-----
> From: Tao, Zhe
> Sent: Friday, October 30, 2015 9:02 PM
> To: dev@dpdk.org
> Cc: Tao, Zhe; Liang, Cunming
> Subject: [dpdk-dev][PATCH 3/4 v3] add vector PMD scatter RX for FVL
>
> Add support for multiple segments per packet, needed when a received
> packet exceeds the size of a single buffer.
>
> Signed-off-by: Zhe Tao <zhe.tao@intel.com>
> ---
> drivers/net/i40e/i40e_rxtx.c | 9 ++++
> drivers/net/i40e/i40e_rxtx.h | 3 ++
> drivers/net/i40e/i40e_rxtx_vec.c | 103
> +++++++++++++++++++++++++++++++++++++++
> 3 files changed, 115 insertions(+)
>
[...]
> + /* save the partial packet for next time */
> + rxq->pkt_first_seg = start;
> + rxq->pkt_last_seg = end;
> + memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
> + return pkt_idx;
> +}
> +
> + /* vPMD receive routine that reassembles scattered packets
> + *
> + * Notice:
> + * - now only accept (nb_pkts == RTE_I40E_VPMD_RX_BURST)
> + */
I think the comment here is not correct, please have a check.
> +uint16_t
> +i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
> + uint16_t nb_pkts)
> +{
> +
> + struct i40e_rx_queue *rxq = rx_queue;
> + uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
> +
> + /* get some new buffers */
> + uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
> + split_flags);
> + if (nb_bufs == 0)
> + return 0;
> +
* [dpdk-dev] [PATCH 4/4 v3] add RX and TX selection function for FVL
2015-10-30 13:01 ` [dpdk-dev] [PATCH 0/4 v3] i40e: add vector PMD support for FVL Zhe Tao
` (2 preceding siblings ...)
2015-10-30 13:01 ` [dpdk-dev] [PATCH 3/4 v3] add vector PMD scatter RX " Zhe Tao
@ 2015-10-30 13:01 ` Zhe Tao
2015-10-30 14:16 ` [dpdk-dev] [PATCH 0/4 v4] i40e: add vector PMD support " Zhe Tao
4 siblings, 0 replies; 36+ messages in thread
From: Zhe Tao @ 2015-10-30 13:01 UTC (permalink / raw)
To: dev
Let the FVL PMD select which RX and TX functions should be used
according to the queue configuration.
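A condensed sketch of the RX selection this patch introduces
(illustrative helper only; the real i40e_set_rx_function() assigns
dev->rx_pkt_burst directly, and this assumes the listed burst functions
are in scope):

#include <stdbool.h>
#include <rte_ethdev.h>

static eth_rx_burst_t
pick_rx_burst(bool scattered, bool vec_allowed, bool bulk_alloc_allowed)
{
	if (scattered)
		return vec_allowed ? i40e_recv_scattered_pkts_vec
				   : i40e_recv_scattered_pkts;
	if (vec_allowed)
		return i40e_recv_pkts_vec;
	if (bulk_alloc_allowed)
		return i40e_recv_pkts_bulk_alloc; /* bulk-alloc build only */
	return i40e_recv_pkts;
}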
Signed-off-by: Zhe Tao <zhe.tao@intel.com>
---
doc/guides/rel_notes/release_2_2.rst | 4 +
drivers/net/i40e/i40e_ethdev.c | 20 +++-
drivers/net/i40e/i40e_ethdev.h | 6 ++
drivers/net/i40e/i40e_ethdev_vf.c | 28 ++++--
drivers/net/i40e/i40e_rxtx.c | 182 ++++++++++++++++++++++++++++++++---
drivers/net/i40e/i40e_rxtx.h | 6 ++
drivers/net/i40e/i40e_rxtx_vec.c | 32 ++++++
7 files changed, 258 insertions(+), 20 deletions(-)
diff --git a/doc/guides/rel_notes/release_2_2.rst b/doc/guides/rel_notes/release_2_2.rst
index bc9b00f..c78ceca 100644
--- a/doc/guides/rel_notes/release_2_2.rst
+++ b/doc/guides/rel_notes/release_2_2.rst
@@ -39,6 +39,10 @@ Drivers
Fixed i40e issue that occurred when a DPDK application didn't initialize
ports if memory wasn't available on socket 0.
+* **i40e: Add vector PMD support for FVL.**
+
+ Add vector RX/TX support for FVL.
+
* **vhost: Fixed Qemu shutdown.**
Fixed issue with libvirt ``virsh destroy`` not killing the VM.
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 2dd9fdc..153be45 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -403,8 +403,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
* has already done this work. Only check we don't need a different
* RX function */
if (rte_eal_process_type() != RTE_PROC_PRIMARY){
- if (dev->data->scattered_rx)
- dev->rx_pkt_burst = i40e_recv_scattered_pkts;
+ i40e_set_rx_function(dev);
+ i40e_set_tx_function(dev);
return 0;
}
pci_dev = dev->pci_dev;
@@ -670,10 +670,20 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
static int
i40e_dev_configure(struct rte_eth_dev *dev)
{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
int ret;
+ /* Initialize to TRUE. If any of Rx queues doesn't meet the
+ * bulk allocation or vector Rx preconditions we will reset it.
+ */
+ ad->rx_bulk_alloc_allowed = true;
+ ad->rx_vec_allowed = true;
+ ad->tx_simple_allowed = true;
+ ad->tx_vec_allowed = true;
+
if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
ret = i40e_fdir_setup(pf);
if (ret != I40E_SUCCESS) {
@@ -3707,6 +3717,9 @@ i40e_dev_tx_init(struct i40e_pf *pf)
if (ret != I40E_SUCCESS)
break;
}
+ if (ret == I40E_SUCCESS)
+ i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
+ ->eth_dev);
return ret;
}
@@ -3733,6 +3746,9 @@ i40e_dev_rx_init(struct i40e_pf *pf)
break;
}
}
+ if (ret == I40E_SUCCESS)
+ i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
+ ->eth_dev);
return ret;
}
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 6185657..ab78c54 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -462,6 +462,12 @@ struct i40e_adapter {
struct i40e_pf pf;
struct i40e_vf vf;
};
+
+ /* for vector PMD */
+ bool rx_bulk_alloc_allowed;
+ bool rx_vec_allowed;
+ bool tx_simple_allowed;
+ bool tx_vec_allowed;
};
int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index b694400..b8ebacb 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1182,8 +1182,8 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
* has already done this work.
*/
if (rte_eal_process_type() != RTE_PROC_PRIMARY){
- if (eth_dev->data->scattered_rx)
- eth_dev->rx_pkt_burst = i40e_recv_scattered_pkts;
+ i40e_set_rx_function(eth_dev);
+ i40e_set_tx_function(eth_dev);
return 0;
}
@@ -1277,6 +1277,17 @@ PMD_REGISTER_DRIVER(rte_i40evf_driver);
static int
i40evf_dev_configure(struct rte_eth_dev *dev)
{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ /* Initialize to TRUE. If any of Rx queues doesn't meet the bulk
+ * allocation or vector Rx preconditions we will reset it.
+ */
+ ad->rx_bulk_alloc_allowed = true;
+ ad->rx_vec_allowed = true;
+ ad->tx_simple_allowed = true;
+ ad->tx_vec_allowed = true;
+
return i40evf_init_vlan(dev);
}
@@ -1508,7 +1519,6 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
if (dev_data->dev_conf.rxmode.enable_scatter ||
(rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
dev_data->scattered_rx = 1;
- dev->rx_pkt_burst = i40e_recv_scattered_pkts;
}
return 0;
@@ -1519,6 +1529,7 @@ i40evf_rx_init(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
uint16_t i;
+ int ret = I40E_SUCCESS;
struct i40e_rx_queue **rxq =
(struct i40e_rx_queue **)dev->data->rx_queues;
@@ -1526,11 +1537,14 @@ i40evf_rx_init(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
if (!rxq[i] || !rxq[i]->q_set)
continue;
- if (i40evf_rxq_init(dev, rxq[i]) < 0)
- return -EFAULT;
+ ret = i40evf_rxq_init(dev, rxq[i]);
+ if (ret != I40E_SUCCESS)
+ break;
}
+ if (ret == I40E_SUCCESS)
+ i40e_set_rx_function(dev);
- return 0;
+ return ret;
}
static void
@@ -1543,6 +1557,8 @@ i40evf_tx_init(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_tx_queues; i++)
txq[i]->qtx_tail = hw->hw_addr + I40E_QTX_TAIL1(i);
+
+ i40e_set_tx_function(dev);
}
static inline void
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index d37cbde..8731712 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -2105,6 +2105,8 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
struct i40e_vsi *vsi;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct i40e_rx_queue *rxq;
const struct rte_memzone *rz;
uint32_t ring_size;
@@ -2213,13 +2215,12 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
- if (!use_def_burst_func && !dev->data->scattered_rx) {
+ if (!use_def_burst_func) {
#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
"satisfied. Rx Burst Bulk Alloc function will be "
"used on port=%d, queue=%d.",
rxq->port_id, rxq->queue_id);
- dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
} else {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -2227,6 +2228,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
"or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
"not enabled on port=%d, queue=%d.",
rxq->port_id, rxq->queue_id);
+ ad->rx_bulk_alloc_allowed = false;
}
return 0;
@@ -2488,14 +2490,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
dev->data->tx_queues[queue_idx] = txq;
/* Use a simple TX queue without offloads or multi segs if possible */
- if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS) &&
- (txq->tx_rs_thresh >= I40E_TX_MAX_BURST)) {
- PMD_INIT_LOG(INFO, "Using simple tx path");
- dev->tx_pkt_burst = i40e_xmit_pkts_simple;
- } else {
- PMD_INIT_LOG(INFO, "Using full-featured tx path");
- dev->tx_pkt_burst = i40e_xmit_pkts;
- }
+ i40e_set_tx_function_flag(dev, txq);
return 0;
}
@@ -2564,6 +2559,12 @@ i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq)
{
uint16_t i;
+ /* SSE Vector driver has a different way of releasing mbufs. */
+ if (rxq && rxq->rx_using_sse) {
+ i40e_rx_queue_release_mbufs_vec(rxq);
+ return;
+ }
+
if (!rxq || !rxq->sw_ring) {
PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
return;
@@ -2837,7 +2838,6 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
int err = I40E_SUCCESS;
struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(rxq->vsi);
- struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
uint16_t pf_q = rxq->reg_idx;
uint16_t buf_size;
struct i40e_hmc_obj_rxq rx_ctx;
@@ -2893,7 +2893,6 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
/* Check if scattered RX needs to be used. */
if ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
dev_data->scattered_rx = 1;
- dev->rx_pkt_burst = i40e_recv_scattered_pkts;
}
/* Init the RX tail register. */
@@ -3064,7 +3063,159 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
return I40E_SUCCESS;
}
+void __attribute__((cold))
+i40e_set_rx_function(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ uint16_t rx_using_sse, i;
+ /* In order to allow Vector Rx there are a few configuration
+ * conditions to be met and Rx Bulk Allocation should be allowed.
+ */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ if (i40e_rx_vec_dev_conf_condition_check(dev) ||
+ !ad->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet"
+ " Vector Rx preconditions",
+ dev->data->port_id);
+
+ ad->rx_vec_allowed = false;
+ }
+ if (ad->rx_vec_allowed) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct i40e_rx_queue *rxq =
+ dev->data->rx_queues[i];
+
+ if (i40e_rxq_vec_setup(rxq)) {
+ ad->rx_vec_allowed = false;
+ break;
+ }
+ }
+ }
+ }
+
+ if (dev->data->scattered_rx) {
+ /* Set the non-LRO scattered callback: there are Vector and
+ * single allocation versions.
+ */
+ if (ad->rx_vec_allowed) {
+ PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
+ "callback (port=%d).",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_scattered_pkts_vec;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
+ "allocation callback (port=%d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst = i40e_recv_scattered_pkts;
+ }
+ /* If parameters allow we are going to choose between the following
+ * callbacks:
+ * - Vector
+ * - Bulk Allocation
+ * - Single buffer allocation (the simplest one)
+ */
+ } else if (ad->rx_vec_allowed) {
+ PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
+ "burst size no less than %d (port=%d).",
+ RTE_I40E_DESCS_PER_LOOP,
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_pkts_vec;
+ } else if (ad->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+ "satisfied. Rx Burst Bulk Alloc function "
+ "will be used on port=%d.",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
+ "satisfied, or Scattered Rx is requested "
+ "(port=%d).",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_pkts;
+ }
+
+ /* Propagate information about RX function choice through all queues. */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ rx_using_sse =
+ (dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
+ dev->rx_pkt_burst == i40e_recv_pkts_vec);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
+
+ rxq->rx_using_sse = rx_using_sse;
+ }
+ }
+}
+
+void __attribute__((cold))
+i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ /* Use a simple Tx queue (no offloads, no multi segs) if possible */
+ if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS)
+ && (txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST)) {
+ if (txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ) {
+ PMD_INIT_LOG(DEBUG, "Vector tx"
+ " can be enabled on this txq.");
+
+ } else {
+ ad->tx_vec_allowed = false;
+ }
+ } else {
+ ad->tx_simple_allowed = false;
+ }
+}
+
+void __attribute__((cold))
+i40e_set_tx_function(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ int i;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ if (ad->tx_vec_allowed) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct i40e_tx_queue *txq =
+ dev->data->tx_queues[i];
+
+ if (i40e_txq_vec_setup(txq)) {
+ ad->tx_vec_allowed = false;
+ break;
+ }
+ }
+ }
+ }
+
+ if (ad->tx_simple_allowed) {
+ if (ad->tx_vec_allowed) {
+ PMD_INIT_LOG(DEBUG, "Vector tx finally be used.");
+ dev->tx_pkt_burst = i40e_xmit_pkts_vec;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
+ dev->tx_pkt_burst = i40e_xmit_pkts_simple;
+ }
+ } else {
+ PMD_INIT_LOG(DEBUG, "Xmit tx finally be used.");
+ dev->tx_pkt_burst = i40e_xmit_pkts;
+ }
+}
+
/* Stubs needed for linkage when CONFIG_RTE_LIBRTE_I40E_INC_VECTOR is set to 'n' */
+int __attribute__((weak))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
+{
+ return -1;
+}
+
uint16_t __attribute__((weak))
i40e_recv_pkts_vec(
void __rte_unused *rx_queue,
@@ -3089,6 +3240,12 @@ i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
return -1;
}
+int __attribute__((weak))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
+{
+ return -1;
+}
+
void __attribute__((weak))
i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused *rxq)
{
@@ -3102,3 +3259,4 @@ i40e_xmit_pkts_vec(void __rte_unused *tx_queue,
{
return 0;
}
+
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 98d6e90..39cb95a 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -132,6 +132,7 @@ struct i40e_rx_queue {
uint8_t hs_mode; /* Header Split mode */
bool q_set; /**< indicate if rx queue has been configured */
bool rx_deferred_start; /**< don't start this queue in dev start */
+ uint16_t rx_using_sse; /**< flag indicating whether the vPMD is used for Rx */
};
struct i40e_tx_entry {
@@ -234,10 +235,15 @@ uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t i40e_recv_scattered_pkts_vec(void *rx_queue,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+int i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq);
int i40e_txq_vec_setup(struct i40e_tx_queue *txq);
void i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq);
uint16_t i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+void i40e_set_rx_function(struct rte_eth_dev *dev);
+void i40e_set_tx_function_flag(struct rte_eth_dev *dev,
+ struct i40e_tx_queue *txq);
+void i40e_set_tx_function(struct rte_eth_dev *dev);
#endif /* _I40E_RXTX_H_ */
diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c
index 50eafef..f5b2db6 100644
--- a/drivers/net/i40e/i40e_rxtx_vec.c
+++ b/drivers/net/i40e/i40e_rxtx_vec.c
@@ -747,3 +747,35 @@ i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
{
return 0;
}
+
+int __attribute__((cold))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
+{
+#ifndef RTE_LIBRTE_IEEE1588
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+
+#ifndef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
+ /* without rx ol_flags, no VLAN flag can be reported */
+ if (rxmode->hw_vlan_strip != 0 ||
+ rxmode->hw_vlan_extend != 0)
+ return -1;
+#endif
+
+ /* no fdir support */
+ if (fconf->mode != RTE_FDIR_MODE_NONE)
+ return -1;
+
+ /* - no csum error report support
+ * - no header split support
+ */
+ if (rxmode->hw_ip_checksum == 1 ||
+ rxmode->header_split == 1)
+ return -1;
+
+ return 0;
+#else
+ RTE_SET_USED(dev);
+ return -1;
+#endif
+}
--
1.9.3
^ permalink raw reply [flat|nested] 36+ messages in thread
* [dpdk-dev] [PATCH 0/4 v4] i40e: add vector PMD support for FVL
2015-10-30 13:01 ` [dpdk-dev] [PATCH 0/4 v3] i40e: add vector PMD support for FVL Zhe Tao
` (3 preceding siblings ...)
2015-10-30 13:01 ` [dpdk-dev] [PATCH 4/4 v3] add RX and TX selection function " Zhe Tao
@ 2015-10-30 14:16 ` Zhe Tao
2015-10-30 14:16 ` [dpdk-dev] [PATCH 1/4 v4] add vector PMD RX " Zhe Tao
` (4 more replies)
4 siblings, 5 replies; 36+ messages in thread
From: Zhe Tao @ 2015-10-30 14:16 UTC (permalink / raw)
To: dev
This patch set adds vector PMD support for FVL.
The FVL vPMD works the same way as the ixgbe one does.
All the functionality has been tested.
PATCH v1: add the vector PMD support for FVL
PATCH v2: remove the extra code from the shared code
change the prefetch position for split packet support
update the release note
PATCH v3: make the new patch set
PATCH v4: fix some format issues
Zhe Tao (4):
add vector PMD RX for FVL
add vector PMD TX for FVL
add vector PMD scatter RX for FVL
add RX and TX selection function for FVL
config/common_bsdapp | 2 +
config/common_linuxapp | 2 +
doc/guides/rel_notes/release_2_2.rst | 4 +
drivers/net/i40e/Makefile | 1 +
drivers/net/i40e/i40e_ethdev.c | 20 +-
drivers/net/i40e/i40e_ethdev.h | 6 +
drivers/net/i40e/i40e_ethdev_vf.c | 28 +-
drivers/net/i40e/i40e_rxtx.c | 227 +++++++++-
drivers/net/i40e/i40e_rxtx.h | 40 +-
drivers/net/i40e/i40e_rxtx_vec.c | 777 +++++++++++++++++++++++++++++++++++
10 files changed, 1082 insertions(+), 25 deletions(-)
create mode 100644 drivers/net/i40e/i40e_rxtx_vec.c
--
1.9.3
^ permalink raw reply [flat|nested] 36+ messages in thread
* [dpdk-dev] [PATCH 1/4 v4] add vector PMD RX for FVL
2015-10-30 14:16 ` [dpdk-dev] [PATCH 0/4 v4] i40e: add vector PMD support " Zhe Tao
@ 2015-10-30 14:16 ` Zhe Tao
2015-10-30 15:33 ` Thomas Monjalon
2015-10-30 14:16 ` [dpdk-dev] [PATCH 2/4 v4] add vector PMD TX " Zhe Tao
` (3 subsequent siblings)
4 siblings, 1 reply; 36+ messages in thread
From: Zhe Tao @ 2015-10-30 14:16 UTC (permalink / raw)
To: dev
The vPMD RX function uses multi-buffer and SSE instructions to
accelerate RX, but pktype cannot be supported by the vPMD RX for now,
because reporting it would decrease performance heavily.
Signed-off-by: Zhe Tao <zhe.tao@intel.com>
---
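(Not part of the patch: a minimal usage sketch, assuming a port already
configured and started with this PMD; poll_port and BURST_SIZE are
illustrative names. A burst of 32 matches RTE_I40E_VPMD_RX_BURST, and the
vector routine returns no packets for bursts smaller than
RTE_I40E_DESCS_PER_LOOP.)

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32	/* == RTE_I40E_VPMD_RX_BURST */

/* Poll one Rx queue once and drop whatever was received. */
static void
poll_port(uint8_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[BURST_SIZE];
	uint16_t i, nb_rx;

	/* A burst of 32 lets the vector path run its
	 * 4-descriptors-per-loop SSE code at full speed. */
	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, BURST_SIZE);
	for (i = 0; i < nb_rx; i++)
		rte_pktmbuf_free(pkts[i]);
}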
config/common_bsdapp | 2 +
config/common_linuxapp | 2 +
drivers/net/i40e/Makefile | 1 +
drivers/net/i40e/i40e_rxtx.c | 28 ++-
drivers/net/i40e/i40e_rxtx.h | 28 ++-
drivers/net/i40e/i40e_rxtx_vec.c | 482 +++++++++++++++++++++++++++++++++++++++
6 files changed, 538 insertions(+), 5 deletions(-)
create mode 100644 drivers/net/i40e/i40e_rxtx_vec.c
diff --git a/config/common_bsdapp b/config/common_bsdapp
index b37dcf4..3003da5 100644
--- a/config/common_bsdapp
+++ b/config/common_bsdapp
@@ -186,6 +186,8 @@ CONFIG_RTE_LIBRTE_I40E_DEBUG_TX=n
CONFIG_RTE_LIBRTE_I40E_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_I40E_DEBUG_DRIVER=n
CONFIG_RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC=y
+CONFIG_RTE_LIBRTE_I40E_INC_VECTOR=y
+CONFIG_RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE=y
CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n
CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF=4
CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM=4
diff --git a/config/common_linuxapp b/config/common_linuxapp
index 0de43d5..dadba4d 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -184,6 +184,8 @@ CONFIG_RTE_LIBRTE_I40E_DEBUG_TX=n
CONFIG_RTE_LIBRTE_I40E_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_I40E_DEBUG_DRIVER=n
CONFIG_RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC=y
+CONFIG_RTE_LIBRTE_I40E_INC_VECTOR=y
+CONFIG_RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE=y
CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n
CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF=4
CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM=4
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 55b7d31..d4695cb 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -95,6 +95,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_dcb.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index fd656d5..dfdc7d5 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1788,9 +1788,6 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
return txq->tx_rs_thresh;
}
-#define I40E_TD_CMD (I40E_TX_DESC_CMD_ICRC |\
- I40E_TX_DESC_CMD_EOP)
-
/* Populate 4 descriptors with data from 4 mbufs */
static inline void
tx4(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
@@ -2625,6 +2622,9 @@ i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
rxq->nb_rx_hold = 0;
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
+
+ rxq->rxrearm_start = 0;
+ rxq->rxrearm_nb = 0;
}
void
@@ -3063,3 +3063,25 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
return I40E_SUCCESS;
}
+
+/* Stubs needed for linkage when CONFIG_RTE_LIBRTE_I40E_INC_VECTOR is set to 'n' */
+uint16_t __attribute__((weak))
+i40e_recv_pkts_vec(
+ void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+int __attribute__((weak))
+i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
+{
+ return -1;
+}
+
+void __attribute__((weak))
+i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused *rxq)
+{
+ return;
+}
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 4385142..961a415 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -44,13 +44,27 @@
#define I40E_TX_FLAG_INSERT_VLAN ((uint32_t)(1 << 1))
#define I40E_TX_FLAG_TSYN ((uint32_t)(1 << 2))
-#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
#define RTE_PMD_I40E_RX_MAX_BURST 32
-#endif
+#define RTE_PMD_I40E_TX_MAX_BURST 32
+
+#define RTE_I40E_VPMD_RX_BURST 32
+#define RTE_I40E_VPMD_TX_BURST 32
+#define RTE_I40E_RXQ_REARM_THRESH 32
+#define RTE_I40E_MAX_RX_BURST RTE_I40E_RXQ_REARM_THRESH
+#define RTE_I40E_TX_MAX_FREE_BUF_SZ 64
+#define RTE_I40E_DESCS_PER_LOOP 4
#define I40E_RXBUF_SZ_1024 1024
#define I40E_RXBUF_SZ_2048 2048
+#undef container_of
+#define container_of(ptr, type, member) ({ \
+ typeof(((type *)0)->member)(*__mptr) = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); })
+
+#define I40E_TD_CMD (I40E_TX_DESC_CMD_ICRC |\
+ I40E_TX_DESC_CMD_EOP)
+
enum i40e_header_split_mode {
i40e_header_split_none = 0,
i40e_header_split_enabled = 1,
@@ -100,6 +114,11 @@ struct i40e_rx_queue {
struct rte_mbuf fake_mbuf; /**< dummy mbuf */
struct rte_mbuf *rx_stage[RTE_PMD_I40E_RX_MAX_BURST * 2];
#endif
+
+ uint16_t rxrearm_nb; /**< number of remaining to be re-armed */
+ uint16_t rxrearm_start; /**< the idx we start the re-arming from */
+ uint64_t mbuf_initializer; /**< value to init mbufs */
+
uint8_t port_id; /**< device port ID */
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise */
uint16_t queue_id; /**< RX queue index */
@@ -210,4 +229,9 @@ uint32_t i40e_dev_rx_queue_count(struct rte_eth_dev *dev,
uint16_t rx_queue_id);
int i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq);
+void i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq);
+
#endif /* _I40E_RXTX_H_ */
diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c
new file mode 100644
index 0000000..aa7f52a
--- /dev/null
+++ b/drivers/net/i40e/i40e_rxtx_vec.c
@@ -0,0 +1,482 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "base/i40e_prototype.h"
+#include "base/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+
+#include <tmmintrin.h>
+
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+static inline void
+i40e_rxq_rearm(struct i40e_rx_queue *rxq)
+{
+ int i;
+ uint16_t rx_id;
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+ struct rte_mbuf *mb0, *mb1;
+ __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM,
+ RTE_PKTMBUF_HEADROOM);
+ __m128i dma_addr0, dma_addr1;
+
+ rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+ /* Pull 'n' more MBUFs into the software ring */
+ if (rte_mempool_get_bulk(rxq->mp,
+ (void *)rxep,
+ RTE_I40E_RXQ_REARM_THRESH) < 0) {
+ if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
+ rxq->nb_rx_desc) {
+ dma_addr0 = _mm_setzero_si128();
+ for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
+ rxep[i].mbuf = &rxq->fake_mbuf;
+ _mm_store_si128((__m128i *)&rxdp[i].read,
+ dma_addr0);
+ }
+ }
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ RTE_I40E_RXQ_REARM_THRESH;
+ return;
+ }
+
+ /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+ for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
+ __m128i vaddr0, vaddr1;
+ uintptr_t p0, p1;
+
+ mb0 = rxep[0].mbuf;
+ mb1 = rxep[1].mbuf;
+
+ /* Flush mbuf with pkt template.
+ * Data to be rearmed is 6 bytes long.
+ * Though, RX will overwrite ol_flags that are coming next
+ * anyway. So overwrite the whole 8 bytes with one store:
+ * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
+ */
+ p0 = (uintptr_t)&mb0->rearm_data;
+ *(uint64_t *)p0 = rxq->mbuf_initializer;
+ p1 = (uintptr_t)&mb1->rearm_data;
+ *(uint64_t *)p1 = rxq->mbuf_initializer;
+
+ /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
+ vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
+
+ /* convert pa to dma_addr hdr/data */
+ dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0);
+ dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1);
+
+ /* add headroom to pa values */
+ dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
+ dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);
+
+ /* flush desc with pa dma_addr */
+ _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
+ _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
+ }
+
+ rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;
+
+ rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+ /* Update the tail pointer on the NIC */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+}
+
+/* Handling the offload flags (olflags) field takes computation
+ * time when receiving packets. Therefore we provide a flag to disable
+ * the processing of the olflags field when they are not needed. This
+ * gives improved performance, at the cost of losing the offload info
+ * in the received packet
+ */
+#ifdef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
+
+static inline void
+desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+{
+ __m128i vlan0, vlan1, rss;
+ union {
+ uint16_t e[4];
+ uint64_t dword;
+ } vol;
+
+ /* mask everything except rss and vlan flags
+ * bit 2 is for vlan tag, bits 13:12 for rss
+ */
+ const __m128i rss_vlan_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x3004, 0x3004, 0x3004, 0x3004);
+
+ /* map rss and vlan type to rss hash and vlan flag */
+ const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, PKT_RX_VLAN_PKT,
+ 0, 0, 0, 0);
+
+ const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ PKT_RX_FDIR, 0, PKT_RX_RSS_HASH, 0);
+
+ vlan0 = _mm_unpackhi_epi16(descs[0], descs[1]);
+ vlan1 = _mm_unpackhi_epi16(descs[2], descs[3]);
+ vlan0 = _mm_unpacklo_epi32(vlan0, vlan1);
+
+ vlan1 = _mm_and_si128(vlan0, rss_vlan_msk);
+ vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1);
+
+ rss = _mm_srli_epi16(vlan1, 12);
+ rss = _mm_shuffle_epi8(rss_flags, rss);
+
+ vlan0 = _mm_or_si128(vlan0, rss);
+ vol.dword = _mm_cvtsi128_si64(vlan0);
+
+ rx_pkts[0]->ol_flags = vol.e[0];
+ rx_pkts[1]->ol_flags = vol.e[1];
+ rx_pkts[2]->ol_flags = vol.e[2];
+ rx_pkts[3]->ol_flags = vol.e[3];
+}
+#else
+#define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
+#endif
+
+#define PKTLEN_SHIFT (6)
+#define PKTLEN_MASK (0x3FFF)
+/* The pkt len field in the descriptor is not byte-aligned, so a shift is
+ * needed to align it
+ */
+static inline void
+desc_pktlen_align(__m128i descs[4])
+{
+ __m128i pktlen0, pktlen1, zero;
+ union {
+ uint16_t e[4];
+ uint64_t dword;
+ } vol;
+
+ /* mask everything except pktlen field*/
+ const __m128i pktlen_msk = _mm_set_epi32(PKTLEN_MASK, PKTLEN_MASK,
+ PKTLEN_MASK, PKTLEN_MASK);
+
+ pktlen0 = _mm_unpackhi_epi32(descs[0], descs[2]);
+ pktlen1 = _mm_unpackhi_epi32(descs[1], descs[3]);
+ pktlen0 = _mm_unpackhi_epi32(pktlen0, pktlen1);
+
+ zero = _mm_xor_si128(pktlen0, pktlen0);
+
+ pktlen0 = _mm_srli_epi32(pktlen0, PKTLEN_SHIFT);
+ pktlen0 = _mm_and_si128(pktlen0, pktlen_msk);
+
+ pktlen0 = _mm_packs_epi32(pktlen0, zero);
+ vol.dword = _mm_cvtsi128_si64(pktlen0);
+ /* let the descriptor byte 15-14 store the pkt len */
+ *((uint16_t *)&descs[0]+7) = vol.e[0];
+ *((uint16_t *)&descs[1]+7) = vol.e[1];
+ *((uint16_t *)&descs[2]+7) = vol.e[2];
+ *((uint16_t *)&descs[3]+7) = vol.e[3];
+}
+
+ /* vPMD receive routine, now only accept (nb_pkts == RTE_I40E_VPMD_RX_BURST)
+ * in one loop
+ *
+ * Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ * numbers of DD bits
+ */
+static inline uint16_t
+_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *sw_ring;
+ uint16_t nb_pkts_recd;
+ int pos;
+ uint64_t var;
+ __m128i shuf_msk;
+
+ __m128i crc_adjust = _mm_set_epi16(
+ 0, 0, 0, /* ignore non-length fields */
+ -rxq->crc_len, /* sub crc on data_len */
+ 0, /* ignore high-16bits of pkt_len */
+ -rxq->crc_len, /* sub crc on pkt_len */
+ 0, 0 /* ignore pkt_type field */
+ );
+ __m128i dd_check, eop_check;
+
+ /* nb_pkts shall be less than or equal to RTE_I40E_MAX_RX_BURST */
+ nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);
+
+ /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);
+
+ /* Just the act of getting into the function from the application is
+ * going to cost about 7 cycles
+ */
+ rxdp = rxq->rx_ring + rxq->rx_tail;
+
+ _mm_prefetch((const void *)rxdp, _MM_HINT_T0);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
+ i40e_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->wb.qword1.status_error_len &
+ rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ return 0;
+
+ /* 4 packets DD mask */
+ dd_check = _mm_set_epi64x(0x0000000100000001LL, 0x0000000100000001LL);
+
+ /* 4 packets EOP mask */
+ eop_check = _mm_set_epi64x(0x0000000200000002LL, 0x0000000200000002LL);
+
+ /* mask to shuffle from desc. to mbuf */
+ shuf_msk = _mm_set_epi8(
+ 7, 6, 5, 4, /* octet 4~7, 32bits rss */
+ 3, 2, /* octet 2~3, low 16 bits vlan_macip */
+ 15, 14, /* octet 15~14, 16 bits data_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 15, 14, /* octet 15~14, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 0xFF, 0xFF /*pkt_type set as unknown */
+ );
+
+ /* Cache is empty -> need to scan the buffer rings, but first move
+ * the next 'n' mbufs into the cache
+ */
+ sw_ring = &rxq->sw_ring[rxq->rx_tail];
+
+ /* A. load 4 packets' descriptors in each loop
+ * [A*. mask out 4 unused dirty fields in desc]
+ * B. copy 4 mbuf pointers from sw_ring to rx_pkts
+ * C. calc the number of DD bits among the 4 packets
+ * [C*. extract the end-of-packet bit, if requested]
+ * D. fill info. from desc to mbuf
+ */
+
+ for (pos = 0, nb_pkts_recd = 0; pos < RTE_I40E_VPMD_RX_BURST;
+ pos += RTE_I40E_DESCS_PER_LOOP,
+ rxdp += RTE_I40E_DESCS_PER_LOOP) {
+ __m128i descs[RTE_I40E_DESCS_PER_LOOP];
+ __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+ __m128i zero, staterr, sterr_tmp1, sterr_tmp2;
+ __m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
+
+ /* B.1 load 1 mbuf point */
+ mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
+ /* Read desc statuses backwards to avoid race condition */
+ /* A.1 load 4 pkts desc */
+ descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
+
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
+
+ /* B.1 load 1 mbuf point */
+ mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
+
+ descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
+ /* A.1 load 2 pkts desc */
+ descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
+ descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
+
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ _mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+
+ if (split_packet) {
+ rte_prefetch0(&rx_pkts[pos]->cacheline1);
+ rte_prefetch0(&rx_pkts[pos + 1]->cacheline1);
+ rte_prefetch0(&rx_pkts[pos + 2]->cacheline1);
+ rte_prefetch0(&rx_pkts[pos + 3]->cacheline1);
+ }
+
+ /* shift the pktlen field */
+ desc_pktlen_align(descs);
+
+ /* avoid compiler reorder optimization */
+ rte_compiler_barrier();
+
+ /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+ pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
+ pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);
+
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
+
+ desc_to_olflags_v(descs, &rx_pkts[pos]);
+
+ /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+ pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
+ pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
+
+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+ pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
+ pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);
+
+ /* C.2 get 4 pkts staterr value */
+ zero = _mm_xor_si128(dd_check, dd_check);
+ staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
+
+ /* D.3 copy final 3,4 data to rx_pkts */
+ _mm_storeu_si128((void *)&rx_pkts[pos+3]->rx_descriptor_fields1,
+ pkt_mb4);
+ _mm_storeu_si128((void *)&rx_pkts[pos+2]->rx_descriptor_fields1,
+ pkt_mb3);
+
+ /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
+ pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
+ pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
+
+ /* C* extract and record EOP bit */
+ if (split_packet) {
+ __m128i eop_shuf_mask = _mm_set_epi8(
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x04, 0x0C, 0x00, 0x08
+ );
+
+ /* and with mask to extract bits, flipping 1-0 */
+ __m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
+ /* the staterr values are not in order, as the count
+ * of dd bits doesn't care. However, for end of
+ * packet tracking, we do care, so shuffle. This also
+ * compresses the 32-bit values to 8-bit
+ */
+ eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
+ /* store the resulting 32-bit value */
+ *(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
+ split_packet += RTE_I40E_DESCS_PER_LOOP;
+
+ /* zero-out next pointers */
+ rx_pkts[pos]->next = NULL;
+ rx_pkts[pos + 1]->next = NULL;
+ rx_pkts[pos + 2]->next = NULL;
+ rx_pkts[pos + 3]->next = NULL;
+ }
+
+ /* C.3 calc available number of desc */
+ staterr = _mm_and_si128(staterr, dd_check);
+ staterr = _mm_packs_epi32(staterr, zero);
+
+ /* D.3 copy final 1,2 data to rx_pkts */
+ _mm_storeu_si128((void *)&rx_pkts[pos+1]->rx_descriptor_fields1,
+ pkt_mb2);
+ _mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+ pkt_mb1);
+ /* C.4 calc available number of desc */
+ var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
+ nb_pkts_recd += var;
+ if (likely(var != RTE_I40E_DESCS_PER_LOOP))
+ break;
+ }
+
+ /* Update our internal tail pointer */
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+ rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+ return nb_pkts_recd;
+}
+
+ /* vPMD receive routine, now only accept (nb_pkts == RTE_IXGBE_VPMD_RX_BURST)
+ * in one loop
+ *
+ * Notice:
+ * - nb_pkts < RTE_I40E_VPMD_RX_BURST, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_IXGBE_VPMD_RX_BURST
+ * numbers of DD bit
+ */
+uint16_t
+i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+void __attribute__((cold))
+i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
+{
+ const unsigned mask = rxq->nb_rx_desc - 1;
+ unsigned i;
+
+ if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
+ return;
+
+ /* free all mbufs that are valid in the ring */
+ for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ rxq->rxrearm_nb = rxq->nb_rx_desc;
+
+ /* set all entries to NULL */
+ memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+}
+
+int __attribute__((cold))
+i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
+{
+ uintptr_t p;
+ struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.port = rxq->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ rxq->mbuf_initializer = *(uint64_t *)p;
+ return 0;
+}
--
1.9.3
^ permalink raw reply [flat|nested] 36+ messages in thread
* Re: [dpdk-dev] [PATCH 1/4 v4] add vector PMD RX for FVL
2015-10-30 14:16 ` [dpdk-dev] [PATCH 1/4 v4] add vector PMD RX " Zhe Tao
@ 2015-10-30 15:33 ` Thomas Monjalon
0 siblings, 0 replies; 36+ messages in thread
From: Thomas Monjalon @ 2015-10-30 15:33 UTC (permalink / raw)
To: Zhe Tao; +Cc: dev
2015-10-30 22:16, Zhe Tao:
> The vPMD RX function uses multi-buffer and SSE instructions to
> accelerate RX, but pktype cannot be supported by the vPMD RX for now,
> because reporting it would decrease performance heavily.
So we cannot rely on the pktype information?
Depending on which Rx path is used, we will have the information or not.
> Signed-off-by: Zhe Tao <zhe.tao@intel.com>
> +CONFIG_RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE=y
Once we have some devargs options for PCI devices, we could move this
option to runtime.
^ permalink raw reply [flat|nested] 36+ messages in thread
* [dpdk-dev] [PATCH 2/4 v4] add vector PMD TX for FVL
2015-10-30 14:16 ` [dpdk-dev] [PATCH 0/4 v4] i40e: add vector PMD support " Zhe Tao
2015-10-30 14:16 ` [dpdk-dev] [PATCH 1/4 v4] add vector PMD RX " Zhe Tao
@ 2015-10-30 14:16 ` Zhe Tao
2015-10-30 14:16 ` [dpdk-dev] [PATCH 3/4 v4] add vector PMD scatter RX " Zhe Tao
` (2 subsequent siblings)
4 siblings, 0 replies; 36+ messages in thread
From: Zhe Tao @ 2015-10-30 14:16 UTC (permalink / raw)
To: dev
The vPMD TX increases performance by using a faster mbuf release
method than the scalar TX path does.
Signed-off-by: Zhe Tao <zhe.tao@intel.com>
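(Not part of the patch: a simplified sketch of the fast release idea in
i40e_tx_free_bufs() below; it skips the refcount/prefree handling the real
code does, and bulk_free/MAX_FREE are illustrative names. Same-pool mbufs
are batched into one rte_mempool_put_bulk() call instead of being freed one
at a time.)

#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#define MAX_FREE 64	/* stand-in for RTE_I40E_TX_MAX_FREE_BUF_SZ */

/* Return n transmitted mbufs to their mempools, batching same-pool
 * buffers into single rte_mempool_put_bulk() calls. */
static void
bulk_free(struct rte_mbuf **mbufs, uint32_t n)
{
	struct rte_mbuf *free[MAX_FREE];
	uint32_t i, nb_free = 0;

	for (i = 0; i < n; i++) {
		struct rte_mbuf *m = mbufs[i];

		/* flush the batch when it is full or the pool changes */
		if (nb_free == MAX_FREE ||
		    (nb_free > 0 && m->pool != free[0]->pool)) {
			rte_mempool_put_bulk(free[0]->pool,
					     (void **)free, nb_free);
			nb_free = 0;
		}
		free[nb_free++] = m;
	}
	if (nb_free > 0)
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
}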
---
drivers/net/i40e/i40e_rxtx.c | 8 ++
drivers/net/i40e/i40e_rxtx.h | 3 +
drivers/net/i40e/i40e_rxtx_vec.c | 163 +++++++++++++++++++++++++++++++++++++++
3 files changed, 174 insertions(+)
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index dfdc7d5..8fac220 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -3085,3 +3085,11 @@ i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused *rxq)
{
return;
}
+
+uint16_t __attribute__((weak))
+i40e_xmit_pkts_vec(void __rte_unused *tx_queue,
+ struct rte_mbuf __rte_unused **tx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 961a415..6dea402 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -232,6 +232,9 @@ int i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq);
+int i40e_txq_vec_setup(struct i40e_tx_queue *txq);
void i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq);
+uint16_t i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
#endif /* _I40E_RXTX_H_ */
diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c
index aa7f52a..b97e372 100644
--- a/drivers/net/i40e/i40e_rxtx_vec.c
+++ b/drivers/net/i40e/i40e_rxtx_vec.c
@@ -445,6 +445,163 @@ i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}
+static inline void
+vtx1(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf *pkt, uint64_t flags)
+{
+ uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
+ ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
+
+ __m128i descriptor = _mm_set_epi64x(high_qw,
+ pkt->buf_physaddr + pkt->data_off);
+ _mm_store_si128((__m128i *)txdp, descriptor);
+}
+
+static inline void
+vtx(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+ int i;
+
+ for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+ vtx1(txdp, *pkt, flags);
+}
+
+static inline int __attribute__((always_inline))
+i40e_tx_free_bufs(struct i40e_tx_queue *txq)
+{
+ struct i40e_tx_entry *txep;
+ uint32_t n;
+ uint32_t i;
+ int nb_free = 0;
+ struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];
+
+ /* check DD bits on threshold descriptor */
+ if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
+ rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+ return 0;
+
+ n = txq->tx_rs_thresh;
+
+ /* first buffer to free from S/W ring is at index
+ * tx_next_dd - (tx_rs_thresh-1)
+ */
+ txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
+ m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
+ if (likely(m != NULL)) {
+ free[0] = m;
+ nb_free = 1;
+ for (i = 1; i < n; i++) {
+ m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (likely(m != NULL)) {
+ if (likely(m->pool == free[0]->pool)) {
+ free[nb_free++] = m;
+ } else {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void *)free,
+ nb_free);
+ free[0] = m;
+ nb_free = 1;
+ }
+ }
+ }
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+ } else {
+ for (i = 1; i < n; i++) {
+ m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (m != NULL)
+ rte_mempool_put(m->pool, m);
+ }
+ }
+
+ /* buffers were freed, update counters */
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+ txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+ if (txq->tx_next_dd >= txq->nb_tx_desc)
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ return txq->tx_rs_thresh;
+}
+
+static inline void __attribute__((always_inline))
+tx_backlog_entry(struct i40e_tx_entry *txep,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i;
+
+ for (i = 0; i < (int)nb_pkts; ++i)
+ txep[i].mbuf = tx_pkts[i];
+}
+
+uint16_t
+i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+ volatile struct i40e_tx_desc *txdp;
+ struct i40e_tx_entry *txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = I40E_TD_CMD;
+ uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
+ int i;
+
+ /* crossing the tx_rs_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ i40e_tx_free_bufs(txq);
+
+ nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ tx_id = txq->tx_tail;
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_tx_desc - tx_id);
+ if (nb_commit >= n) {
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+ vtx1(txdp, *tx_pkts, flags);
+
+ vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ /* avoid reaching the end of the ring */
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+ }
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->tx_next_rs) {
+ txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ txq->tx_next_rs =
+ (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
+ }
+
+ txq->tx_tail = tx_id;
+
+ I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+ return nb_pkts;
+}
+
void __attribute__((cold))
i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
@@ -480,3 +637,9 @@ i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
rxq->mbuf_initializer = *(uint64_t *)p;
return 0;
}
+
+int __attribute__((cold))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
+{
+ return 0;
+}
--
1.9.3
^ permalink raw reply [flat|nested] 36+ messages in thread
* [dpdk-dev] [PATCH 3/4 v4] add vector PMD scatter RX for FVL
2015-10-30 14:16 ` [dpdk-dev] [PATCH 0/4 v4] i40e: add vector PMD support " Zhe Tao
2015-10-30 14:16 ` [dpdk-dev] [PATCH 1/4 v4] add vector PMD RX " Zhe Tao
2015-10-30 14:16 ` [dpdk-dev] [PATCH 2/4 v4] add vector PMD TX " Zhe Tao
@ 2015-10-30 14:16 ` Zhe Tao
2015-10-30 14:16 ` [dpdk-dev] [PATCH 4/4 v4] add RX and TX selection function " Zhe Tao
2015-10-30 14:24 ` [dpdk-dev] [PATCH 0/4 v4] i40e: add vector PMD support " Liang, Cunming
4 siblings, 0 replies; 36+ messages in thread
From: Zhe Tao @ 2015-10-30 14:16 UTC (permalink / raw)
To: dev
Add support for multiple segments in one packet, for when a received
packet exceeds one buffer size.
Signed-off-by: Zhe Tao <zhe.tao@intel.com>
---
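(Not part of the patch: a configuration sketch, with illustrative values,
showing what makes the driver take the scattered Rx path: either request
it explicitly or set a max packet length larger than one mbuf data
buffer.)

#include <rte_ethdev.h>

/* Either request scattered Rx explicitly or set a max packet length
 * larger than one mbuf data buffer; 9000 is only an example value. */
static const struct rte_eth_conf scatter_conf = {
	.rxmode = {
		.jumbo_frame    = 1,
		.enable_scatter = 1,
		.max_rx_pkt_len = 9000,
	},
};

Passed as the conf argument of rte_eth_dev_configure(), this sets
dev_data->scattered_rx during queue init; the selection of the new vector
routine based on that flag is wired up in the next patch of the series.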
drivers/net/i40e/i40e_rxtx.c | 9 +++
drivers/net/i40e/i40e_rxtx.h | 3 +
drivers/net/i40e/i40e_rxtx_vec.c | 118 ++++++++++++++++++++++++++++++++++++---
3 files changed, 121 insertions(+), 9 deletions(-)
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 8fac220..d37cbde 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -3074,6 +3074,15 @@ i40e_recv_pkts_vec(
return 0;
}
+uint16_t __attribute__((weak))
+i40e_recv_scattered_pkts_vec(
+ void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
int __attribute__((weak))
i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
{
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 6dea402..98d6e90 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -231,6 +231,9 @@ int i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t i40e_recv_scattered_pkts_vec(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq);
int i40e_txq_vec_setup(struct i40e_tx_queue *txq);
void i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq);
diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c
index b97e372..f746209 100644
--- a/drivers/net/i40e/i40e_rxtx_vec.c
+++ b/drivers/net/i40e/i40e_rxtx_vec.c
@@ -220,9 +220,7 @@ desc_pktlen_align(__m128i descs[4])
*((uint16_t *)&descs[3]+7) = vol.e[3];
}
- /* vPMD receive routine, now only accept (nb_pkts == RTE_I40E_VPMD_RX_BURST)
- * in one loop
- *
+ /*
* Notice:
* - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
* - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
@@ -430,13 +428,11 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
return nb_pkts_recd;
}
- /* vPMD receive routine, now only accept (nb_pkts == RTE_IXGBE_VPMD_RX_BURST)
- * in one loop
- *
+ /*
* Notice:
- * - nb_pkts < RTE_I40E_VPMD_RX_BURST, just return no packet
- * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_IXGBE_VPMD_RX_BURST
- * numbers of DD bit
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ * numbers of DD bits
*/
uint16_t
i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
@@ -445,6 +441,110 @@ i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}
+static inline uint16_t
+reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
+ uint16_t nb_bufs, uint8_t *split_flags)
+{
+ struct rte_mbuf *pkts[RTE_I40E_VPMD_RX_BURST]; /* finished pkts */
+ struct rte_mbuf *start = rxq->pkt_first_seg;
+ struct rte_mbuf *end = rxq->pkt_last_seg;
+ unsigned pkt_idx, buf_idx;
+
+ for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
+ if (end != NULL) {
+ /* processing a split packet */
+ end->next = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+
+ start->nb_segs++;
+ start->pkt_len += rx_bufs[buf_idx]->data_len;
+ end = end->next;
+
+ if (!split_flags[buf_idx]) {
+ /* it's the last packet of the set */
+ start->hash = end->hash;
+ start->ol_flags = end->ol_flags;
+ /* we need to strip crc for the whole packet */
+ start->pkt_len -= rxq->crc_len;
+ if (end->data_len > rxq->crc_len) {
+ end->data_len -= rxq->crc_len;
+ } else {
+ /* free up last mbuf */
+ struct rte_mbuf *secondlast = start;
+
+ while (secondlast->next != end)
+ secondlast = secondlast->next;
+ secondlast->data_len -= (rxq->crc_len -
+ end->data_len);
+ secondlast->next = NULL;
+ rte_pktmbuf_free_seg(end);
+ end = secondlast;
+ }
+ pkts[pkt_idx++] = start;
+ start = end = NULL;
+ }
+ } else {
+ /* not processing a split packet */
+ if (!split_flags[buf_idx]) {
+ /* not a split packet, save and skip */
+ pkts[pkt_idx++] = rx_bufs[buf_idx];
+ continue;
+ }
+ end = start = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+ rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
+ }
+ }
+
+ /* save the partial packet for next time */
+ rxq->pkt_first_seg = start;
+ rxq->pkt_last_seg = end;
+ memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
+ return pkt_idx;
+}
+
+ /* vPMD receive routine that reassembles scattered packets
+ * Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ * numbers of DD bits
+ */
+uint16_t
+i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+
+ struct i40e_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+ if (rxq->pkt_first_seg == NULL &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+ /* reassemble any packets that need reassembly */
+ unsigned i = 0;
+
+ if (rxq->pkt_first_seg == NULL) {
+ /* find the first split flag, and only reassemble from there */
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ }
+ return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
static inline void
vtx1(volatile struct i40e_tx_desc *txdp,
struct rte_mbuf *pkt, uint64_t flags)
--
1.9.3
^ permalink raw reply [flat|nested] 36+ messages in thread
* [dpdk-dev] [PATCH 4/4 v4] add RX and TX selection function for FVL
2015-10-30 14:16 ` [dpdk-dev] [PATCH 0/4 v4] i40e: add vector PMD support " Zhe Tao
` (2 preceding siblings ...)
2015-10-30 14:16 ` [dpdk-dev] [PATCH 3/4 v4] add vector PMD scatter RX " Zhe Tao
@ 2015-10-30 14:16 ` Zhe Tao
2015-10-30 15:59 ` Thomas Monjalon
2015-10-30 14:24 ` [dpdk-dev] [PATCH 0/4 v4] i40e: add vector PMD support " Liang, Cunming
4 siblings, 1 reply; 36+ messages in thread
From: Zhe Tao @ 2015-10-30 14:16 UTC (permalink / raw)
To: dev
Let the FVL PMD select which RX and TX functions should be used
according to the queue config.
Signed-off-by: Zhe Tao <zhe.tao@intel.com>
---
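(Not part of the patch: a configuration sketch that satisfies the vector
Rx preconditions checked by i40e_rx_vec_dev_conf_condition_check() below;
vec_rx_conf is an illustrative name.)

#include <rte_ethdev.h>

/* No fdir, no header split, no csum error reporting: the checks
 * performed by i40e_rx_vec_dev_conf_condition_check() below. */
static const struct rte_eth_conf vec_rx_conf = {
	.rxmode = {
		.header_split   = 0,
		.hw_ip_checksum = 0,
	},
	.fdir_conf = {
		.mode = RTE_FDIR_MODE_NONE,
	},
};

With this configuration, and all queues passing the bulk allocation
preconditions, i40e_set_rx_function() ends up installing
i40e_recv_pkts_vec() (or the scattered variant) as the rx_pkt_burst
callback.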
doc/guides/rel_notes/release_2_2.rst | 4 +
drivers/net/i40e/i40e_ethdev.c | 20 +++-
drivers/net/i40e/i40e_ethdev.h | 6 ++
drivers/net/i40e/i40e_ethdev_vf.c | 28 ++++--
drivers/net/i40e/i40e_rxtx.c | 182 ++++++++++++++++++++++++++++++++---
drivers/net/i40e/i40e_rxtx.h | 6 ++
drivers/net/i40e/i40e_rxtx_vec.c | 32 ++++++
7 files changed, 258 insertions(+), 20 deletions(-)
diff --git a/doc/guides/rel_notes/release_2_2.rst b/doc/guides/rel_notes/release_2_2.rst
index bc9b00f..c78ceca 100644
--- a/doc/guides/rel_notes/release_2_2.rst
+++ b/doc/guides/rel_notes/release_2_2.rst
@@ -39,6 +39,10 @@ Drivers
Fixed i40e issue that occurred when a DPDK application didn't initialize
ports if memory wasn't available on socket 0.
+* **i40e: Add vector PMD support for FVL.**
+
+ Added vector RX/TX support for FVL.
+
* **vhost: Fixed Qemu shutdown.**
Fixed issue with libvirt ``virsh destroy`` not killing the VM.
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 2dd9fdc..153be45 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -403,8 +403,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
* has already done this work. Only check we don't need a different
* RX function */
if (rte_eal_process_type() != RTE_PROC_PRIMARY){
- if (dev->data->scattered_rx)
- dev->rx_pkt_burst = i40e_recv_scattered_pkts;
+ i40e_set_rx_function(dev);
+ i40e_set_tx_function(dev);
return 0;
}
pci_dev = dev->pci_dev;
@@ -670,10 +670,20 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
static int
i40e_dev_configure(struct rte_eth_dev *dev)
{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
int ret;
+ /* Initialize to TRUE. If any of the Rx queues doesn't meet the
+ * bulk allocation or vector Rx preconditions, we will reset it.
+ */
+ ad->rx_bulk_alloc_allowed = true;
+ ad->rx_vec_allowed = true;
+ ad->tx_simple_allowed = true;
+ ad->tx_vec_allowed = true;
+
if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
ret = i40e_fdir_setup(pf);
if (ret != I40E_SUCCESS) {
@@ -3707,6 +3717,9 @@ i40e_dev_tx_init(struct i40e_pf *pf)
if (ret != I40E_SUCCESS)
break;
}
+ if (ret == I40E_SUCCESS)
+ i40e_set_tx_function(container_of(pf, struct i40e_adapter, pf)
+ ->eth_dev);
return ret;
}
@@ -3733,6 +3746,9 @@ i40e_dev_rx_init(struct i40e_pf *pf)
break;
}
}
+ if (ret == I40E_SUCCESS)
+ i40e_set_rx_function(container_of(pf, struct i40e_adapter, pf)
+ ->eth_dev);
return ret;
}
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 6185657..ab78c54 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -462,6 +462,12 @@ struct i40e_adapter {
struct i40e_pf pf;
struct i40e_vf vf;
};
+
+ /* for vector PMD */
+ bool rx_bulk_alloc_allowed;
+ bool rx_vec_allowed;
+ bool tx_simple_allowed;
+ bool tx_vec_allowed;
};
int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index b694400..b8ebacb 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1182,8 +1182,8 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
* has already done this work.
*/
if (rte_eal_process_type() != RTE_PROC_PRIMARY){
- if (eth_dev->data->scattered_rx)
- eth_dev->rx_pkt_burst = i40e_recv_scattered_pkts;
+ i40e_set_rx_function(eth_dev);
+ i40e_set_tx_function(eth_dev);
return 0;
}
@@ -1277,6 +1277,17 @@ PMD_REGISTER_DRIVER(rte_i40evf_driver);
static int
i40evf_dev_configure(struct rte_eth_dev *dev)
{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ /* Initialize to TRUE. If any of the Rx queues doesn't meet the bulk
+ * allocation or vector Rx preconditions, we will reset it.
+ */
+ ad->rx_bulk_alloc_allowed = true;
+ ad->rx_vec_allowed = true;
+ ad->tx_simple_allowed = true;
+ ad->tx_vec_allowed = true;
+
return i40evf_init_vlan(dev);
}
@@ -1508,7 +1519,6 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
if (dev_data->dev_conf.rxmode.enable_scatter ||
(rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
dev_data->scattered_rx = 1;
- dev->rx_pkt_burst = i40e_recv_scattered_pkts;
}
return 0;
@@ -1519,6 +1529,7 @@ i40evf_rx_init(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
uint16_t i;
+ int ret = I40E_SUCCESS;
struct i40e_rx_queue **rxq =
(struct i40e_rx_queue **)dev->data->rx_queues;
@@ -1526,11 +1537,14 @@ i40evf_rx_init(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
if (!rxq[i] || !rxq[i]->q_set)
continue;
- if (i40evf_rxq_init(dev, rxq[i]) < 0)
- return -EFAULT;
+ ret = i40evf_rxq_init(dev, rxq[i]);
+ if (ret != I40E_SUCCESS)
+ break;
}
+ if (ret == I40E_SUCCESS)
+ i40e_set_rx_function(dev);
- return 0;
+ return ret;
}
static void
@@ -1543,6 +1557,8 @@ i40evf_tx_init(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_tx_queues; i++)
txq[i]->qtx_tail = hw->hw_addr + I40E_QTX_TAIL1(i);
+
+ i40e_set_tx_function(dev);
}
static inline void
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index d37cbde..8731712 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -2105,6 +2105,8 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
struct i40e_vsi *vsi;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct i40e_rx_queue *rxq;
const struct rte_memzone *rz;
uint32_t ring_size;
@@ -2213,13 +2215,12 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
- if (!use_def_burst_func && !dev->data->scattered_rx) {
+ if (!use_def_burst_func) {
#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
"satisfied. Rx Burst Bulk Alloc function will be "
"used on port=%d, queue=%d.",
rxq->port_id, rxq->queue_id);
- dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
} else {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -2227,6 +2228,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
"or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
"not enabled on port=%d, queue=%d.",
rxq->port_id, rxq->queue_id);
+ ad->rx_bulk_alloc_allowed = false;
}
return 0;
@@ -2488,14 +2490,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
dev->data->tx_queues[queue_idx] = txq;
/* Use a simple TX queue without offloads or multi segs if possible */
- if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS) &&
- (txq->tx_rs_thresh >= I40E_TX_MAX_BURST)) {
- PMD_INIT_LOG(INFO, "Using simple tx path");
- dev->tx_pkt_burst = i40e_xmit_pkts_simple;
- } else {
- PMD_INIT_LOG(INFO, "Using full-featured tx path");
- dev->tx_pkt_burst = i40e_xmit_pkts;
- }
+ i40e_set_tx_function_flag(dev, txq);
return 0;
}
@@ -2564,6 +2559,12 @@ i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq)
{
uint16_t i;
+ /* SSE Vector driver has a different way of releasing mbufs. */
+ if (rxq && rxq->rx_using_sse) {
+ i40e_rx_queue_release_mbufs_vec(rxq);
+ return;
+ }
+
if (!rxq || !rxq->sw_ring) {
PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
return;
@@ -2837,7 +2838,6 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
int err = I40E_SUCCESS;
struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(rxq->vsi);
- struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
uint16_t pf_q = rxq->reg_idx;
uint16_t buf_size;
struct i40e_hmc_obj_rxq rx_ctx;
@@ -2893,7 +2893,6 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
/* Check if scattered RX needs to be used. */
if ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
dev_data->scattered_rx = 1;
- dev->rx_pkt_burst = i40e_recv_scattered_pkts;
}
/* Init the RX tail register. */
@@ -3064,7 +3063,159 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
return I40E_SUCCESS;
}
+void __attribute__((cold))
+i40e_set_rx_function(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ uint16_t rx_using_sse, i;
+ /* In order to allow Vector Rx there are a few configuration
+ * conditions to be met and Rx Bulk Allocation should be allowed.
+ */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ if (i40e_rx_vec_dev_conf_condition_check(dev) ||
+ !ad->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet"
+ " Vector Rx preconditions",
+ dev->data->port_id);
+
+ ad->rx_vec_allowed = false;
+ }
+ if (ad->rx_vec_allowed) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct i40e_rx_queue *rxq =
+ dev->data->rx_queues[i];
+
+ if (i40e_rxq_vec_setup(rxq)) {
+ ad->rx_vec_allowed = false;
+ break;
+ }
+ }
+ }
+ }
+
+ if (dev->data->scattered_rx) {
+ /* Set the non-LRO scattered callback: there are Vector and
+ * single allocation versions.
+ */
+ if (ad->rx_vec_allowed) {
+ PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
+ "callback (port=%d).",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_scattered_pkts_vec;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Using Scattered Rx with bulk "
+ "allocation callback (port=%d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst = i40e_recv_scattered_pkts;
+ }
+ /* If parameters allow, we choose among the following callbacks:
+ * - Vector
+ * - Bulk Allocation
+ * - Single buffer allocation (the simplest one)
+ */
+ } else if (ad->rx_vec_allowed) {
+ PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
+ "burst size no less than %d (port=%d).",
+ RTE_I40E_DESCS_PER_LOOP,
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_pkts_vec;
+ } else if (ad->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+ "satisfied. Rx Burst Bulk Alloc function "
+ "will be used on port=%d.",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
+ "satisfied, or Scattered Rx is requested "
+ "(port=%d).",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_pkts;
+ }
+
+ /* Propagate information about RX function choice through all queues. */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ rx_using_sse =
+ (dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
+ dev->rx_pkt_burst == i40e_recv_pkts_vec);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
+
+ rxq->rx_using_sse = rx_using_sse;
+ }
+ }
+}
+
+void __attribute__((cold))
+i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ /* Use a simple Tx queue (no offloads, no multi segs) if possible */
+ if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS)
+ && (txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST)) {
+ if (txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ) {
+ PMD_INIT_LOG(DEBUG, "Vector tx"
+ " can be enabled on this txq.");
+
+ } else {
+ ad->tx_vec_allowed = false;
+ }
+ } else {
+ ad->tx_simple_allowed = false;
+ }
+}
+
+void __attribute__((cold))
+i40e_set_tx_function(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ int i;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ if (ad->tx_vec_allowed) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct i40e_tx_queue *txq =
+ dev->data->tx_queues[i];
+
+ if (i40e_txq_vec_setup(txq)) {
+ ad->tx_vec_allowed = false;
+ break;
+ }
+ }
+ }
+ }
+
+ if (ad->tx_simple_allowed) {
+ if (ad->tx_vec_allowed) {
+ PMD_INIT_LOG(DEBUG, "Vector Tx will be used.");
+ dev->tx_pkt_burst = i40e_xmit_pkts_vec;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Simple Tx will be used.");
+ dev->tx_pkt_burst = i40e_xmit_pkts_simple;
+ }
+ } else {
+ PMD_INIT_LOG(DEBUG, "Full-featured Tx will be used.");
+ dev->tx_pkt_burst = i40e_xmit_pkts;
+ }
+}
+
/* Stubs needed for linkage when CONFIG_RTE_LIBRTE_I40E_INC_VECTOR is set to 'n' */
+int __attribute__((weak))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
+{
+ return -1;
+}
+
uint16_t __attribute__((weak))
i40e_recv_pkts_vec(
void __rte_unused *rx_queue,
@@ -3089,6 +3240,12 @@ i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
return -1;
}
+int __attribute__((weak))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
+{
+ return -1;
+}
+
void __attribute__((weak))
i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused *rxq)
{
@@ -3102,3 +3259,4 @@ i40e_xmit_pkts_vec(void __rte_unused *tx_queue,
{
return 0;
}
+
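The stub block above relies on GCC weak symbols: i40e_rxtx.c always provides weak fallback definitions, and when CONFIG_RTE_LIBRTE_I40E_INC_VECTOR=y the strong definitions compiled from i40e_rxtx_vec.c override them at link time. A minimal standalone sketch of that pattern (illustrative names, not part of the patch):

/* weak_demo.c: compile alone to get the fallback; link with another
 * object that provides a strong vec_setup() to get the override.
 */
#include <stdio.h>

/* Weak fallback, analogous to the stubs above: used only when no
 * strong definition of the same symbol is linked in.
 */
int __attribute__((weak))
vec_setup(void)
{
	return -1; /* "vector path not compiled in" */
}

int
main(void)
{
	if (vec_setup() < 0)
		printf("using scalar fallback\n");
	else
		printf("vector override linked in\n");
	return 0;
}

Linking a second object that defines a strong vec_setup(), as i40e_rxtx_vec.c does for the real symbols, silently replaces the weak body without any #ifdef in the caller.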
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 98d6e90..39cb95a 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -132,6 +132,7 @@ struct i40e_rx_queue {
uint8_t hs_mode; /* Header Split mode */
bool q_set; /**< indicate if rx queue has been configured */
bool rx_deferred_start; /**< don't start this queue in dev start */
+ uint16_t rx_using_sse; /**< flag indicating whether vPMD is used for Rx */
};
struct i40e_tx_entry {
@@ -234,10 +235,15 @@ uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t i40e_recv_scattered_pkts_vec(void *rx_queue,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+int i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq);
int i40e_txq_vec_setup(struct i40e_tx_queue *txq);
void i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq);
uint16_t i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+void i40e_set_rx_function(struct rte_eth_dev *dev);
+void i40e_set_tx_function_flag(struct rte_eth_dev *dev,
+ struct i40e_tx_queue *txq);
+void i40e_set_tx_function(struct rte_eth_dev *dev);
#endif /* _I40E_RXTX_H_ */
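The three new prototypes split the selection into two phases: i40e_set_tx_function_flag() runs per queue at setup time and can only clear the adapter-wide capability flags, while i40e_set_rx_function() and i40e_set_tx_function() later pick the burst callbacks from whatever flags survived. A standalone model of that accumulate-then-select pattern (an assumption-level sketch, not driver code):

/* select_demo.c */
#include <stdbool.h>
#include <stdio.h>

struct adapter {
	bool tx_simple_allowed;
	bool tx_vec_allowed;
};

/* Phase 1, per queue: a restrictive queue can only clear flags,
 * mirroring i40e_set_tx_function_flag().
 */
static void
queue_setup(struct adapter *ad, bool simple_ok, bool vec_ok)
{
	if (!simple_ok)
		ad->tx_simple_allowed = false;
	if (!vec_ok)
		ad->tx_vec_allowed = false;
}

/* Phase 2, once per port: pick the fastest path still allowed,
 * mirroring i40e_set_tx_function().
 */
static const char *
select_tx(const struct adapter *ad)
{
	if (ad->tx_simple_allowed)
		return ad->tx_vec_allowed ? "vector" : "simple";
	return "full-featured";
}

int
main(void)
{
	struct adapter ad = { true, true };

	queue_setup(&ad, true, true);	/* queue 0: fully capable */
	queue_setup(&ad, true, false);	/* queue 1: e.g. tx_rs_thresh too big */
	printf("tx path: %s\n", select_tx(&ad));	/* prints "simple" */
	return 0;
}

One restrictive queue is enough to downgrade the whole port, which is why the flags live in struct i40e_adapter rather than per queue.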
diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c
index f746209..047aff5 100644
--- a/drivers/net/i40e/i40e_rxtx_vec.c
+++ b/drivers/net/i40e/i40e_rxtx_vec.c
@@ -743,3 +743,35 @@ i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
{
return 0;
}
+
+int __attribute__((cold))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
+{
+#ifndef RTE_LIBRTE_IEEE1588
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+
+#ifndef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
+ /* without Rx ol_flags, no vlan-present (VP) flag can be reported */
+ if (rxmode->hw_vlan_strip != 0 ||
+ rxmode->hw_vlan_extend != 0)
+ return -1;
+#endif
+
+ /* no fdir support */
+ if (fconf->mode != RTE_FDIR_MODE_NONE)
+ return -1;
+
+ /* - no csum error report support
+ * - no header split support
+ */
+ if (rxmode->hw_ip_checksum == 1 ||
+ rxmode->header_split == 1)
+ return -1;
+
+ return 0;
+#else
+ RTE_SET_USED(dev);
+ return -1;
+#endif
+}
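Reading the check backwards gives the configuration an application must use to keep the vector path eligible: fdir off, no header split, no HW checksum reporting, and (when RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE is disabled) no VLAN stripping or extension. A hedged sketch, assuming the DPDK 2.x rte_eth_conf field names used by the check above:

#include <rte_ethdev.h>

/* Illustrative only: settings that satisfy every branch of
 * i40e_rx_vec_dev_conf_condition_check().
 */
static const struct rte_eth_conf vec_friendly_conf = {
	.rxmode = {
		.header_split   = 0,	/* no header split */
		.hw_ip_checksum = 0,	/* no csum error reporting */
		.hw_vlan_strip  = 0,	/* required if OLFLAGS is off */
		.hw_vlan_extend = 0,
	},
	.fdir_conf = {
		.mode = RTE_FDIR_MODE_NONE,	/* vPMD has no fdir support */
	},
};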
--
1.9.3
^ permalink raw reply [flat|nested] 36+ messages in thread
* Re: [dpdk-dev] [PATCH 4/4 v4] add RX and TX selection function for FVL
2015-10-30 14:16 ` [dpdk-dev] [PATCH 4/4 v4] add RX and TX selection function " Zhe Tao
@ 2015-10-30 15:59 ` Thomas Monjalon
0 siblings, 0 replies; 36+ messages in thread
From: Thomas Monjalon @ 2015-10-30 15:59 UTC (permalink / raw)
To: Zhe Tao; +Cc: dev
2015-10-30 22:16, Zhe Tao:
> --- a/doc/guides/rel_notes/release_2_2.rst
> +++ b/doc/guides/rel_notes/release_2_2.rst
> @@ -39,6 +39,10 @@ Drivers
> Fixed i40e issue that occurred when a DPDK application didn't initialize
> ports if memory wasn't available on socket 0.
>
> +* **i40e: Add vector PMD support for FVL.**
> +
> + Add vector RX/TX support for FVL
It is inserted in the bug section. Hidden meaning? ;)
Sorry for being sarcastic; I just want to avoid this kind of extra work
in the future.
^ permalink raw reply [flat|nested] 36+ messages in thread
* Re: [dpdk-dev] [PATCH 0/4 v4] i40e: add vector PMD support for FVL
2015-10-30 14:16 ` [dpdk-dev] [PATCH 0/4 v4] i40e: add vector PMD support " Zhe Tao
` (3 preceding siblings ...)
2015-10-30 14:16 ` [dpdk-dev] [PATCH 4/4 v4] add RX and TX selection function " Zhe Tao
@ 2015-10-30 14:24 ` Liang, Cunming
2015-10-30 15:41 ` Thomas Monjalon
4 siblings, 1 reply; 36+ messages in thread
From: Liang, Cunming @ 2015-10-30 14:24 UTC (permalink / raw)
To: Tao, Zhe, dev
Hi,
> -----Original Message-----
> From: Tao, Zhe
> Sent: Friday, October 30, 2015 10:17 PM
> To: dev@dpdk.org
> Cc: Tao, Zhe; Liang, Cunming
> Subject: [dpdk-dev][PATCH 0/4 v4] i40e: add vector PMD support for FVL
>
> This patch set add the vector PMD support for FVL.
> FVL vPMD works the same way the ixgbe one does.
> All the functionality is tested.
>
> PATCH v1: add the vector PMD support for FVL
>
> PATCH v2: remove the extra code out of share code
> change the prefetch position for scattered packets support
> update the release note
>
> PATCH v3: make the new patch set
>
> PATCH v4: fix some format issues
>
> Zhe Tao (4):
> add vector PMD RX for FVL
> add vector PMD TX for FVL
> add vector PMD scatter RX for FVL
> add RX and TX selection function for FVL
>
> config/common_bsdapp | 2 +
> config/common_linuxapp | 2 +
> doc/guides/rel_notes/release_2_2.rst | 4 +
> drivers/net/i40e/Makefile | 1 +
> drivers/net/i40e/i40e_ethdev.c | 20 +-
> drivers/net/i40e/i40e_ethdev.h | 6 +
> drivers/net/i40e/i40e_ethdev_vf.c | 28 +-
> drivers/net/i40e/i40e_rxtx.c | 227 +++++++++-
> drivers/net/i40e/i40e_rxtx.h | 40 +-
> drivers/net/i40e/i40e_rxtx_vec.c | 777
> +++++++++++++++++++++++++++++++++++
> 10 files changed, 1082 insertions(+), 25 deletions(-)
> create mode 100644 drivers/net/i40e/i40e_rxtx_vec.c
>
> --
> 1.9.3
Acked-by: Cunming Liang <cunming.liang@intel.com>
^ permalink raw reply [flat|nested] 36+ messages in thread
* Re: [dpdk-dev] [PATCH 0/4 v4] i40e: add vector PMD support for FVL
2015-10-30 14:24 ` [dpdk-dev] [PATCH 0/4 v4] i40e: add vector PMD support " Liang, Cunming
@ 2015-10-30 15:41 ` Thomas Monjalon
2015-10-30 16:02 ` Thomas Monjalon
0 siblings, 1 reply; 36+ messages in thread
From: Thomas Monjalon @ 2015-10-30 15:41 UTC (permalink / raw)
To: Liang, Cunming; +Cc: dev
2015-10-30 14:24, Liang, Cunming:
> > add vector PMD RX for FVL
> > add vector PMD TX for FVL
> > add vector PMD scatter RX for FVL
> > add RX and TX selection function for FVL
>
> Acked-by: Cunming Liang <cunming.liang@intel.com>
Cunming, before accepting patches, you should run checkpatch
and check the wording of the titles.
Some things here do not comply with the coding style, but I
cannot block the patches for that reason because nobody cares.
Today, it seems more important to ack every patch.
^ permalink raw reply [flat|nested] 36+ messages in thread
* Re: [dpdk-dev] [PATCH 0/4 v4] i40e: add vector PMD support for FVL
2015-10-30 15:41 ` Thomas Monjalon
@ 2015-10-30 16:02 ` Thomas Monjalon
0 siblings, 0 replies; 36+ messages in thread
From: Thomas Monjalon @ 2015-10-30 16:02 UTC (permalink / raw)
To: Tao, Zhe; +Cc: dev
2015-10-30 16:41, Thomas Monjalon:
> 2015-10-30 14:24, Liang, Cunming:
> > > add vector PMD RX for FVL
> > > add vector PMD TX for FVL
> > > add vector PMD scatter RX for FVL
> > > add RX and TX selection function for FVL
> >
> > Acked-by: Cunming Liang <cunming.liang@intel.com>
>
> Cunming, before accepting patches, you should run checkpatch
> and check the wording of the titles.
> Some things here do not comply with the coding style, but I
> cannot block the patches for that reason because nobody cares.
> Today, it seems more important to ack every patch.
Applied
^ permalink raw reply [flat|nested] 36+ messages in thread