DPDK patches and discussions
* [dpdk-dev] [PATCH] net/thunderx: add support for Rx VLAN offload
@ 2018-07-01 16:46 Pavan Nikhilesh
  2018-07-04 17:36 ` Ferruh Yigit
                   ` (2 more replies)
  0 siblings, 3 replies; 12+ messages in thread
From: Pavan Nikhilesh @ 2018-07-01 16:46 UTC (permalink / raw)
  To: jerin.jacob, santosh.shukla, rkudurumalla, ferruh.yigit
  Cc: dev, Kudurumalla, Rakesh, Pavan Nikhilesh

From: "Kudurumalla, Rakesh" <rakesh.kudurumalla@cavium.com>

This feature offloads stripping of the VLAN header from received
packets and updates the vlan_tci field in the mbuf when the
DEV_RX_OFFLOAD_VLAN_STRIP offload is enabled and the driver's
vlan_offload_set op is invoked with ETH_VLAN_STRIP_MASK set.

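For context, a minimal application-side sketch of consuming this offload.
This is not part of the patch; EAL init, mempool and queue setup are
elided, and port_id is assumed to be a valid configured port. All APIs
used (rte_eth_dev_configure, rte_eth_dev_get/set_vlan_offload,
rte_eth_rx_burst, PKT_RX_VLAN_STRIPPED) are standard ethdev calls of
this DPDK era:

    #include <stdio.h>
    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    static void
    enable_vlan_strip(uint16_t port_id)
    {
    	struct rte_eth_conf conf = { 0 };
    	int vlan_offload;

    	/* Request VLAN stripping at configure time ... */
    	conf.rxmode.offloads = DEV_RX_OFFLOAD_VLAN_STRIP;
    	rte_eth_dev_configure(port_id, 1, 1, &conf);

    	/* ... or toggle it on a configured port; this reaches the
    	 * driver's vlan_offload_set op with ETH_VLAN_STRIP_MASK set. */
    	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
    	rte_eth_dev_set_vlan_offload(port_id,
    			vlan_offload | ETH_VLAN_STRIP_OFFLOAD);
    }

    static void
    print_stripped_tags(uint16_t port_id)
    {
    	struct rte_mbuf *pkts[32];
    	uint16_t i, n;

    	n = rte_eth_rx_burst(port_id, 0, pkts, 32);
    	for (i = 0; i < n; i++) {
    		/* With the offload active, the tag is removed from the
    		 * packet data and reported through the mbuf instead. */
    		if (pkts[i]->ol_flags & PKT_RX_VLAN_STRIPPED)
    			printf("VLAN TCI: 0x%x\n", pkts[i]->vlan_tci);
    		rte_pktmbuf_free(pkts[i]);
    	}
    }
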
Signed-off-by: Rakesh Kudurumalla <rkudurumalla@caviumnetworks.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
 drivers/net/thunderx/base/nicvf_hw.c |  1 +
 drivers/net/thunderx/nicvf_ethdev.c  | 59 +++++++++++++++++------
 drivers/net/thunderx/nicvf_rxtx.c    | 70 ++++++++++++++++++++++++----
 drivers/net/thunderx/nicvf_rxtx.h    | 15 ++++--
 drivers/net/thunderx/nicvf_struct.h  |  1 +
 5 files changed, 119 insertions(+), 27 deletions(-)

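A note on the pattern used in nicvf_rxtx.c below: a single always-inline
worker takes a const flag argument, and thin exported wrappers pass
literal flag values, so each wrapper compiles into a specialized Rx
routine with the untaken offload branches eliminated at build time. A
condensed, driver-independent sketch of the idea (all names here are
illustrative, not part of the patch):

    #include <stdint.h>
    #include <rte_common.h>

    #define SKETCH_OFFLOAD_NONE       0x1
    #define SKETCH_OFFLOAD_VLAN_STRIP 0x2

    /* 'flag' is a literal constant in every caller below, so after
     * inlining the compiler resolves each 'if (flag & ...)' at build
     * time and emits a branch-free specialized body per wrapper. */
    static __rte_always_inline uint16_t
    sketch_recv(void *rxq, uint16_t nb_pkts, const uint16_t flag)
    {
    	uint16_t done = 0;

    	RTE_SET_USED(rxq);
    	RTE_SET_USED(nb_pkts);
    	if (flag & SKETCH_OFFLOAD_VLAN_STRIP) {
    		/* VLAN-strip-specific per-packet work goes here. */
    	}
    	return done;
    }

    uint16_t
    sketch_recv_no_offload(void *rxq, uint16_t nb_pkts)
    {
    	return sketch_recv(rxq, nb_pkts, SKETCH_OFFLOAD_NONE);
    }

    uint16_t
    sketch_recv_vlan_strip(void *rxq, uint16_t nb_pkts)
    {
    	return sketch_recv(rxq, nb_pkts,
    			SKETCH_OFFLOAD_NONE | SKETCH_OFFLOAD_VLAN_STRIP);
    }
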
diff --git a/drivers/net/thunderx/base/nicvf_hw.c b/drivers/net/thunderx/base/nicvf_hw.c
index b07a2937d..5b1abe201 100644
--- a/drivers/net/thunderx/base/nicvf_hw.c
+++ b/drivers/net/thunderx/base/nicvf_hw.c
@@ -699,6 +699,7 @@ nicvf_vlan_hw_strip(struct nicvf *nic, bool enable)
 	else
 		val &= ~((STRIP_SECOND_VLAN | STRIP_FIRST_VLAN) << 25);
 
+	nic->vlan_strip = enable;
 	nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
 }
 
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index 76fed9f99..4f58b2e33 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -52,6 +52,8 @@ static void nicvf_dev_stop(struct rte_eth_dev *dev);
 static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
 static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
 			  bool cleanup);
+static int nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
+static int nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 
 RTE_INIT(nicvf_init_log);
 static void
@@ -357,11 +359,9 @@ nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 	}
 
 	memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
-	if (dev->rx_pkt_burst == nicvf_recv_pkts ||
-		dev->rx_pkt_burst == nicvf_recv_pkts_multiseg)
-		return ptypes;
 
-	return NULL;
+	/* All Ptypes are supported in all Rx functions. */
+	return ptypes;
 }
 
 static void
@@ -918,13 +918,18 @@ nicvf_set_tx_function(struct rte_eth_dev *dev)
 static void
 nicvf_set_rx_function(struct rte_eth_dev *dev)
 {
-	if (dev->data->scattered_rx) {
-		PMD_DRV_LOG(DEBUG, "Using multi-segment rx callback");
-		dev->rx_pkt_burst = nicvf_recv_pkts_multiseg;
-	} else {
-		PMD_DRV_LOG(DEBUG, "Using single-segment rx callback");
-		dev->rx_pkt_burst = nicvf_recv_pkts;
-	}
+	struct nicvf *nic = nicvf_pmd_priv(dev);
+
+	const eth_rx_burst_t rx_burst_func[2][2] = {
+	/* [NORMAL/SCATTER] [NO_VLAN_STRIP/VLAN_STRIP] */
+		[0][0] = nicvf_recv_pkts_no_offload,
+		[0][1] = nicvf_recv_pkts_vlan_strip,
+		[1][0] = nicvf_recv_pkts_multiseg_no_offload,
+		[1][1] = nicvf_recv_pkts_multiseg_vlan_strip,
+	};
+
+	dev->rx_pkt_burst =
+		rx_burst_func[dev->data->scattered_rx][nic->vlan_strip];
 }
 
 static int
@@ -1469,7 +1474,7 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
 	struct rte_mbuf *mbuf;
 	uint16_t rx_start, rx_end;
 	uint16_t tx_start, tx_end;
-	bool vlan_strip;
+	int mask;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1590,9 +1595,9 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
 		     nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
 
 	/* Configure VLAN Strip */
-	vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_VLAN_STRIP);
-	nicvf_vlan_hw_strip(nic, vlan_strip);
+	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+		ETH_VLAN_EXTEND_MASK;
+	ret = nicvf_vlan_offload_config(dev, mask);
 
 	/* Based on the packet type(IPv4 or IPv6), the nicvf HW aligns L3 data
 	 * to the 64bit memory address.
@@ -1983,6 +1988,7 @@ static const struct eth_dev_ops nicvf_eth_dev_ops = {
 	.dev_infos_get            = nicvf_dev_info_get,
 	.dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
 	.mtu_set                  = nicvf_dev_set_mtu,
+	.vlan_offload_set         = nicvf_vlan_offload_set,
 	.reta_update              = nicvf_dev_reta_update,
 	.reta_query               = nicvf_dev_reta_query,
 	.rss_hash_update          = nicvf_dev_rss_hash_update,
@@ -1999,6 +2005,29 @@ static const struct eth_dev_ops nicvf_eth_dev_ops = {
 	.get_reg                  = nicvf_dev_get_regs,
 };
 
+static int
+nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
+{
+	struct rte_eth_rxmode *rxmode;
+	struct nicvf *nic = nicvf_pmd_priv(dev);
+	rxmode = &dev->data->dev_conf.rxmode;
+	if (mask & ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+			nicvf_vlan_hw_strip(nic, true);
+		else
+			nicvf_vlan_hw_strip(nic, false);
+	}
+	return 0;
+}
+
+static int
+nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+	nicvf_vlan_offload_config(dev, mask);
+
+	return 0;
+}
+
 static inline int
 nicvf_set_first_skip(struct rte_eth_dev *dev)
 {
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index 72305d9d2..9454e6455 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -388,8 +388,9 @@ nicvf_rx_offload(cqe_rx_word0_t cqe_rx_w0, cqe_rx_word2_t cqe_rx_w2,
 	}
 }
 
-uint16_t __hot
-nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+static __rte_always_inline uint16_t
+nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
+		const uint16_t flag)
 {
 	uint32_t i, to_process;
 	struct cqe_rx_t *cqe_rx;
@@ -420,7 +421,18 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		rb0_ptr = *((uint64_t *)cqe_rx + rbptr_offset);
 		pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
 				(rb0_ptr - cqe_rx_w1.align_pad, mbuf_phys_off);
-		pkt->ol_flags = 0;
+
+		if (flag & NICVF_RX_OFFLOAD_NONE)
+			pkt->ol_flags = 0;
+		if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
+			if (unlikely(cqe_rx_w0.vlan_stripped)) {
+				pkt->ol_flags |= PKT_RX_VLAN
+							| PKT_RX_VLAN_STRIPPED;
+				pkt->vlan_tci =
+					rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
+			}
+		}
+
 		pkt->data_len = cqe_rx_w3.rb0_sz;
 		pkt->pkt_len = cqe_rx_w3.rb0_sz;
 		pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
@@ -445,11 +457,27 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	return to_process;
 }
 
-static inline uint16_t __hot
+uint16_t __hot
+nicvf_recv_pkts_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_NONE);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+static __rte_always_inline uint16_t
 nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
 			uint64_t mbuf_phys_off,
 			struct rte_mbuf **rx_pkt, uint8_t rbptr_offset,
-			uint64_t mbuf_init)
+			uint64_t mbuf_init, const uint16_t flag)
 {
 	struct rte_mbuf *pkt, *seg, *prev;
 	cqe_rx_word0_t cqe_rx_w0;
@@ -467,12 +495,20 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
 	pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
 			(rb_ptr[0] - cqe_rx_w1.align_pad, mbuf_phys_off);
 
-	pkt->ol_flags = 0;
 	pkt->pkt_len = cqe_rx_w1.pkt_len;
 	pkt->data_len = rb_sz[nicvf_frag_num(0)];
 	nicvf_mbuff_init_mseg_update(
 				pkt, mbuf_init, cqe_rx_w1.align_pad, nb_segs);
 	pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
+	if (flag & NICVF_RX_OFFLOAD_NONE)
+		pkt->ol_flags = 0;
+	if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
+		if (unlikely(cqe_rx_w0.vlan_stripped)) {
+			pkt->ol_flags |= PKT_RX_VLAN
+				| PKT_RX_VLAN_STRIPPED;
+			pkt->vlan_tci = rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
+		}
+	}
 	nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);
 
 	*rx_pkt = pkt;
@@ -491,9 +527,9 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
 	return nb_segs;
 }
 
-uint16_t __hot
+static __rte_always_inline uint16_t
 nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
-			 uint16_t nb_pkts)
+			 uint16_t nb_pkts, const uint16_t flag)
 {
 	union cq_entry_t *cq_entry;
 	struct cqe_rx_t *cqe_rx;
@@ -515,7 +551,7 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
 		cq_entry = &desc[cqe_head];
 		cqe_rx = (struct cqe_rx_t *)cq_entry;
 		nb_segs = nicvf_process_cq_mseg_entry(cqe_rx, mbuf_phys_off,
-			rx_pkts + i, rbptr_offset, mbuf_init);
+			rx_pkts + i, rbptr_offset, mbuf_init, flag);
 		buffers_consumed += nb_segs;
 		cqe_head = (cqe_head + 1) & cqe_mask;
 		nicvf_prefetch_store_keep(rx_pkts[i]);
@@ -535,6 +571,22 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return to_process;
 }
 
+uint16_t __hot
+nicvf_recv_pkts_multiseg_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_NONE);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_multiseg_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
 uint32_t
 nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
diff --git a/drivers/net/thunderx/nicvf_rxtx.h b/drivers/net/thunderx/nicvf_rxtx.h
index 8bdd582ed..cb309782e 100644
--- a/drivers/net/thunderx/nicvf_rxtx.h
+++ b/drivers/net/thunderx/nicvf_rxtx.h
@@ -8,6 +8,9 @@
 #include <rte_byteorder.h>
 #include <rte_ethdev_driver.h>
 
+#define NICVF_RX_OFFLOAD_NONE           0x1
+#define NICVF_RX_OFFLOAD_VLAN_STRIP     0x2
+
 #define NICVF_TX_OFFLOAD_MASK (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)
 
 #ifndef __hot
@@ -86,9 +89,15 @@ nicvf_mbuff_init_mseg_update(struct rte_mbuf *pkt, const uint64_t mbuf_init,
 uint32_t nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx);
 uint32_t nicvf_dev_rbdr_refill(struct rte_eth_dev *dev, uint16_t queue_idx);
 
-uint16_t nicvf_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t pkts);
-uint16_t nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
-				  uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_no_offload(void *rxq, struct rte_mbuf **rx_pkts,
+		uint16_t pkts);
+uint16_t nicvf_recv_pkts_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts);
+
+uint16_t nicvf_recv_pkts_multiseg_no_offload(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_multiseg_vlan_strip(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 
 uint16_t nicvf_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts, uint16_t pkts);
 uint16_t nicvf_xmit_pkts_multiseg(void *txq, struct rte_mbuf **tx_pkts,
diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h
index cf98f7c1a..6dfbeb8ee 100644
--- a/drivers/net/thunderx/nicvf_struct.h
+++ b/drivers/net/thunderx/nicvf_struct.h
@@ -85,6 +85,7 @@ struct nicvf {
 	bool loopback_supported;
 	bool pf_acked:1;
 	bool pf_nacked:1;
+	bool vlan_strip:1;
 	uint64_t hwcap;
 	uint8_t link_up;
 	uint8_t	duplex;
-- 
2.18.0


Thread overview: 12+ messages
2018-07-01 16:46 [dpdk-dev] [PATCH] net/thunderx: add support for Rx VLAN offload Pavan Nikhilesh
2018-07-04 17:36 ` Ferruh Yigit
2018-07-13 14:16   ` rkudurumalla
2018-07-14  8:02     ` Andrew Rybchenko
2018-07-16  9:26 ` [dpdk-dev] [PATCH v2 1/2] net/thunderx: enable Rx checksum offload Pavan Nikhilesh
2018-07-16  9:26   ` [dpdk-dev] [PATCH v2 2/2] net/thunderx: add support for Rx VLAN offload Pavan Nikhilesh
2018-07-18 13:48   ` [dpdk-dev] [PATCH v2 1/2] net/thunderx: enable Rx checksum offload Ferruh Yigit
2018-07-18 15:05 ` [dpdk-dev] [PATCH v3 " Pavan Nikhilesh
2018-07-18 15:05   ` [dpdk-dev] [PATCH v3 2/2] net/thunderx: add support for Rx VLAN offload Pavan Nikhilesh
2018-07-18 17:30     ` Jerin Jacob
2018-07-18 17:27   ` [dpdk-dev] [PATCH v3 1/2] net/thunderx: enable Rx checksum offload Jerin Jacob
2018-07-19 13:15     ` Ferruh Yigit
