DPDK patches and discussions
* [dpdk-dev]  [PATCH] net/thunderx: add support for Rx VLAN offload
@ 2018-07-01 16:46 Pavan Nikhilesh
  2018-07-04 17:36 ` Ferruh Yigit
                   ` (2 more replies)
  0 siblings, 3 replies; 12+ messages in thread
From: Pavan Nikhilesh @ 2018-07-01 16:46 UTC (permalink / raw)
  To: jerin.jacob, santosh.shukla, rkudurumalla, ferruh.yigit
  Cc: dev, Kudurumalla, Rakesh, Pavan Nikhilesh

From: "Kudurumalla, Rakesh" <rakesh.kudurumalla@cavium.com>

This feature is used to offload stripping of the vlan header from
received packets and to update the vlan_tci field in the mbuf when the
DEV_RX_OFFLOAD_VLAN_STRIP & ETH_VLAN_STRIP_MASK flags are set.

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@caviumnetworks.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
 drivers/net/thunderx/base/nicvf_hw.c |  1 +
 drivers/net/thunderx/nicvf_ethdev.c  | 59 +++++++++++++++++------
 drivers/net/thunderx/nicvf_rxtx.c    | 70 ++++++++++++++++++++++++----
 drivers/net/thunderx/nicvf_rxtx.h    | 15 ++++--
 drivers/net/thunderx/nicvf_struct.h  |  1 +
 5 files changed, 119 insertions(+), 27 deletions(-)

diff --git a/drivers/net/thunderx/base/nicvf_hw.c b/drivers/net/thunderx/base/nicvf_hw.c
index b07a2937d..5b1abe201 100644
--- a/drivers/net/thunderx/base/nicvf_hw.c
+++ b/drivers/net/thunderx/base/nicvf_hw.c
@@ -699,6 +699,7 @@ nicvf_vlan_hw_strip(struct nicvf *nic, bool enable)
 	else
 		val &= ~((STRIP_SECOND_VLAN | STRIP_FIRST_VLAN) << 25);
 
+	nic->vlan_strip = enable;
 	nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
 }
 
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index 76fed9f99..4f58b2e33 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -52,6 +52,8 @@ static void nicvf_dev_stop(struct rte_eth_dev *dev);
 static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
 static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
 			  bool cleanup);
+static int nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
+static int nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 
 RTE_INIT(nicvf_init_log);
 static void
@@ -357,11 +359,9 @@ nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 	}
 
 	memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
-	if (dev->rx_pkt_burst == nicvf_recv_pkts ||
-		dev->rx_pkt_burst == nicvf_recv_pkts_multiseg)
-		return ptypes;
 
-	return NULL;
+	/* All Ptypes are supported in all Rx functions. */
+	return ptypes;
 }
 
 static void
@@ -918,13 +918,18 @@ nicvf_set_tx_function(struct rte_eth_dev *dev)
 static void
 nicvf_set_rx_function(struct rte_eth_dev *dev)
 {
-	if (dev->data->scattered_rx) {
-		PMD_DRV_LOG(DEBUG, "Using multi-segment rx callback");
-		dev->rx_pkt_burst = nicvf_recv_pkts_multiseg;
-	} else {
-		PMD_DRV_LOG(DEBUG, "Using single-segment rx callback");
-		dev->rx_pkt_burst = nicvf_recv_pkts;
-	}
+	struct nicvf *nic = nicvf_pmd_priv(dev);
+
+	const eth_rx_burst_t rx_burst_func[2][2] = {
+	/* [NORMAL/SCATTER] [VLAN_STRIP/NO_VLAN_STRIP] */
+		[0][0] = nicvf_recv_pkts_no_offload,
+		[0][1] = nicvf_recv_pkts_vlan_strip,
+		[1][0] = nicvf_recv_pkts_multiseg_no_offload,
+		[1][1] = nicvf_recv_pkts_multiseg_vlan_strip,
+	};
+
+	dev->rx_pkt_burst =
+		rx_burst_func[dev->data->scattered_rx][nic->vlan_strip];
 }
 
 static int
@@ -1469,7 +1474,7 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
 	struct rte_mbuf *mbuf;
 	uint16_t rx_start, rx_end;
 	uint16_t tx_start, tx_end;
-	bool vlan_strip;
+	int mask;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1590,9 +1595,9 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
 		     nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
 
 	/* Configure VLAN Strip */
-	vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_VLAN_STRIP);
-	nicvf_vlan_hw_strip(nic, vlan_strip);
+	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+		ETH_VLAN_EXTEND_MASK;
+	ret = nicvf_vlan_offload_config(dev, mask);
 
 	/* Based on the packet type(IPv4 or IPv6), the nicvf HW aligns L3 data
 	 * to the 64bit memory address.
@@ -1983,6 +1988,7 @@ static const struct eth_dev_ops nicvf_eth_dev_ops = {
 	.dev_infos_get            = nicvf_dev_info_get,
 	.dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
 	.mtu_set                  = nicvf_dev_set_mtu,
+	.vlan_offload_set         = nicvf_vlan_offload_set,
 	.reta_update              = nicvf_dev_reta_update,
 	.reta_query               = nicvf_dev_reta_query,
 	.rss_hash_update          = nicvf_dev_rss_hash_update,
@@ -1999,6 +2005,29 @@ static const struct eth_dev_ops nicvf_eth_dev_ops = {
 	.get_reg                  = nicvf_dev_get_regs,
 };
 
+static int
+nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
+{
+	struct rte_eth_rxmode *rxmode;
+	struct nicvf *nic = nicvf_pmd_priv(dev);
+	rxmode = &dev->data->dev_conf.rxmode;
+	if (mask & ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+			nicvf_vlan_hw_strip(nic, true);
+		else
+			nicvf_vlan_hw_strip(nic, false);
+	}
+	return 0;
+}
+
+static int
+nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+	nicvf_vlan_offload_config(dev, mask);
+
+	return 0;
+}
+
 static inline int
 nicvf_set_first_skip(struct rte_eth_dev *dev)
 {
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index 72305d9d2..9454e6455 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -388,8 +388,9 @@ nicvf_rx_offload(cqe_rx_word0_t cqe_rx_w0, cqe_rx_word2_t cqe_rx_w2,
 	}
 }
 
-uint16_t __hot
-nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+static __rte_always_inline uint16_t
+nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
+		const uint16_t flag)
 {
 	uint32_t i, to_process;
 	struct cqe_rx_t *cqe_rx;
@@ -420,7 +421,18 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		rb0_ptr = *((uint64_t *)cqe_rx + rbptr_offset);
 		pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
 				(rb0_ptr - cqe_rx_w1.align_pad, mbuf_phys_off);
-		pkt->ol_flags = 0;
+
+		if (flag & NICVF_RX_OFFLOAD_NONE)
+			pkt->ol_flags = 0;
+		if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
+			if (unlikely(cqe_rx_w0.vlan_stripped)) {
+				pkt->ol_flags |= PKT_RX_VLAN
+							| PKT_RX_VLAN_STRIPPED;
+				pkt->vlan_tci =
+					rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
+			}
+		}
+
 		pkt->data_len = cqe_rx_w3.rb0_sz;
 		pkt->pkt_len = cqe_rx_w3.rb0_sz;
 		pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
@@ -445,11 +457,27 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	return to_process;
 }
 
-static inline uint16_t __hot
+uint16_t __hot
+nicvf_recv_pkts_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_NONE);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+static __rte_always_inline uint16_t
 nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
 			uint64_t mbuf_phys_off,
 			struct rte_mbuf **rx_pkt, uint8_t rbptr_offset,
-			uint64_t mbuf_init)
+			uint64_t mbuf_init, const uint16_t flag)
 {
 	struct rte_mbuf *pkt, *seg, *prev;
 	cqe_rx_word0_t cqe_rx_w0;
@@ -467,12 +495,20 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
 	pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
 			(rb_ptr[0] - cqe_rx_w1.align_pad, mbuf_phys_off);
 
-	pkt->ol_flags = 0;
 	pkt->pkt_len = cqe_rx_w1.pkt_len;
 	pkt->data_len = rb_sz[nicvf_frag_num(0)];
 	nicvf_mbuff_init_mseg_update(
 				pkt, mbuf_init, cqe_rx_w1.align_pad, nb_segs);
 	pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
+	if (flag & NICVF_RX_OFFLOAD_NONE)
+		pkt->ol_flags = 0;
+	if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
+		if (unlikely(cqe_rx_w0.vlan_stripped)) {
+			pkt->ol_flags |= PKT_RX_VLAN
+				| PKT_RX_VLAN_STRIPPED;
+			pkt->vlan_tci = rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
+		}
+	}
 	nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);
 
 	*rx_pkt = pkt;
@@ -491,9 +527,9 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
 	return nb_segs;
 }
 
-uint16_t __hot
+static __rte_always_inline uint16_t
 nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
-			 uint16_t nb_pkts)
+			 uint16_t nb_pkts, const uint16_t flag)
 {
 	union cq_entry_t *cq_entry;
 	struct cqe_rx_t *cqe_rx;
@@ -515,7 +551,7 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
 		cq_entry = &desc[cqe_head];
 		cqe_rx = (struct cqe_rx_t *)cq_entry;
 		nb_segs = nicvf_process_cq_mseg_entry(cqe_rx, mbuf_phys_off,
-			rx_pkts + i, rbptr_offset, mbuf_init);
+			rx_pkts + i, rbptr_offset, mbuf_init, flag);
 		buffers_consumed += nb_segs;
 		cqe_head = (cqe_head + 1) & cqe_mask;
 		nicvf_prefetch_store_keep(rx_pkts[i]);
@@ -535,6 +571,22 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return to_process;
 }
 
+uint16_t __hot
+nicvf_recv_pkts_multiseg_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_NONE);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_multiseg_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
 uint32_t
 nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
diff --git a/drivers/net/thunderx/nicvf_rxtx.h b/drivers/net/thunderx/nicvf_rxtx.h
index 8bdd582ed..cb309782e 100644
--- a/drivers/net/thunderx/nicvf_rxtx.h
+++ b/drivers/net/thunderx/nicvf_rxtx.h
@@ -8,6 +8,9 @@
 #include <rte_byteorder.h>
 #include <rte_ethdev_driver.h>
 
+#define NICVF_RX_OFFLOAD_NONE           0x1
+#define NICVF_RX_OFFLOAD_VLAN_STRIP     0x2
+
 #define NICVF_TX_OFFLOAD_MASK (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)
 
 #ifndef __hot
@@ -86,9 +89,15 @@ nicvf_mbuff_init_mseg_update(struct rte_mbuf *pkt, const uint64_t mbuf_init,
 uint32_t nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx);
 uint32_t nicvf_dev_rbdr_refill(struct rte_eth_dev *dev, uint16_t queue_idx);
 
-uint16_t nicvf_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t pkts);
-uint16_t nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
-				  uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_no_offload(void *rxq, struct rte_mbuf **rx_pkts,
+		uint16_t pkts);
+uint16_t nicvf_recv_pkts_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts);
+
+uint16_t nicvf_recv_pkts_multiseg_no_offload(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_multiseg_vlan_strip(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 
 uint16_t nicvf_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts, uint16_t pkts);
 uint16_t nicvf_xmit_pkts_multiseg(void *txq, struct rte_mbuf **tx_pkts,
diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h
index cf98f7c1a..6dfbeb8ee 100644
--- a/drivers/net/thunderx/nicvf_struct.h
+++ b/drivers/net/thunderx/nicvf_struct.h
@@ -85,6 +85,7 @@ struct nicvf {
 	bool loopback_supported;
 	bool pf_acked:1;
 	bool pf_nacked:1;
+	bool vlan_strip:1;
 	uint64_t hwcap;
 	uint8_t link_up;
 	uint8_t	duplex;
-- 
2.18.0
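
For illustration, here is a minimal application-side sketch of how this
offload is requested. This is hypothetical usage, not part of the patch;
port/queue setup is trimmed and error handling is omitted:

/* Enable Rx VLAN stripping via the standard ethdev API. */
#include <rte_ethdev.h>

static int
enable_rx_vlan_strip(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf = { 0 };

	/* Request the offload at configure time; the PMD programs the
	 * hardware in its start path (nicvf_vf_start above). */
	conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
	if (rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf) < 0)
		return -1;

	/* Or toggle at run time; this reaches the vlan_offload_set
	 * callback added by this patch. */
	return rte_eth_dev_set_vlan_offload(port_id, ETH_VLAN_STRIP_OFFLOAD);
}

On receive, a stripped frame then carries PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED
in ol_flags and the TCI in mbuf->vlan_tci.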


* Re: [dpdk-dev] [PATCH] net/thunderx: add support for Rx VLAN offload
  2018-07-01 16:46 [dpdk-dev] [PATCH] net/thunderx: add support for Rx VLAN offload Pavan Nikhilesh
@ 2018-07-04 17:36 ` Ferruh Yigit
  2018-07-13 14:16   ` rkudurumalla
  2018-07-16  9:26 ` [dpdk-dev] [PATCH v2 1/2] net/thunderx: enable Rx checksum offload Pavan Nikhilesh
  2018-07-18 15:05 ` [dpdk-dev] [PATCH v3 " Pavan Nikhilesh
  2 siblings, 1 reply; 12+ messages in thread
From: Ferruh Yigit @ 2018-07-04 17:36 UTC (permalink / raw)
  To: Pavan Nikhilesh, jerin.jacob, santosh.shukla, rkudurumalla
  Cc: dev, Kudurumalla, Rakesh, Shahaf Shuler, Andrew Rybchenko, Olivier MATZ

On 7/1/2018 5:46 PM, Pavan Nikhilesh wrote:
> From: "Kudurumalla, Rakesh" <rakesh.kudurumalla@cavium.com>
> 
> This feature is used to offload stripping of the vlan header from
> received packets and to update the vlan_tci field in the mbuf when the
> DEV_RX_OFFLOAD_VLAN_STRIP & ETH_VLAN_STRIP_MASK flags are set.
> 
> Signed-off-by: Rakesh Kudurumalla <rkudurumalla@caviumnetworks.com>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> ---
>  drivers/net/thunderx/base/nicvf_hw.c |  1 +
>  drivers/net/thunderx/nicvf_ethdev.c  | 59 +++++++++++++++++------
>  drivers/net/thunderx/nicvf_rxtx.c    | 70 ++++++++++++++++++++++++----
>  drivers/net/thunderx/nicvf_rxtx.h    | 15 ++++--
>  drivers/net/thunderx/nicvf_struct.h  |  1 +

In thunderx.ini, "VLAN offload" is already marked as P (partially
supported); is it still partial? Why?


<...>

> @@ -1590,9 +1595,9 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
>  		     nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
>  
>  	/* Configure VLAN Strip */
> -	vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
> -			DEV_RX_OFFLOAD_VLAN_STRIP);
> -	nicvf_vlan_hw_strip(nic, vlan_strip);
> +	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
> +		ETH_VLAN_EXTEND_MASK;

You don't need anything more than ETH_VLAN_STRIP_MASK, but agreed, there
is no issue with adding more if you prefer.

> +	ret = nicvf_vlan_offload_config(dev, mask);
>  
>  	/* Based on the packet type(IPv4 or IPv6), the nicvf HW aligns L3 data
>  	 * to the 64bit memory address.
> @@ -1983,6 +1988,7 @@ static const struct eth_dev_ops nicvf_eth_dev_ops = {
>  	.dev_infos_get            = nicvf_dev_info_get,
>  	.dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
>  	.mtu_set                  = nicvf_dev_set_mtu,
> +	.vlan_offload_set         = nicvf_vlan_offload_set,

Not related to this patch, but I believe this name 'vlan_offload_set' is
confusing; it enables/disables VLAN-related config:
- vlan strip offload
- vlan packet filtering (drop/accept specific vlans)
- double vlan feature (not an offload, if I am not missing anything)
We can think about a more proper name later...

Also, the rte_eth_dev_set_vlan_offload() API may have a defect: it seems
not to take capability flags into account; cc'ed Shahaf and Andrew for
information.

And I have a question about DEV_TX_OFFLOAD_VLAN_INSERT, perhaps goes to Olivier,
if DEV_TX_OFFLOAD_VLAN_INSERT enabled what is the correct way to provide
vlan_tci to insert?
And do we need something like PKT_RX_VLAN_INSERT and use mbuf->vlan_tci value to
have the ability to insert VLAN to some packets?

<...>

> +static int
> +nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
> +{
> +	nicvf_vlan_offload_config(dev, mask);

Don't you need to change the rx_pkt_burst function according to the
request here?

Like, if the driver was using nicvf_recv_pkts_vlan_strip() and the
application disabled vlan_strip, what will happen?
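
(One possible way to address this, sketched below as a hypothetical
follow-up rather than what the patch does, would be to re-run the
burst-function selection from the callback:)

static int
nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	nicvf_vlan_offload_config(dev, mask);

	/* nicvf_vlan_hw_strip() records nic->vlan_strip, so re-running
	 * the table lookup picks the matching Rx callback. */
	if (dev->data->dev_started)
		nicvf_set_rx_function(dev);

	return 0;
}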

<...>

> +uint16_t __hot
> +nicvf_recv_pkts_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
> +		uint16_t nb_pkts)
> +{
> +	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
> +			NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);

Why do you OR NICVF_RX_OFFLOAD_NONE? This causes zeroing of
pkt->ol_flags, which will be overwritten because of
NICVF_RX_OFFLOAD_VLAN_STRIP already?
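
(For readers following the flag mechanics: each exported Rx wrapper
passes a compile-time-constant flag, so the compiler specializes the
inlined body. A reduced, standalone illustration follows; the constants
and names are placeholders, not the driver's actual values:)

#include <stdint.h>

#define RX_OFFLOAD_NONE       0x1
#define RX_OFFLOAD_VLAN_STRIP 0x2

static inline void
fill_ol_flags(uint64_t *ol_flags, int hw_stripped, const uint16_t flag)
{
	if (flag & RX_OFFLOAD_NONE)        /* clears stale flags left    */
		*ol_flags = 0;             /* from the mbuf's prior use  */
	if (flag & RX_OFFLOAD_VLAN_STRIP)  /* writes only tagged frames  */
		if (hw_stripped)
			*ol_flags |= 0x3;  /* stands in for PKT_RX_VLAN |
					    * PKT_RX_VLAN_STRIPPED       */
}

Without the NONE bit, an untagged packet would leave ol_flags stale,
which is the point the reply below addresses.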


* Re: [dpdk-dev] [PATCH] net/thunderx: add support for Rx VLAN offload
  2018-07-04 17:36 ` Ferruh Yigit
@ 2018-07-13 14:16   ` rkudurumalla
  2018-07-14  8:02     ` Andrew Rybchenko
  0 siblings, 1 reply; 12+ messages in thread
From: rkudurumalla @ 2018-07-13 14:16 UTC (permalink / raw)
  To: Ferruh Yigit, Pavan Nikhilesh, jerin.jacob, santosh.shukla
  Cc: dev, Kudurumalla, Rakesh, Shahaf Shuler, Andrew Rybchenko, Olivier MATZ



On 07/04/2018 11:06 PM, Ferruh Yigit wrote:
> On 7/1/2018 5:46 PM, Pavan Nikhilesh wrote:
>> From: "Kudurumalla, Rakesh" <rakesh.kudurumalla@cavium.com>
>>
>> This feature is used to offload stripping of the vlan header from
>> received packets and to update the vlan_tci field in the mbuf when the
>> DEV_RX_OFFLOAD_VLAN_STRIP & ETH_VLAN_STRIP_MASK flags are set.
>>
>> Signed-off-by: Rakesh Kudurumalla <rkudurumalla@caviumnetworks.com>
>> Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
>> ---
>>  drivers/net/thunderx/base/nicvf_hw.c |  1 +
>>  drivers/net/thunderx/nicvf_ethdev.c  | 59 +++++++++++++++++------
>>  drivers/net/thunderx/nicvf_rxtx.c    | 70 ++++++++++++++++++++++++----
>>  drivers/net/thunderx/nicvf_rxtx.h    | 15 ++++--
>>  drivers/net/thunderx/nicvf_struct.h  |  1 +
> 
> In thunderx.ini, "VLAN offload" is already marked as P (partially
> supported); is it still partial? Why?
> 
It is still partial because Tx VLAN offload (insertion of the vlan
header for tx packets) is yet to be implemented.
> 
> <...>
> 
>> @@ -1590,9 +1595,9 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
>>                    nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
>>
>>       /* Configure VLAN Strip */
>> -     vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
>> -                     DEV_RX_OFFLOAD_VLAN_STRIP);
>> -     nicvf_vlan_hw_strip(nic, vlan_strip);
>> +     mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
>> +             ETH_VLAN_EXTEND_MASK;
> 
> You don't need anything more than ETH_VLAN_STRIP_MASK, but agreed,
> there is no issue with adding more if you prefer.
> 
>> +     ret = nicvf_vlan_offload_config(dev, mask);
>>
>>       /* Based on the packet type(IPv4 or IPv6), the nicvf HW aligns L3 data
>>        * to the 64bit memory address.
>> @@ -1983,6 +1988,7 @@ static const struct eth_dev_ops nicvf_eth_dev_ops = {
>>       .dev_infos_get            = nicvf_dev_info_get,
>>       .dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
>>       .mtu_set                  = nicvf_dev_set_mtu,
>> +     .vlan_offload_set         = nicvf_vlan_offload_set,
> 
> Not related to this patch, but I believe this name 'vlan_offload_set' is
> confusing; it enables/disables VLAN-related config:
> - vlan strip offload
> - vlan packet filtering (drop/accept specific vlans)
> - double vlan feature (not an offload, if I am not missing anything)
> We can think about a more proper name later...
> 
> Also, the rte_eth_dev_set_vlan_offload() API may have a defect: it seems
> not to take capability flags into account; cc'ed Shahaf and Andrew for
> information.
> 
> And I have a question about DEV_TX_OFFLOAD_VLAN_INSERT, perhaps goes to Olivier,
> if DEV_TX_OFFLOAD_VLAN_INSERT enabled what is the correct way to provide
> vlan_tci to insert?
> And do we need something like PKT_RX_VLAN_INSERT and use mbuf->vlan_tci value to
> have the ability to insert VLAN to some packets?
> 
> <...>
> 
>> +static int
>> +nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
>> +{
>> +     nicvf_vlan_offload_config(dev, mask);
> 
> Don't you need to change the rx_pkt_burst function according to the
> request here?
> 
> Like, if the driver was using nicvf_recv_pkts_vlan_strip() and the
> application disabled vlan_strip, what will happen?

If the application disables vlan_strip under this condition, the
hardware doesn't strip the vlan header from the packets; hence the
cqe_rx_w0.vlan_stripped field is zero and pkt->ol_flags is updated with
zero.

> 
> <...>
> 
>> +uint16_t __hot
>> +nicvf_recv_pkts_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
>> +             uint16_t nb_pkts)
>> +{
>> +     return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
>> +                     NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
> 
> Why do you OR NICVF_RX_OFFLOAD_NONE? This causes zeroing of
> pkt->ol_flags, which will be overwritten because of
> NICVF_RX_OFFLOAD_VLAN_STRIP already?

After enabling vlan strip, if we receive plain (untagged) packets,
pkt->ol_flags is updated with zero value.


* Re: [dpdk-dev] [PATCH] net/thunderx: add support for Rx VLAN offload
  2018-07-13 14:16   ` rkudurumalla
@ 2018-07-14  8:02     ` Andrew Rybchenko
  0 siblings, 0 replies; 12+ messages in thread
From: Andrew Rybchenko @ 2018-07-14  8:02 UTC (permalink / raw)
  To: rkudurumalla, Ferruh Yigit, Pavan Nikhilesh, jerin.jacob, santosh.shukla
  Cc: dev, Kudurumalla, Rakesh, Shahaf Shuler, Thomas Monjalon, Olivier MATZ

On 13.07.2018 17:16, rkudurumalla wrote:
> On 07/04/2018 11:06 PM, Ferruh Yigit wrote:
>> On 7/1/2018 5:46 PM, Pavan Nikhilesh wrote:
>>> From: "Kudurumalla, Rakesh" <rakesh.kudurumalla@cavium.com>
>>>
>>> This feature is used to offload stripping of the vlan header from
>>> received packets and to update the vlan_tci field in the mbuf when the
>>> DEV_RX_OFFLOAD_VLAN_STRIP & ETH_VLAN_STRIP_MASK flags are set.
>>>
>>> Signed-off-by: Rakesh Kudurumalla <rkudurumalla@caviumnetworks.com>
>>> Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
>>> ---
>>>   drivers/net/thunderx/base/nicvf_hw.c |  1 +
>>>   drivers/net/thunderx/nicvf_ethdev.c  | 59 +++++++++++++++++------
>>>   drivers/net/thunderx/nicvf_rxtx.c    | 70 ++++++++++++++++++++++++----
>>>   drivers/net/thunderx/nicvf_rxtx.h    | 15 ++++--
>>>   drivers/net/thunderx/nicvf_struct.h  |  1 +
>> In thunderx.ini, "VLAN offload" is already marked as P (partially
>> supported); is it still partial? Why?
>>
> It is still partial because Tx VLAN offload (insertion of the vlan
> header for tx packets) is yet to be implemented.
>> <...>
>>
>>> @@ -1590,9 +1595,9 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
>>>                     nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
>>>
>>>        /* Configure VLAN Strip */
>>> -     vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
>>> -                     DEV_RX_OFFLOAD_VLAN_STRIP);
>>> -     nicvf_vlan_hw_strip(nic, vlan_strip);
>>> +     mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
>>> +             ETH_VLAN_EXTEND_MASK;
>> You don't need anything more than ETH_VLAN_STRIP_MASK, but agreed,
>> there is no issue with adding more if you prefer.
>>
>>> +     ret = nicvf_vlan_offload_config(dev, mask);
>>>
>>>        /* Based on the packet type(IPv4 or IPv6), the nicvf HW aligns L3 data
>>>         * to the 64bit memory address.
>>> @@ -1983,6 +1988,7 @@ static const struct eth_dev_ops nicvf_eth_dev_ops = {
>>>        .dev_infos_get            = nicvf_dev_info_get,
>>>        .dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
>>>        .mtu_set                  = nicvf_dev_set_mtu,
>>> +     .vlan_offload_set         = nicvf_vlan_offload_set,
>> Not related to this patch, but I believe this name 'vlan_offload_set' is
>> confusing; it enables/disables VLAN-related config:
>> - vlan strip offload
>> - vlan packet filtering (drop/accept specific vlans)
>> - double vlan feature (not an offload, if I am not missing anything)
>> We can think about a more proper name later...
>>
>> Also, the rte_eth_dev_set_vlan_offload() API may have a defect: it seems
>> not to take capability flags into account; cc'ed Shahaf and Andrew for
>> information.

Yes, the function could check if corresponding offloads are supported.

Right now we have a unified interface to control offloads at device
configure and queue setup. This API to change VLAN offloads looks really
legacy now. If we really need an API to control offloads at run time
(I'm not sure), it should be a generic API for all offloads, plus
corresponding information in dev_info which specifies which offloads are
controllable at run time.
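
(A purely hypothetical sketch of the direction described above; none of
these names exist in ethdev, they only illustrate the idea of a generic
run-time offload control:)

/* dev_info could advertise which offloads may change after start: */
struct dev_info_extension_hypothetical {
	uint64_t rx_offload_capa;          /* existing: supported        */
	uint64_t rx_offload_runtime_capa;  /* proposed: run-time mutable */
};

/* ...and one generic setter could replace per-feature APIs such as
 * rte_eth_dev_set_vlan_offload(): */
int rte_eth_dev_rx_offload_set_hypothetical(uint16_t port_id,
					    uint64_t offloads);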

>> And I have a question about DEV_TX_OFFLOAD_VLAN_INSERT, perhaps goes to Olivier,
>> if DEV_TX_OFFLOAD_VLAN_INSERT enabled what is the correct way to provide
>> vlan_tci to insert?
>> And do we need something like PKT_RX_VLAN_INSERT and use mbuf->vlan_tci value to
>> have the ability to insert VLAN to some packets?

As I understand it, ol_flags should have PKT_TX_VLAN. Right now the
description does not mention it; however, the description for
double-tagged insertion (PKT_TX_QINQ) does.
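
(For reference, a sketch of the Tx-side convention being discussed, as
an application would use it, assuming the mbuf API of this era:)

#include <rte_mbuf.h>

static inline void
request_vlan_insert(struct rte_mbuf *m, uint16_t tci)
{
	m->ol_flags |= PKT_TX_VLAN; /* ask the PMD to insert a VLAN header */
	m->vlan_tci = tci;          /* TCI to insert, CPU byte order */
}

The port must be configured with DEV_TX_OFFLOAD_VLAN_INSERT for this to
take effect; for double-tagged insertion, PKT_TX_QINQ with
mbuf->vlan_tci_outer is the documented counterpart.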


* [dpdk-dev] [PATCH v2 1/2] net/thunderx: enable Rx checksum offload
  2018-07-01 16:46 [dpdk-dev] [PATCH] net/thunderx: add support for Rx VLAN offload Pavan Nikhilesh
  2018-07-04 17:36 ` Ferruh Yigit
@ 2018-07-16  9:26 ` Pavan Nikhilesh
  2018-07-16  9:26   ` [dpdk-dev] [PATCH v2 2/2] net/thunderx: add support for Rx VLAN offload Pavan Nikhilesh
  2018-07-18 13:48   ` [dpdk-dev] [PATCH v2 1/2] net/thunderx: enable Rx checksum offload Ferruh Yigit
  2018-07-18 15:05 ` [dpdk-dev] [PATCH v3 " Pavan Nikhilesh
  2 siblings, 2 replies; 12+ messages in thread
From: Pavan Nikhilesh @ 2018-07-16  9:26 UTC (permalink / raw)
  To: jerin.jacob, santosh.shukla, rkudurumalla, ferruh.yigit
  Cc: dev, Pavan Nikhilesh

Add L3/L4 Rx checksum offload and update capabilities.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
 v2 Changes:
 - Add Rx checksum offload support for l3fwd.

 drivers/net/thunderx/nicvf_ethdev.c | 33 ++++++++-----
 drivers/net/thunderx/nicvf_ethdev.h |  1 +
 drivers/net/thunderx/nicvf_rxtx.c   | 73 +++++++++++++++++++++++++----
 drivers/net/thunderx/nicvf_rxtx.h   | 15 ++++--
 drivers/net/thunderx/nicvf_struct.h | 27 ++++++-----
 5 files changed, 113 insertions(+), 36 deletions(-)

diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index 76fed9f99..8fd52e1c3 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -357,11 +357,9 @@ nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 	}

 	memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
-	if (dev->rx_pkt_burst == nicvf_recv_pkts ||
-		dev->rx_pkt_burst == nicvf_recv_pkts_multiseg)
-		return ptypes;

-	return NULL;
+	/* All Ptypes are supported in all Rx functions. */
+	return ptypes;
 }

 static void
@@ -918,13 +916,18 @@ nicvf_set_tx_function(struct rte_eth_dev *dev)
 static void
 nicvf_set_rx_function(struct rte_eth_dev *dev)
 {
-	if (dev->data->scattered_rx) {
-		PMD_DRV_LOG(DEBUG, "Using multi-segment rx callback");
-		dev->rx_pkt_burst = nicvf_recv_pkts_multiseg;
-	} else {
-		PMD_DRV_LOG(DEBUG, "Using single-segment rx callback");
-		dev->rx_pkt_burst = nicvf_recv_pkts;
-	}
+	struct nicvf *nic = nicvf_pmd_priv(dev);
+
+	const eth_rx_burst_t rx_burst_func[2][2] = {
+		/* [NORMAL/SCATTER] [NO_CKSUM/CKSUM] */
+		[0][0] = nicvf_recv_pkts_no_offload,
+		[0][1] = nicvf_recv_pkts_cksum,
+		[1][0] = nicvf_recv_pkts_multiseg_no_offload,
+		[1][1] = nicvf_recv_pkts_multiseg_cksum,
+	};
+
+	dev->rx_pkt_burst =
+		rx_burst_func[dev->data->scattered_rx][nic->offload_cksum];
 }

 static int
@@ -1245,6 +1248,9 @@ nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq)
 				offsetof(struct rte_mbuf, data_off) != 4);
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
 				offsetof(struct rte_mbuf, data_off) != 6);
+	RTE_BUILD_BUG_ON(offsetof(struct nicvf_rxq, rxq_fastpath_data_end) -
+				offsetof(struct nicvf_rxq,
+					rxq_fastpath_data_start) > 128);
 	mb_def.nb_segs = 1;
 	mb_def.data_off = RTE_PKTMBUF_HEADROOM + (nic->skip_bytes);
 	mb_def.port = rxq->port_id;
@@ -1745,7 +1751,7 @@ nicvf_dev_start(struct rte_eth_dev *dev)
 			return ret;
 	}

-	/* Configure callbacks based on scatter mode */
+	/* Configure callbacks based on offloads */
 	nicvf_set_tx_function(dev);
 	nicvf_set_rx_function(dev);

@@ -1964,6 +1970,9 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		}
 	}

+	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+		nic->offload_cksum = 1;
+
 	PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
 		dev->data->port_id, nicvf_hw_cap(nic));

diff --git a/drivers/net/thunderx/nicvf_ethdev.h b/drivers/net/thunderx/nicvf_ethdev.h
index 9af508803..ae440fef2 100644
--- a/drivers/net/thunderx/nicvf_ethdev.h
+++ b/drivers/net/thunderx/nicvf_ethdev.h
@@ -38,6 +38,7 @@
 	DEV_TX_OFFLOAD_MULTI_SEGS)

 #define NICVF_RX_OFFLOAD_CAPA ( \
+	DEV_RX_OFFLOAD_CHECKSUM    | \
 	DEV_RX_OFFLOAD_VLAN_STRIP  | \
 	DEV_RX_OFFLOAD_CRC_STRIP   | \
 	DEV_RX_OFFLOAD_JUMBO_FRAME | \
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index 72305d9d2..fa4ee824a 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -327,6 +327,20 @@ nicvf_rx_classify_pkt(cqe_rx_word0_t cqe_rx_w0)
 	return ptype_table[cqe_rx_w0.l3_type][cqe_rx_w0.l4_type];
 }

+static inline uint64_t __hot
+nicvf_set_olflags(const cqe_rx_word0_t cqe_rx_w0)
+{
+	static const uint64_t flag_table[3] __rte_cache_aligned = {
+		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
+		PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_UNKNOWN,
+		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
+	};
+
+	const uint8_t idx = (cqe_rx_w0.err_opcode == CQE_RX_ERR_L4_CHK) << 1 |
+		(cqe_rx_w0.err_opcode == CQE_RX_ERR_IP_CHK);
+	return flag_table[idx];
+}
+
 static inline int __hot
 nicvf_fill_rbdr(struct nicvf_rxq *rxq, int to_fill)
 {
@@ -385,11 +399,13 @@ nicvf_rx_offload(cqe_rx_word0_t cqe_rx_w0, cqe_rx_word2_t cqe_rx_w2,
 	if (likely(cqe_rx_w0.rss_alg)) {
 		pkt->hash.rss = cqe_rx_w2.rss_tag;
 		pkt->ol_flags |= PKT_RX_RSS_HASH;
+
 	}
 }

-uint16_t __hot
-nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+static __rte_always_inline uint16_t
+nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
+		const uint32_t flag)
 {
 	uint32_t i, to_process;
 	struct cqe_rx_t *cqe_rx;
@@ -420,7 +436,11 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		rb0_ptr = *((uint64_t *)cqe_rx + rbptr_offset);
 		pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
 				(rb0_ptr - cqe_rx_w1.align_pad, mbuf_phys_off);
-		pkt->ol_flags = 0;
+
+		if (flag & NICVF_RX_OFFLOAD_NONE)
+			pkt->ol_flags = 0;
+		if (flag & NICVF_RX_OFFLOAD_CKSUM)
+			pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
 		pkt->data_len = cqe_rx_w3.rb0_sz;
 		pkt->pkt_len = cqe_rx_w3.rb0_sz;
 		pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
@@ -445,11 +465,27 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	return to_process;
 }

-static inline uint16_t __hot
+uint16_t __hot
+nicvf_recv_pkts_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_NONE);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_CKSUM);
+}
+
+static __rte_always_inline uint16_t __hot
 nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
 			uint64_t mbuf_phys_off,
 			struct rte_mbuf **rx_pkt, uint8_t rbptr_offset,
-			uint64_t mbuf_init)
+			uint64_t mbuf_init, const uint32_t flag)
 {
 	struct rte_mbuf *pkt, *seg, *prev;
 	cqe_rx_word0_t cqe_rx_w0;
@@ -467,12 +503,15 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
 	pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
 			(rb_ptr[0] - cqe_rx_w1.align_pad, mbuf_phys_off);

-	pkt->ol_flags = 0;
 	pkt->pkt_len = cqe_rx_w1.pkt_len;
 	pkt->data_len = rb_sz[nicvf_frag_num(0)];
 	nicvf_mbuff_init_mseg_update(
 				pkt, mbuf_init, cqe_rx_w1.align_pad, nb_segs);
 	pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
+	if (flag & NICVF_RX_OFFLOAD_NONE)
+		pkt->ol_flags = 0;
+	if (flag & NICVF_RX_OFFLOAD_CKSUM)
+		pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
 	nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);

 	*rx_pkt = pkt;
@@ -491,9 +530,9 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
 	return nb_segs;
 }

-uint16_t __hot
+static __rte_always_inline uint16_t __hot
 nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
-			 uint16_t nb_pkts)
+			 uint16_t nb_pkts, const uint32_t flag)
 {
 	union cq_entry_t *cq_entry;
 	struct cqe_rx_t *cqe_rx;
@@ -515,7 +554,7 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
 		cq_entry = &desc[cqe_head];
 		cqe_rx = (struct cqe_rx_t *)cq_entry;
 		nb_segs = nicvf_process_cq_mseg_entry(cqe_rx, mbuf_phys_off,
-			rx_pkts + i, rbptr_offset, mbuf_init);
+			rx_pkts + i, rbptr_offset, mbuf_init, flag);
 		buffers_consumed += nb_segs;
 		cqe_head = (cqe_head + 1) & cqe_mask;
 		nicvf_prefetch_store_keep(rx_pkts[i]);
@@ -535,6 +574,22 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return to_process;
 }

+uint16_t __hot
+nicvf_recv_pkts_multiseg_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_NONE);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_multiseg_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_CKSUM);
+}
+
 uint32_t
 nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
diff --git a/drivers/net/thunderx/nicvf_rxtx.h b/drivers/net/thunderx/nicvf_rxtx.h
index 8bdd582ed..72daffb60 100644
--- a/drivers/net/thunderx/nicvf_rxtx.h
+++ b/drivers/net/thunderx/nicvf_rxtx.h
@@ -8,6 +8,9 @@
 #include <rte_byteorder.h>
 #include <rte_ethdev_driver.h>

+#define NICVF_RX_OFFLOAD_NONE           0x1
+#define NICVF_RX_OFFLOAD_CKSUM          0x2
+
 #define NICVF_TX_OFFLOAD_MASK (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)

 #ifndef __hot
@@ -86,9 +89,15 @@ nicvf_mbuff_init_mseg_update(struct rte_mbuf *pkt, const uint64_t mbuf_init,
 uint32_t nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx);
 uint32_t nicvf_dev_rbdr_refill(struct rte_eth_dev *dev, uint16_t queue_idx);

-uint16_t nicvf_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t pkts);
-uint16_t nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
-				  uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_no_offload(void *rxq, struct rte_mbuf **rx_pkts,
+		uint16_t pkts);
+uint16_t nicvf_recv_pkts_cksum(void *rxq, struct rte_mbuf **rx_pkts,
+		uint16_t pkts);
+
+uint16_t nicvf_recv_pkts_multiseg_no_offload(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_multiseg_cksum(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

 uint16_t nicvf_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts, uint16_t pkts);
 uint16_t nicvf_xmit_pkts_multiseg(void *txq, struct rte_mbuf **tx_pkts,
diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h
index cf98f7c1a..a770e6b54 100644
--- a/drivers/net/thunderx/nicvf_struct.h
+++ b/drivers/net/thunderx/nicvf_struct.h
@@ -55,25 +55,27 @@ union mbuf_initializer {
 };

 struct nicvf_rxq {
+	MARKER rxq_fastpath_data_start;
+	uint8_t  rbptr_offset;
+	uint16_t rx_free_thresh;
+	uint32_t head;
+	uint32_t qlen_mask;
+	int32_t recv_buffers;
+	int32_t available_space;
 	uint64_t mbuf_phys_off;
 	uintptr_t cq_status;
 	uintptr_t cq_door;
-	union mbuf_initializer mbuf_initializer;
-	nicvf_iova_addr_t phys;
-	union cq_entry_t *desc;
 	struct nicvf_rbdr *shared_rbdr;
-	struct nicvf *nic;
 	struct rte_mempool *pool;
-	uint32_t head;
-	uint32_t qlen_mask;
-	int32_t available_space;
-	int32_t recv_buffers;
-	uint16_t rx_free_thresh;
-	uint16_t queue_id;
-	uint16_t precharge_cnt;
+	union cq_entry_t *desc;
+	union mbuf_initializer mbuf_initializer;
+	MARKER rxq_fastpath_data_end;
 	uint8_t rx_drop_en;
+	uint16_t precharge_cnt;
 	uint16_t port_id;
-	uint8_t  rbptr_offset;
+	uint16_t queue_id;
+	struct nicvf *nic;
+	nicvf_iova_addr_t phys;
 } __rte_cache_aligned;

 struct nicvf {
@@ -85,6 +87,7 @@ struct nicvf {
 	bool loopback_supported;
 	bool pf_acked:1;
 	bool pf_nacked:1;
+	bool offload_cksum:1;
 	uint64_t hwcap;
 	uint8_t link_up;
 	uint8_t	duplex;
--
2.18.0
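
(A worked example of the branchless index computed in nicvf_set_olflags()
above; the error-opcode values here are illustrative placeholders, not
the hardware-defined CQE_RX_ERR_* constants:)

#include <stdio.h>

enum { ERR_NONE = 0, ERR_IP_CHK = 1, ERR_L4_CHK = 2 };

static unsigned
idx_for(int err_opcode)
{
	return ((unsigned)(err_opcode == ERR_L4_CHK) << 1) |
	       (unsigned)(err_opcode == ERR_IP_CHK);
}

int main(void)
{
	/* idx 0 -> IP_CKSUM_GOOD | L4_CKSUM_GOOD    (no error)
	 * idx 1 -> IP_CKSUM_BAD  | L4_CKSUM_UNKNOWN (IP checksum error)
	 * idx 2 -> IP_CKSUM_GOOD | L4_CKSUM_BAD     (L4 checksum error) */
	printf("%u %u %u\n", idx_for(ERR_NONE), idx_for(ERR_IP_CHK),
	       idx_for(ERR_L4_CHK));
	return 0; /* prints: 0 1 2 */
}

The two comparisons are mutually exclusive, so idx is always 0, 1 or 2
and the three-entry flag_table covers every case.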


* [dpdk-dev] [PATCH v2 2/2] net/thunderx: add support for Rx VLAN offload
  2018-07-16  9:26 ` [dpdk-dev] [PATCH v2 1/2] net/thunderx: enable Rx checksum offload Pavan Nikhilesh
@ 2018-07-16  9:26   ` Pavan Nikhilesh
  2018-07-18 13:48   ` [dpdk-dev] [PATCH v2 1/2] net/thunderx: enable Rx checksum offload Ferruh Yigit
  1 sibling, 0 replies; 12+ messages in thread
From: Pavan Nikhilesh @ 2018-07-16  9:26 UTC (permalink / raw)
  To: jerin.jacob, santosh.shukla, rkudurumalla, ferruh.yigit
  Cc: dev, Kudurumalla, Rakesh, Pavan Nikhilesh

From: "Kudurumalla, Rakesh" <rakesh.kudurumalla@cavium.com>

This feature is used to offload stripping of the vlan header from
received packets and to update the vlan_tci field in the mbuf when the
DEV_RX_OFFLOAD_VLAN_STRIP & ETH_VLAN_STRIP_MASK flags are set.

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@caviumnetworks.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
 drivers/net/thunderx/base/nicvf_hw.c |  1 +
 drivers/net/thunderx/nicvf_ethdev.c  | 54 ++++++++++++++++++++++------
 drivers/net/thunderx/nicvf_rxtx.c    | 47 ++++++++++++++++++++++++
 drivers/net/thunderx/nicvf_rxtx.h    |  9 +++++
 drivers/net/thunderx/nicvf_struct.h  |  1 +
 5 files changed, 101 insertions(+), 11 deletions(-)

diff --git a/drivers/net/thunderx/base/nicvf_hw.c b/drivers/net/thunderx/base/nicvf_hw.c
index b07a2937d..5b1abe201 100644
--- a/drivers/net/thunderx/base/nicvf_hw.c
+++ b/drivers/net/thunderx/base/nicvf_hw.c
@@ -699,6 +699,7 @@ nicvf_vlan_hw_strip(struct nicvf *nic, bool enable)
 	else
 		val &= ~((STRIP_SECOND_VLAN | STRIP_FIRST_VLAN) << 25);
 
+	nic->vlan_strip = enable;
 	nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
 }
 
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index 8fd52e1c3..e87d41116 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -52,6 +52,8 @@ static void nicvf_dev_stop(struct rte_eth_dev *dev);
 static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
 static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
 			  bool cleanup);
+static int nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
+static int nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 
 RTE_INIT(nicvf_init_log);
 static void
@@ -918,16 +920,21 @@ nicvf_set_rx_function(struct rte_eth_dev *dev)
 {
 	struct nicvf *nic = nicvf_pmd_priv(dev);
 
-	const eth_rx_burst_t rx_burst_func[2][2] = {
-		/* [NORMAL/SCATTER] [NO_CKSUM/CKSUM] */
-		[0][0] = nicvf_recv_pkts_no_offload,
-		[0][1] = nicvf_recv_pkts_cksum,
-		[1][0] = nicvf_recv_pkts_multiseg_no_offload,
-		[1][1] = nicvf_recv_pkts_multiseg_cksum,
+	const eth_rx_burst_t rx_burst_func[2][2][2] = {
+	/* [NORMAL/SCATTER] [CKSUM/NO_CKSUM] [VLAN_STRIP/NO_VLAN_STRIP] */
+		[0][0][0] = nicvf_recv_pkts_no_offload,
+		[0][0][1] = nicvf_recv_pkts_vlan_strip,
+		[0][1][0] = nicvf_recv_pkts_cksum,
+		[0][1][1] = nicvf_recv_pkts_cksum_vlan_strip,
+		[1][0][0] = nicvf_recv_pkts_multiseg_no_offload,
+		[1][0][1] = nicvf_recv_pkts_multiseg_vlan_strip,
+		[1][1][0] = nicvf_recv_pkts_multiseg_cksum,
+		[1][1][1] = nicvf_recv_pkts_multiseg_cksum_vlan_strip,
 	};
 
 	dev->rx_pkt_burst =
-		rx_burst_func[dev->data->scattered_rx][nic->offload_cksum];
+		rx_burst_func[dev->data->scattered_rx]
+			[nic->offload_cksum][nic->vlan_strip];
 }
 
 static int
@@ -1475,7 +1482,7 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
 	struct rte_mbuf *mbuf;
 	uint16_t rx_start, rx_end;
 	uint16_t tx_start, tx_end;
-	bool vlan_strip;
+	int mask;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1596,9 +1603,9 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
 		     nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
 
 	/* Configure VLAN Strip */
-	vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_VLAN_STRIP);
-	nicvf_vlan_hw_strip(nic, vlan_strip);
+	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+		ETH_VLAN_EXTEND_MASK;
+	ret = nicvf_vlan_offload_config(dev, mask);
 
 	/* Based on the packet type(IPv4 or IPv6), the nicvf HW aligns L3 data
 	 * to the 64bit memory address.
@@ -1992,6 +1999,7 @@ static const struct eth_dev_ops nicvf_eth_dev_ops = {
 	.dev_infos_get            = nicvf_dev_info_get,
 	.dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
 	.mtu_set                  = nicvf_dev_set_mtu,
+	.vlan_offload_set         = nicvf_vlan_offload_set,
 	.reta_update              = nicvf_dev_reta_update,
 	.reta_query               = nicvf_dev_reta_query,
 	.rss_hash_update          = nicvf_dev_rss_hash_update,
@@ -2008,6 +2016,30 @@ static const struct eth_dev_ops nicvf_eth_dev_ops = {
 	.get_reg                  = nicvf_dev_get_regs,
 };
 
+static int
+nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
+{
+	struct rte_eth_rxmode *rxmode;
+	struct nicvf *nic = nicvf_pmd_priv(dev);
+	rxmode = &dev->data->dev_conf.rxmode;
+	if (mask & ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+			nicvf_vlan_hw_strip(nic, true);
+		else
+			nicvf_vlan_hw_strip(nic, false);
+	}
+
+	return 0;
+}
+
+static int
+nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+	nicvf_vlan_offload_config(dev, mask);
+
+	return 0;
+}
+
 static inline int
 nicvf_set_first_skip(struct rte_eth_dev *dev)
 {
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index fa4ee824a..14479cb7a 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -441,6 +441,14 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
 			pkt->ol_flags = 0;
 		if (flag & NICVF_RX_OFFLOAD_CKSUM)
 			pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
+		if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
+			if (unlikely(cqe_rx_w0.vlan_stripped)) {
+				pkt->ol_flags |= PKT_RX_VLAN
+							| PKT_RX_VLAN_STRIPPED;
+				pkt->vlan_tci =
+					rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
+			}
+		}
 		pkt->data_len = cqe_rx_w3.rb0_sz;
 		pkt->pkt_len = cqe_rx_w3.rb0_sz;
 		pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
@@ -481,6 +489,22 @@ nicvf_recv_pkts_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
 			NICVF_RX_OFFLOAD_CKSUM);
 }
 
+uint16_t __hot
+nicvf_recv_pkts_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_cksum_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_CKSUM | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
 static __rte_always_inline uint16_t __hot
 nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
 			uint64_t mbuf_phys_off,
@@ -512,6 +536,13 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
 		pkt->ol_flags = 0;
 	if (flag & NICVF_RX_OFFLOAD_CKSUM)
 		pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
+	if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
+		if (unlikely(cqe_rx_w0.vlan_stripped)) {
+			pkt->ol_flags |= PKT_RX_VLAN
+				| PKT_RX_VLAN_STRIPPED;
+			pkt->vlan_tci = rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
+		}
+	}
 	nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);
 
 	*rx_pkt = pkt;
@@ -590,6 +621,22 @@ nicvf_recv_pkts_multiseg_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
 			NICVF_RX_OFFLOAD_CKSUM);
 }
 
+uint16_t __hot
+nicvf_recv_pkts_multiseg_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_multiseg_cksum_vlan_strip(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_CKSUM | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
 uint32_t
 nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
diff --git a/drivers/net/thunderx/nicvf_rxtx.h b/drivers/net/thunderx/nicvf_rxtx.h
index 72daffb60..a39808cb6 100644
--- a/drivers/net/thunderx/nicvf_rxtx.h
+++ b/drivers/net/thunderx/nicvf_rxtx.h
@@ -10,6 +10,7 @@
 
 #define NICVF_RX_OFFLOAD_NONE           0x1
 #define NICVF_RX_OFFLOAD_CKSUM          0x2
+#define NICVF_RX_OFFLOAD_VLAN_STRIP     0x4
 
 #define NICVF_TX_OFFLOAD_MASK (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)
 
@@ -93,11 +94,19 @@ uint16_t nicvf_recv_pkts_no_offload(void *rxq, struct rte_mbuf **rx_pkts,
 		uint16_t pkts);
 uint16_t nicvf_recv_pkts_cksum(void *rxq, struct rte_mbuf **rx_pkts,
 		uint16_t pkts);
+uint16_t nicvf_recv_pkts_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_cksum_vlan_strip(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 
 uint16_t nicvf_recv_pkts_multiseg_no_offload(void *rx_queue,
 		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 uint16_t nicvf_recv_pkts_multiseg_cksum(void *rx_queue,
 		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_multiseg_vlan_strip(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_multiseg_cksum_vlan_strip(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 
 uint16_t nicvf_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts, uint16_t pkts);
 uint16_t nicvf_xmit_pkts_multiseg(void *txq, struct rte_mbuf **tx_pkts,
diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h
index a770e6b54..dd52f38e5 100644
--- a/drivers/net/thunderx/nicvf_struct.h
+++ b/drivers/net/thunderx/nicvf_struct.h
@@ -88,6 +88,7 @@ struct nicvf {
 	bool pf_acked:1;
 	bool pf_nacked:1;
 	bool offload_cksum:1;
+	bool vlan_strip:1;
 	uint64_t hwcap;
 	uint8_t link_up;
 	uint8_t	duplex;
-- 
2.18.0
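
(The table above extends the two-way dispatch of patch 1/2 to three
dimensions; below is a reduced, standalone sketch of the selection
pattern, with placeholder names standing in for the eight
nicvf_recv_pkts_* variants:)

#include <stdio.h>

typedef const char *(*rx_burst_t)(void);

static const char *rx_plain(void)      { return "no_offload"; }
static const char *rx_vlan(void)       { return "vlan_strip"; }
static const char *rx_cksum(void)      { return "cksum"; }
static const char *rx_cksum_vlan(void) { return "cksum_vlan_strip"; }

int main(void)
{
	/* [scatter][cksum][vlan_strip]; the scatter = 1 plane would hold
	 * the multiseg variants, elided here. */
	const rx_burst_t tbl[1][2][2] = {
		[0][0][0] = rx_plain, [0][0][1] = rx_vlan,
		[0][1][0] = rx_cksum, [0][1][1] = rx_cksum_vlan,
	};
	int cksum = 1, vlan_strip = 0;

	printf("%s\n", tbl[0][cksum][vlan_strip]()); /* -> "cksum" */
	return 0;
}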


* Re: [dpdk-dev] [PATCH v2 1/2] net/thunderx: enable Rx checksum offload
  2018-07-16  9:26 ` [dpdk-dev] [PATCH v2 1/2] net/thunderx: enable Rx checksum offload Pavan Nikhilesh
  2018-07-16  9:26   ` [dpdk-dev] [PATCH v2 2/2] net/thunderx: add support for Rx VLAN offload Pavan Nikhilesh
@ 2018-07-18 13:48   ` Ferruh Yigit
  1 sibling, 0 replies; 12+ messages in thread
From: Ferruh Yigit @ 2018-07-18 13:48 UTC (permalink / raw)
  To: Pavan Nikhilesh, jerin.jacob, santosh.shukla, rkudurumalla; +Cc: dev

On 7/16/2018 10:26 AM, Pavan Nikhilesh wrote:
> Add L3/L4 Rx checksum offload and update capabilities.
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> ---
>  v2 Changes:
>  - Add Rx checksum offload support for l3fwd.

Overall looks good to me; any ack from driver maintainers?


* [dpdk-dev] [PATCH v3 1/2] net/thunderx: enable Rx checksum offload
  2018-07-01 16:46 [dpdk-dev] [PATCH] net/thunderx: add support for Rx VLAN offload Pavan Nikhilesh
  2018-07-04 17:36 ` Ferruh Yigit
  2018-07-16  9:26 ` [dpdk-dev] [PATCH v2 1/2] net/thunderx: enable Rx checksum offload Pavan Nikhilesh
@ 2018-07-18 15:05 ` Pavan Nikhilesh
  2018-07-18 15:05   ` [dpdk-dev] [PATCH v3 2/2] net/thunderx: add support for Rx VLAN offload Pavan Nikhilesh
  2018-07-18 17:27   ` [dpdk-dev] [PATCH v3 1/2] net/thunderx: enable Rx checksum offload Jerin Jacob
  2 siblings, 2 replies; 12+ messages in thread
From: Pavan Nikhilesh @ 2018-07-18 15:05 UTC (permalink / raw)
  To: jerin.jacob, santosh.shukla, rkudurumalla, ferruh.yigit
  Cc: dev, Pavan Nikhilesh

Add L3/L4 Rx checksum offload and update capabilities.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
 v3 Changes:
 - rebase on top of next-net

 v2 Changes:
 - Add Rx checksum offload support for l3fwd.

 drivers/net/thunderx/nicvf_ethdev.c | 33 ++++++++-----
 drivers/net/thunderx/nicvf_ethdev.h |  1 +
 drivers/net/thunderx/nicvf_rxtx.c   | 73 +++++++++++++++++++++++++----
 drivers/net/thunderx/nicvf_rxtx.h   | 15 ++++--
 drivers/net/thunderx/nicvf_struct.h | 27 ++++++-----
 5 files changed, 113 insertions(+), 36 deletions(-)

diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index 5e15a88a5..eba05fdf0 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -355,11 +355,9 @@ nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 	}

 	memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
-	if (dev->rx_pkt_burst == nicvf_recv_pkts ||
-		dev->rx_pkt_burst == nicvf_recv_pkts_multiseg)
-		return ptypes;

-	return NULL;
+	/* All Ptypes are supported in all Rx functions. */
+	return ptypes;
 }

 static void
@@ -916,13 +914,18 @@ nicvf_set_tx_function(struct rte_eth_dev *dev)
 static void
 nicvf_set_rx_function(struct rte_eth_dev *dev)
 {
-	if (dev->data->scattered_rx) {
-		PMD_DRV_LOG(DEBUG, "Using multi-segment rx callback");
-		dev->rx_pkt_burst = nicvf_recv_pkts_multiseg;
-	} else {
-		PMD_DRV_LOG(DEBUG, "Using single-segment rx callback");
-		dev->rx_pkt_burst = nicvf_recv_pkts;
-	}
+	struct nicvf *nic = nicvf_pmd_priv(dev);
+
+	const eth_rx_burst_t rx_burst_func[2][2] = {
+		/* [NORMAL/SCATTER] [NO_CKSUM/CKSUM] */
+		[0][0] = nicvf_recv_pkts_no_offload,
+		[0][1] = nicvf_recv_pkts_cksum,
+		[1][0] = nicvf_recv_pkts_multiseg_no_offload,
+		[1][1] = nicvf_recv_pkts_multiseg_cksum,
+	};
+
+	dev->rx_pkt_burst =
+		rx_burst_func[dev->data->scattered_rx][nic->offload_cksum];
 }

 static int
@@ -1243,6 +1246,9 @@ nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq)
 				offsetof(struct rte_mbuf, data_off) != 4);
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
 				offsetof(struct rte_mbuf, data_off) != 6);
+	RTE_BUILD_BUG_ON(offsetof(struct nicvf_rxq, rxq_fastpath_data_end) -
+				offsetof(struct nicvf_rxq,
+					rxq_fastpath_data_start) > 128);
 	mb_def.nb_segs = 1;
 	mb_def.data_off = RTE_PKTMBUF_HEADROOM + (nic->skip_bytes);
 	mb_def.port = rxq->port_id;
@@ -1743,7 +1749,7 @@ nicvf_dev_start(struct rte_eth_dev *dev)
 			return ret;
 	}

-	/* Configure callbacks based on scatter mode */
+	/* Configure callbacks based on offloads */
 	nicvf_set_tx_function(dev);
 	nicvf_set_rx_function(dev);

@@ -1962,6 +1968,9 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
 		}
 	}

+	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+		nic->offload_cksum = 1;
+
 	PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
 		dev->data->port_id, nicvf_hw_cap(nic));

diff --git a/drivers/net/thunderx/nicvf_ethdev.h b/drivers/net/thunderx/nicvf_ethdev.h
index 9af508803..ae440fef2 100644
--- a/drivers/net/thunderx/nicvf_ethdev.h
+++ b/drivers/net/thunderx/nicvf_ethdev.h
@@ -38,6 +38,7 @@
 	DEV_TX_OFFLOAD_MULTI_SEGS)

 #define NICVF_RX_OFFLOAD_CAPA ( \
+	DEV_RX_OFFLOAD_CHECKSUM    | \
 	DEV_RX_OFFLOAD_VLAN_STRIP  | \
 	DEV_RX_OFFLOAD_CRC_STRIP   | \
 	DEV_RX_OFFLOAD_JUMBO_FRAME | \
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index 6e075e23c..4980dab79 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -331,6 +331,20 @@ nicvf_rx_classify_pkt(cqe_rx_word0_t cqe_rx_w0)
 	return ptype_table[cqe_rx_w0.l3_type][cqe_rx_w0.l4_type];
 }

+static inline uint64_t __hot
+nicvf_set_olflags(const cqe_rx_word0_t cqe_rx_w0)
+{
+	static const uint64_t flag_table[3] __rte_cache_aligned = {
+		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
+		PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_UNKNOWN,
+		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
+	};
+
+	const uint8_t idx = (cqe_rx_w0.err_opcode == CQE_RX_ERR_L4_CHK) << 1 |
+		(cqe_rx_w0.err_opcode == CQE_RX_ERR_IP_CHK);
+	return flag_table[idx];
+}
+
 static inline int __hot
 nicvf_fill_rbdr(struct nicvf_rxq *rxq, int to_fill)
 {
@@ -389,11 +403,13 @@ nicvf_rx_offload(cqe_rx_word0_t cqe_rx_w0, cqe_rx_word2_t cqe_rx_w2,
 	if (likely(cqe_rx_w0.rss_alg)) {
 		pkt->hash.rss = cqe_rx_w2.rss_tag;
 		pkt->ol_flags |= PKT_RX_RSS_HASH;
+
 	}
 }

-uint16_t __hot
-nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+static __rte_always_inline uint16_t
+nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
+		const uint32_t flag)
 {
 	uint32_t i, to_process;
 	struct cqe_rx_t *cqe_rx;
@@ -424,7 +440,11 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		rb0_ptr = *((uint64_t *)cqe_rx + rbptr_offset);
 		pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
 				(rb0_ptr - cqe_rx_w1.align_pad, mbuf_phys_off);
-		pkt->ol_flags = 0;
+
+		if (flag & NICVF_RX_OFFLOAD_NONE)
+			pkt->ol_flags = 0;
+		if (flag & NICVF_RX_OFFLOAD_CKSUM)
+			pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
 		pkt->data_len = cqe_rx_w3.rb0_sz;
 		pkt->pkt_len = cqe_rx_w3.rb0_sz;
 		pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
@@ -449,11 +469,27 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	return to_process;
 }

-static inline uint16_t __hot
+uint16_t __hot
+nicvf_recv_pkts_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_NONE);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_CKSUM);
+}
+
+static __rte_always_inline uint16_t __hot
 nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
 			uint64_t mbuf_phys_off,
 			struct rte_mbuf **rx_pkt, uint8_t rbptr_offset,
-			uint64_t mbuf_init)
+			uint64_t mbuf_init, const uint32_t flag)
 {
 	struct rte_mbuf *pkt, *seg, *prev;
 	cqe_rx_word0_t cqe_rx_w0;
@@ -471,12 +507,15 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
 	pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
 			(rb_ptr[0] - cqe_rx_w1.align_pad, mbuf_phys_off);

-	pkt->ol_flags = 0;
 	pkt->pkt_len = cqe_rx_w1.pkt_len;
 	pkt->data_len = rb_sz[nicvf_frag_num(0)];
 	nicvf_mbuff_init_mseg_update(
 				pkt, mbuf_init, cqe_rx_w1.align_pad, nb_segs);
 	pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
+	if (flag & NICVF_RX_OFFLOAD_NONE)
+		pkt->ol_flags = 0;
+	if (flag & NICVF_RX_OFFLOAD_CKSUM)
+		pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
 	nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);

 	*rx_pkt = pkt;
@@ -495,9 +534,9 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
 	return nb_segs;
 }

-uint16_t __hot
+static __rte_always_inline uint16_t __hot
 nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
-			 uint16_t nb_pkts)
+			 uint16_t nb_pkts, const uint32_t flag)
 {
 	union cq_entry_t *cq_entry;
 	struct cqe_rx_t *cqe_rx;
@@ -519,7 +558,7 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
 		cq_entry = &desc[cqe_head];
 		cqe_rx = (struct cqe_rx_t *)cq_entry;
 		nb_segs = nicvf_process_cq_mseg_entry(cqe_rx, mbuf_phys_off,
-			rx_pkts + i, rbptr_offset, mbuf_init);
+			rx_pkts + i, rbptr_offset, mbuf_init, flag);
 		buffers_consumed += nb_segs;
 		cqe_head = (cqe_head + 1) & cqe_mask;
 		nicvf_prefetch_store_keep(rx_pkts[i]);
@@ -539,6 +578,22 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return to_process;
 }

+uint16_t __hot
+nicvf_recv_pkts_multiseg_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_NONE);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_multiseg_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_CKSUM);
+}
+
 uint32_t
 nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
diff --git a/drivers/net/thunderx/nicvf_rxtx.h b/drivers/net/thunderx/nicvf_rxtx.h
index 8bdd582ed..72daffb60 100644
--- a/drivers/net/thunderx/nicvf_rxtx.h
+++ b/drivers/net/thunderx/nicvf_rxtx.h
@@ -8,6 +8,9 @@
 #include <rte_byteorder.h>
 #include <rte_ethdev_driver.h>

+#define NICVF_RX_OFFLOAD_NONE           0x1
+#define NICVF_RX_OFFLOAD_CKSUM          0x2
+
 #define NICVF_TX_OFFLOAD_MASK (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)

 #ifndef __hot
@@ -86,9 +89,15 @@ nicvf_mbuff_init_mseg_update(struct rte_mbuf *pkt, const uint64_t mbuf_init,
 uint32_t nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx);
 uint32_t nicvf_dev_rbdr_refill(struct rte_eth_dev *dev, uint16_t queue_idx);

-uint16_t nicvf_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t pkts);
-uint16_t nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
-				  uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_no_offload(void *rxq, struct rte_mbuf **rx_pkts,
+		uint16_t pkts);
+uint16_t nicvf_recv_pkts_cksum(void *rxq, struct rte_mbuf **rx_pkts,
+		uint16_t pkts);
+
+uint16_t nicvf_recv_pkts_multiseg_no_offload(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_multiseg_cksum(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);

 uint16_t nicvf_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts, uint16_t pkts);
 uint16_t nicvf_xmit_pkts_multiseg(void *txq, struct rte_mbuf **tx_pkts,
diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h
index cf98f7c1a..a770e6b54 100644
--- a/drivers/net/thunderx/nicvf_struct.h
+++ b/drivers/net/thunderx/nicvf_struct.h
@@ -55,25 +55,27 @@ union mbuf_initializer {
 };

 struct nicvf_rxq {
+	MARKER rxq_fastpath_data_start;
+	uint8_t  rbptr_offset;
+	uint16_t rx_free_thresh;
+	uint32_t head;
+	uint32_t qlen_mask;
+	int32_t recv_buffers;
+	int32_t available_space;
 	uint64_t mbuf_phys_off;
 	uintptr_t cq_status;
 	uintptr_t cq_door;
-	union mbuf_initializer mbuf_initializer;
-	nicvf_iova_addr_t phys;
-	union cq_entry_t *desc;
 	struct nicvf_rbdr *shared_rbdr;
-	struct nicvf *nic;
 	struct rte_mempool *pool;
-	uint32_t head;
-	uint32_t qlen_mask;
-	int32_t available_space;
-	int32_t recv_buffers;
-	uint16_t rx_free_thresh;
-	uint16_t queue_id;
-	uint16_t precharge_cnt;
+	union cq_entry_t *desc;
+	union mbuf_initializer mbuf_initializer;
+	MARKER rxq_fastpath_data_end;
 	uint8_t rx_drop_en;
+	uint16_t precharge_cnt;
 	uint16_t port_id;
-	uint8_t  rbptr_offset;
+	uint16_t queue_id;
+	struct nicvf *nic;
+	nicvf_iova_addr_t phys;
 } __rte_cache_aligned;
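
The reordering above gathers every field touched on the Rx fast path
between the two markers so they land on the same cache lines, and
pushes control-path fields to the tail. A hedged sketch of how such a
layout can be guarded at build time (the 128-byte line size and the
reduced field set are assumptions of this sketch, not the driver's
exact layout):

#include <stddef.h>
#include <stdint.h>

#define CACHE_LINE 128 /* ThunderX line size; an assumption here */

struct rxq_sketch {
	/* fast-path fields, touched on every burst */
	uint8_t  rbptr_offset;
	uint16_t rx_free_thresh;
	uint32_t head;
	uint32_t qlen_mask;
	int32_t  recv_buffers;
	int32_t  available_space;
	uint64_t mbuf_phys_off;
	/* slow-path fields, touched only on control operations */
	uint16_t port_id;
	uint16_t queue_id;
} __attribute__((aligned(CACHE_LINE)));

/* Fail the build if the hot region ever grows past one cache line. */
_Static_assert(offsetof(struct rxq_sketch, port_id) <= CACHE_LINE,
	       "fast-path fields must fit in one cache line");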

 struct nicvf {
@@ -85,6 +87,7 @@ struct nicvf {
 	bool loopback_supported;
 	bool pf_acked:1;
 	bool pf_nacked:1;
+	bool offload_cksum:1;
 	uint64_t hwcap;
 	uint8_t link_up;
 	uint8_t	duplex;
--
2.18.0


* [dpdk-dev] [PATCH v3 2/2] net/thunderx: add support for Rx VLAN offload
  2018-07-18 15:05 ` [dpdk-dev] [PATCH v3 " Pavan Nikhilesh
@ 2018-07-18 15:05   ` Pavan Nikhilesh
  2018-07-18 17:30     ` Jerin Jacob
  2018-07-18 17:27   ` [dpdk-dev] [PATCH v3 1/2] net/thunderx: enable Rx checksum offload Jerin Jacob
  1 sibling, 1 reply; 12+ messages in thread
From: Pavan Nikhilesh @ 2018-07-18 15:05 UTC (permalink / raw)
  To: jerin.jacob, santosh.shukla, rkudurumalla, ferruh.yigit
  Cc: dev, Kudurumalla, Rakesh, Pavan Nikhilesh

From: "Kudurumalla, Rakesh" <rakesh.kudurumalla@cavium.com>

This feature offloads stripping of the VLAN header from received
packets and updates the vlan_tci field in the mbuf when the
DEV_RX_OFFLOAD_VLAN_STRIP offload is enabled and the
ETH_VLAN_STRIP_MASK bit is set.

Signed-off-by: Rakesh Kudurumalla <rkudurumalla@caviumnetworks.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
 drivers/net/thunderx/base/nicvf_hw.c |  1 +
 drivers/net/thunderx/nicvf_ethdev.c  | 54 ++++++++++++++++++++++------
 drivers/net/thunderx/nicvf_rxtx.c    | 47 ++++++++++++++++++++++++
 drivers/net/thunderx/nicvf_rxtx.h    |  9 +++++
 drivers/net/thunderx/nicvf_struct.h  |  1 +
 5 files changed, 101 insertions(+), 11 deletions(-)
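
For context, an application requests this offload at configure time
through rxmode.offloads. A minimal sketch against the 18.08-era ethdev
API (one Rx and one Tx queue assumed; error handling elided):

#include <rte_ethdev.h>

/* Request Rx VLAN stripping before the port is started. */
static int
enable_vlan_strip(uint16_t port_id)
{
	struct rte_eth_conf conf = { 0 };

	conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}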

diff --git a/drivers/net/thunderx/base/nicvf_hw.c b/drivers/net/thunderx/base/nicvf_hw.c
index b07a2937d..5b1abe201 100644
--- a/drivers/net/thunderx/base/nicvf_hw.c
+++ b/drivers/net/thunderx/base/nicvf_hw.c
@@ -699,6 +699,7 @@ nicvf_vlan_hw_strip(struct nicvf *nic, bool enable)
 	else
 		val &= ~((STRIP_SECOND_VLAN | STRIP_FIRST_VLAN) << 25);
 
+	nic->vlan_strip = enable;
 	nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
 }
 
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index eba05fdf0..a55c3ca66 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -52,6 +52,8 @@ static void nicvf_dev_stop(struct rte_eth_dev *dev);
 static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
 static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
 			  bool cleanup);
+static int nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
+static int nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 
 RTE_INIT(nicvf_init_log)
 {
@@ -916,16 +918,21 @@ nicvf_set_rx_function(struct rte_eth_dev *dev)
 {
 	struct nicvf *nic = nicvf_pmd_priv(dev);
 
-	const eth_rx_burst_t rx_burst_func[2][2] = {
-		/* [NORMAL/SCATTER] [NO_CKSUM/CKSUM] */
-		[0][0] = nicvf_recv_pkts_no_offload,
-		[0][1] = nicvf_recv_pkts_cksum,
-		[1][0] = nicvf_recv_pkts_multiseg_no_offload,
-		[1][1] = nicvf_recv_pkts_multiseg_cksum,
+	const eth_rx_burst_t rx_burst_func[2][2][2] = {
+	/* [NORMAL/SCATTER] [NO_CKSUM/CKSUM] [NO_VLAN_STRIP/VLAN_STRIP] */
+		[0][0][0] = nicvf_recv_pkts_no_offload,
+		[0][0][1] = nicvf_recv_pkts_vlan_strip,
+		[0][1][0] = nicvf_recv_pkts_cksum,
+		[0][1][1] = nicvf_recv_pkts_cksum_vlan_strip,
+		[1][0][0] = nicvf_recv_pkts_multiseg_no_offload,
+		[1][0][1] = nicvf_recv_pkts_multiseg_vlan_strip,
+		[1][1][0] = nicvf_recv_pkts_multiseg_cksum,
+		[1][1][1] = nicvf_recv_pkts_multiseg_cksum_vlan_strip,
 	};
 
 	dev->rx_pkt_burst =
-		rx_burst_func[dev->data->scattered_rx][nic->offload_cksum];
+		rx_burst_func[dev->data->scattered_rx]
+			[nic->offload_cksum][nic->vlan_strip];
 }
 
 static int
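
Indexing a const table by boolean feature bits, as above, replaces a
chain of conditionals with one load: each bool collapses to 0 or 1 and
selects a specialization directly. A generic sketch of the dispatch
(the handlers are dummies, not the driver's):

#include <stdbool.h>
#include <stdint.h>

typedef uint16_t (*rx_burst_t)(void *rxq, uint16_t nb);

static uint16_t rx_plain(void *q, uint16_t n)      { (void)q; return n; }
static uint16_t rx_vlan(void *q, uint16_t n)       { (void)q; return n; }
static uint16_t rx_cksum(void *q, uint16_t n)      { (void)q; return n; }
static uint16_t rx_cksum_vlan(void *q, uint16_t n) { (void)q; return n; }

/* Each bool converts to 0 or 1, so the pair of feature bits indexes
 * one specialization with no branches. */
static rx_burst_t
pick_rx_func(bool cksum, bool vlan_strip)
{
	static const rx_burst_t tbl[2][2] = {
		[0][0] = rx_plain, [0][1] = rx_vlan,
		[1][0] = rx_cksum, [1][1] = rx_cksum_vlan,
	};
	return tbl[cksum][vlan_strip];
}

The same idea scales to the 2x2x2 table above: one extra dimension per
independent feature bit.
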
@@ -1473,7 +1480,7 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
 	struct rte_mbuf *mbuf;
 	uint16_t rx_start, rx_end;
 	uint16_t tx_start, tx_end;
-	bool vlan_strip;
+	int mask;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1594,9 +1601,9 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
 		     nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
 
 	/* Configure VLAN Strip */
-	vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_VLAN_STRIP);
-	nicvf_vlan_hw_strip(nic, vlan_strip);
+	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+		ETH_VLAN_EXTEND_MASK;
+	ret = nicvf_vlan_offload_config(dev, mask);
 
 	/* Based on the packet type(IPv4 or IPv6), the nicvf HW aligns L3 data
 	 * to the 64bit memory address.
@@ -1990,6 +1997,7 @@ static const struct eth_dev_ops nicvf_eth_dev_ops = {
 	.dev_infos_get            = nicvf_dev_info_get,
 	.dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
 	.mtu_set                  = nicvf_dev_set_mtu,
+	.vlan_offload_set         = nicvf_vlan_offload_set,
 	.reta_update              = nicvf_dev_reta_update,
 	.reta_query               = nicvf_dev_reta_query,
 	.rss_hash_update          = nicvf_dev_rss_hash_update,
@@ -2006,6 +2014,30 @@ static const struct eth_dev_ops nicvf_eth_dev_ops = {
 	.get_reg                  = nicvf_dev_get_regs,
 };
 
+static int
+nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
+{
+	struct rte_eth_rxmode *rxmode;
+	struct nicvf *nic = nicvf_pmd_priv(dev);
+	rxmode = &dev->data->dev_conf.rxmode;
+	if (mask & ETH_VLAN_STRIP_MASK) {
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+			nicvf_vlan_hw_strip(nic, true);
+		else
+			nicvf_vlan_hw_strip(nic, false);
+	}
+
+	return 0;
+}
+
+static int
+nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+	nicvf_vlan_offload_config(dev, mask);
+
+	return 0;
+}
+
 static inline int
 nicvf_set_first_skip(struct rte_eth_dev *dev)
 {
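
With the vlan_offload_set op wired up, an application can also toggle
stripping after the port is started. A sketch of the usual
get/modify/set sequence (assumes the port is already configured; error
handling elided):

#include <rte_ethdev.h>

/* Toggle Rx VLAN stripping on a running port. */
static int
set_vlan_strip(uint16_t port_id, int on)
{
	int mask = rte_eth_dev_get_vlan_offload(port_id);

	if (on)
		mask |= ETH_VLAN_STRIP_OFFLOAD;
	else
		mask &= ~ETH_VLAN_STRIP_OFFLOAD;

	return rte_eth_dev_set_vlan_offload(port_id, mask);
}
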
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index 4980dab79..247c35685 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -445,6 +445,14 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
 			pkt->ol_flags = 0;
 		if (flag & NICVF_RX_OFFLOAD_CKSUM)
 			pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
+		if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
+			if (unlikely(cqe_rx_w0.vlan_stripped)) {
+				pkt->ol_flags |= PKT_RX_VLAN
+							| PKT_RX_VLAN_STRIPPED;
+				pkt->vlan_tci =
+					rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
+			}
+		}
 		pkt->data_len = cqe_rx_w3.rb0_sz;
 		pkt->pkt_len = cqe_rx_w3.rb0_sz;
 		pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
@@ -485,6 +493,22 @@ nicvf_recv_pkts_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
 			NICVF_RX_OFFLOAD_CKSUM);
 }
 
+uint16_t __hot
+nicvf_recv_pkts_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_cksum_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_CKSUM | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
 static __rte_always_inline uint16_t __hot
 nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
 			uint64_t mbuf_phys_off,
@@ -516,6 +540,13 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
 		pkt->ol_flags = 0;
 	if (flag & NICVF_RX_OFFLOAD_CKSUM)
 		pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
+	if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
+		if (unlikely(cqe_rx_w0.vlan_stripped)) {
+			pkt->ol_flags |= PKT_RX_VLAN
+				| PKT_RX_VLAN_STRIPPED;
+			pkt->vlan_tci = rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
+		}
+	}
 	nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);
 
 	*rx_pkt = pkt;
@@ -594,6 +625,22 @@ nicvf_recv_pkts_multiseg_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
 			NICVF_RX_OFFLOAD_CKSUM);
 }
 
+uint16_t __hot
+nicvf_recv_pkts_multiseg_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_multiseg_cksum_vlan_strip(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_CKSUM | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
 uint32_t
 nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
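
On the receive side, the stripped tag surfaces through the mbuf flags
set above. A minimal consumer sketch using the 18.08-era mbuf API
(port and queue setup assumed to exist elsewhere):

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Drain one burst and report any tag the NIC stripped. */
static void
drain_and_report(uint16_t port, uint16_t queue)
{
	struct rte_mbuf *burst[32];
	uint16_t nb = rte_eth_rx_burst(port, queue, burst, 32);

	for (uint16_t i = 0; i < nb; i++) {
		struct rte_mbuf *m = burst[i];

		if (m->ol_flags & PKT_RX_VLAN_STRIPPED)
			printf("vlan tci: 0x%x\n", m->vlan_tci);
		rte_pktmbuf_free(m);
	}
}
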
diff --git a/drivers/net/thunderx/nicvf_rxtx.h b/drivers/net/thunderx/nicvf_rxtx.h
index 72daffb60..a39808cb6 100644
--- a/drivers/net/thunderx/nicvf_rxtx.h
+++ b/drivers/net/thunderx/nicvf_rxtx.h
@@ -10,6 +10,7 @@
 
 #define NICVF_RX_OFFLOAD_NONE           0x1
 #define NICVF_RX_OFFLOAD_CKSUM          0x2
+#define NICVF_RX_OFFLOAD_VLAN_STRIP     0x4
 
 #define NICVF_TX_OFFLOAD_MASK (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)
 
@@ -93,11 +94,19 @@ uint16_t nicvf_recv_pkts_no_offload(void *rxq, struct rte_mbuf **rx_pkts,
 		uint16_t pkts);
 uint16_t nicvf_recv_pkts_cksum(void *rxq, struct rte_mbuf **rx_pkts,
 		uint16_t pkts);
+uint16_t nicvf_recv_pkts_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_cksum_vlan_strip(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 
 uint16_t nicvf_recv_pkts_multiseg_no_offload(void *rx_queue,
 		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 uint16_t nicvf_recv_pkts_multiseg_cksum(void *rx_queue,
 		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_multiseg_vlan_strip(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_multiseg_cksum_vlan_strip(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 
 uint16_t nicvf_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts, uint16_t pkts);
 uint16_t nicvf_xmit_pkts_multiseg(void *txq, struct rte_mbuf **tx_pkts,
diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h
index a770e6b54..dd52f38e5 100644
--- a/drivers/net/thunderx/nicvf_struct.h
+++ b/drivers/net/thunderx/nicvf_struct.h
@@ -88,6 +88,7 @@ struct nicvf {
 	bool pf_acked:1;
 	bool pf_nacked:1;
 	bool offload_cksum:1;
+	bool vlan_strip:1;
 	uint64_t hwcap;
 	uint8_t link_up;
 	uint8_t	duplex;
-- 
2.18.0


* Re: [dpdk-dev] [PATCH v3 1/2] net/thunderx: enable Rx checksum offload
  2018-07-18 15:05 ` [dpdk-dev] [PATCH v3 " Pavan Nikhilesh
  2018-07-18 15:05   ` [dpdk-dev] [PATCH v3 2/2] net/thunderx: add support for Rx VLAN offload Pavan Nikhilesh
@ 2018-07-18 17:27   ` Jerin Jacob
  2018-07-19 13:15     ` Ferruh Yigit
  1 sibling, 1 reply; 12+ messages in thread
From: Jerin Jacob @ 2018-07-18 17:27 UTC (permalink / raw)
  To: Pavan Nikhilesh; +Cc: santosh.shukla, rkudurumalla, ferruh.yigit, dev

-----Original Message-----
> Date: Wed, 18 Jul 2018 20:35:01 +0530
> From: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> To: jerin.jacob@caviumnetworks.com, santosh.shukla@caviumnetworks.com,
>  rkudurumalla@caviumnetworks.com, ferruh.yigit@intel.com
> Cc: dev@dpdk.org, Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> Subject: [dpdk-dev] [PATCH v3 1/2] net/thunderx: enable Rx checksum offload
> X-Mailer: git-send-email 2.18.0
> 
> Add L3/L4 Rx checksum offload and update capabilities.
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>


Acked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>


* Re: [dpdk-dev] [PATCH v3 2/2] net/thunderx: add support for Rx VLAN offload
  2018-07-18 15:05   ` [dpdk-dev] [PATCH v3 2/2] net/thunderx: add support for Rx VLAN offload Pavan Nikhilesh
@ 2018-07-18 17:30     ` Jerin Jacob
  0 siblings, 0 replies; 12+ messages in thread
From: Jerin Jacob @ 2018-07-18 17:30 UTC (permalink / raw)
  To: Pavan Nikhilesh
  Cc: santosh.shukla, rkudurumalla, ferruh.yigit, dev, Kudurumalla, Rakesh

-----Original Message-----
> Date: Wed, 18 Jul 2018 20:35:02 +0530
> From: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> To: jerin.jacob@caviumnetworks.com, santosh.shukla@caviumnetworks.com,
>  rkudurumalla@caviumnetworks.com, ferruh.yigit@intel.com
> Cc: dev@dpdk.org, "Kudurumalla, Rakesh" <rakesh.kudurumalla@cavium.com>,
>  Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> Subject: [dpdk-dev] [PATCH v3 2/2] net/thunderx: add support for Rx VLAN
>  offload
> X-Mailer: git-send-email 2.18.0
> 
> From: "Kudurumalla, Rakesh" <rakesh.kudurumalla@cavium.com>
> 
> This feature offloads stripping of the VLAN header from received
> packets and updates the vlan_tci field in the mbuf when the
> DEV_RX_OFFLOAD_VLAN_STRIP offload is enabled and the
> ETH_VLAN_STRIP_MASK bit is set.
> 
> Signed-off-by: Rakesh Kudurumalla <rkudurumalla@caviumnetworks.com>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>

Acked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>


* Re: [dpdk-dev] [PATCH v3 1/2] net/thunderx: enable Rx checksum offload
  2018-07-18 17:27   ` [dpdk-dev] [PATCH v3 1/2] net/thunderx: enable Rx checksum offload Jerin Jacob
@ 2018-07-19 13:15     ` Ferruh Yigit
  0 siblings, 0 replies; 12+ messages in thread
From: Ferruh Yigit @ 2018-07-19 13:15 UTC (permalink / raw)
  To: Jerin Jacob, Pavan Nikhilesh; +Cc: santosh.shukla, rkudurumalla, dev

On 7/18/2018 6:27 PM, Jerin Jacob wrote:
> -----Original Message-----
>> Date: Wed, 18 Jul 2018 20:35:01 +0530
>> From: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
>> To: jerin.jacob@caviumnetworks.com, santosh.shukla@caviumnetworks.com,
>>  rkudurumalla@caviumnetworks.com, ferruh.yigit@intel.com
>> Cc: dev@dpdk.org, Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
>> Subject: [dpdk-dev] [PATCH v3 1/2] net/thunderx: enable Rx checksum offload
>> X-Mailer: git-send-email 2.18.0
>>
>> Add L3/L4 Rx checksum offload and update capabilities.
>>
>> Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> 
> Acked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>

Series applied to dpdk-next-net/master, thanks.


Thread overview: 12+ messages
2018-07-01 16:46 [dpdk-dev] [PATCH] net/thunderx: add support for Rx VLAN offload Pavan Nikhilesh
2018-07-04 17:36 ` Ferruh Yigit
2018-07-13 14:16   ` rkudurumalla
2018-07-14  8:02     ` Andrew Rybchenko
2018-07-16  9:26 ` [dpdk-dev] [PATCH v2 1/2] net/thunderx: enable Rx checksum offload Pavan Nikhilesh
2018-07-16  9:26   ` [dpdk-dev] [PATCH v2 2/2] net/thunderx: add support for Rx VLAN offload Pavan Nikhilesh
2018-07-18 13:48   ` [dpdk-dev] [PATCH v2 1/2] net/thunderx: enable Rx checksum offload Ferruh Yigit
2018-07-18 15:05 ` [dpdk-dev] [PATCH v3 " Pavan Nikhilesh
2018-07-18 15:05   ` [dpdk-dev] [PATCH v3 2/2] net/thunderx: add support for Rx VLAN offload Pavan Nikhilesh
2018-07-18 17:30     ` Jerin Jacob
2018-07-18 17:27   ` [dpdk-dev] [PATCH v3 1/2] net/thunderx: enable Rx checksum offload Jerin Jacob
2018-07-19 13:15     ` Ferruh Yigit
