patches for DPDK stable branches
* [dpdk-stable] [PATCH 01/11] net/qede/base: fix VF LRO tunnel params configuration
From: Rasesh Mody @ 2018-01-27 21:15 UTC (permalink / raw)
  To: dev; +Cc: Harish Patil, ferruh.yigit, Dept-EngDPDKDev, stable

From: Harish Patil <harish.patil@cavium.com>

Add the missing LRO tunnel configuration parameters for the VF over the
SR-IOV channel.

Fixes: ec94dbc57362 ("qede: add base driver")
Cc: stable@dpdk.org

Signed-off-by: Harish Patil <harish.patil@cavium.com>
---
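Notes: for context, a minimal VF-side sketch (not part of the patch;
the struct name and the two non-tunnel field names are assumed from the
surrounding base-driver code) of the LRO request that previously lost
its tunnel bits on the VF->PF channel:

/* Sketch only. Without this patch the two tunnel flags below were
 * never encoded into the sge_tpa TLV, so the PF configured LRO for
 * plain traffic only.
 */
static void example_sge_tpa_request(struct ecore_sge_tpa_params *p)
{
	p->tpa_ipv4_en_flg = 1;      /* assumed existing field: IPv4 LRO */
	p->tpa_ipv6_en_flg = 1;      /* assumed existing field: IPv6 LRO */
	p->tpa_ipv4_tunn_en_flg = 1; /* now maps to VFPF_TPA_TUNN_IPV4_EN_FLAG */
	p->tpa_ipv6_tunn_en_flg = 1; /* now maps to VFPF_TPA_TUNN_IPV6_EN_FLAG */
}
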
 drivers/net/qede/base/ecore_vf.c      |    6 ++++++
 drivers/net/qede/base/ecore_vfpf_if.h |    2 ++
 2 files changed, 8 insertions(+)

diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
index 25109db..e0f2dd5 100644
--- a/drivers/net/qede/base/ecore_vf.c
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -1385,6 +1385,12 @@ enum _ecore_status_t
 		if (sge_tpa_params->tpa_gro_consistent_flg)
 			p_sge_tpa_tlv->sge_tpa_flags |=
 			    VFPF_TPA_GRO_CONSIST_FLAG;
+		if (sge_tpa_params->tpa_ipv4_tunn_en_flg)
+			p_sge_tpa_tlv->sge_tpa_flags |=
+			    VFPF_TPA_TUNN_IPV4_EN_FLAG;
+		if (sge_tpa_params->tpa_ipv6_tunn_en_flg)
+			p_sge_tpa_tlv->sge_tpa_flags |=
+			    VFPF_TPA_TUNN_IPV6_EN_FLAG;
 
 		p_sge_tpa_tlv->tpa_max_aggs_num =
 		    sge_tpa_params->tpa_max_aggs_num;
diff --git a/drivers/net/qede/base/ecore_vfpf_if.h b/drivers/net/qede/base/ecore_vfpf_if.h
index 3ccc766..ecb0064 100644
--- a/drivers/net/qede/base/ecore_vfpf_if.h
+++ b/drivers/net/qede/base/ecore_vfpf_if.h
@@ -424,6 +424,8 @@ struct vfpf_vport_update_sge_tpa_tlv {
 	#define VFPF_TPA_PKT_SPLIT_FLAG      (1 << 2)
 	#define VFPF_TPA_HDR_DATA_SPLIT_FLAG (1 << 3)
 	#define VFPF_TPA_GRO_CONSIST_FLAG    (1 << 4)
+	#define VFPF_TPA_TUNN_IPV4_EN_FLAG   (1 << 5)
+	#define VFPF_TPA_TUNN_IPV6_EN_FLAG   (1 << 6)
 
 	u8			update_sge_tpa_flags;
 	#define VFPF_UPDATE_SGE_DEPRECATED_FLAG	   (1 << 0)
-- 
1.7.10.3


* [dpdk-stable] [PATCH 02/11] net/qede: initialize VF tunnel as enabled on start
From: Rasesh Mody @ 2018-01-27 21:15 UTC (permalink / raw)
  To: dev; +Cc: Harish Patil, ferruh.yigit, Dept-EngDPDKDev, stable

From: Harish Patil <harish.patil@cavium.com>

By default, the PF driver enables tunnel offload for its child VF.
So mark tunnel offloads as enabled in the VF driver to reflect the
actual state.

Fixes: 52d94b57e1c7 ("net/qede: add slowpath support for VXLAN tunneling")
Fixes: d378cefab84e ("net/qede: add support for GENEVE tunneling offload")

Cc: stable@dpdk.org

Signed-off-by: Harish Patil <harish.patil@cavium.com>
---
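Notes: a hypothetical consumer sketch (illustrative only, not from the
driver) of why the cached state matters: tunnel filter and UDP port
operations consult adapter->vxlan.enable / adapter->geneve.enable, so a
VF that left them false would reject configuration that the PF has in
fact already enabled:

static int example_vf_add_vxlan_port(struct qede_dev *adapter,
				     uint16_t port)
{
	if (!adapter->vxlan.enable)
		return -ENOTSUP; /* pre-fix VFs would wrongly land here */
	adapter->vxlan.udp_port = port;
	return 0;
}
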
 drivers/net/qede/qede_ethdev.c |   15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 8d82661..8a69d20 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -3040,9 +3040,22 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 	SLIST_INIT(&adapter->uc_list_head);
 	adapter->mtu = ETHER_MTU;
 	adapter->new_mtu = ETHER_MTU;
-	if (!is_vf)
+	if (!is_vf) {
 		if (qede_start_vport(adapter, adapter->mtu))
 			return -1;
+	} else {
+		/* VF tunnel offloads are enabled by default in PF driver */
+		adapter->vxlan.enable = true;
+		adapter->vxlan.num_filters = 0;
+		adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
+					     ETH_TUNNEL_FILTER_IVLAN;
+		adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
+		adapter->geneve.enable = true;
+		adapter->geneve.num_filters = 0;
+		adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
+					      ETH_TUNNEL_FILTER_IVLAN;
+		adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
+	}
 
 	DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
 		adapter->primary_mac.addr_bytes[0],
-- 
1.7.10.3


* [dpdk-stable] [PATCH 03/11] net/qede: fix to check if tunnel L3 header is valid
From: Rasesh Mody @ 2018-01-27 21:15 UTC (permalink / raw)
  To: dev; +Cc: Harish Patil, ferruh.yigit, Dept-EngDPDKDev, stable

From: Harish Patil <harish.patil@cavium.com>

- Add a check to verify that the tunnel IP header checksum is valid and
set the corresponding mbuf flag.
- Refactor so that inner-frame handling for tunneled packets is shared
with regular (non-tunneled) packets.
- Make qede_tunn_exist() inline.
- Remove RTE_PTYPE_L2_ETHER as the default L2 pkt_type.

Fixes: 3d4bb4411683 ("net/qede: add fastpath support for VXLAN tunneling")

Cc: stable@dpdk.org

Signed-off-by: Harish Patil <harish.patil@cavium.com>
---
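Notes: the net effect on the mbuf offload flags for a tunneled frame,
condensed into one sketch. The helper names are the ones used or added
by this patch; the PKT_RX_* values are the standard rte_mbuf flags.

static inline uint64_t example_tunn_ol_flags(uint16_t parse_flag)
{
	uint64_t ol_flags = 0;

	if (qede_check_tunn_csum_l4(parse_flag))
		ol_flags |= PKT_RX_L4_CKSUM_BAD;
	else
		ol_flags |= PKT_RX_L4_CKSUM_GOOD;

	if (qede_check_tunn_csum_l3(parse_flag)) /* added by this patch */
		ol_flags |= PKT_RX_EIP_CKSUM_BAD; /* outer IP header bad */
	else
		ol_flags |= PKT_RX_IP_CKSUM_GOOD;

	return ol_flags;
}
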
 drivers/net/qede/qede_rxtx.c |  143 +++++++++++++++++++++++++++---------------
 1 file changed, 91 insertions(+), 52 deletions(-)

diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 184f0e1..2af1bfc 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -812,12 +812,18 @@ void qede_stop_queues(struct rte_eth_dev *eth_dev)
 	}
 }
 
-static bool qede_tunn_exist(uint16_t flag)
+static inline bool qede_tunn_exist(uint16_t flag)
 {
 	return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
 		    PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);
 }
 
+static inline uint8_t qede_check_tunn_csum_l3(uint16_t flag)
+{
+	return !!((PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
+		PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT) & flag);
+}
+
 /*
  * qede_check_tunn_csum_l4:
  * Returns:
@@ -844,33 +850,51 @@ static inline uint8_t qede_check_notunn_csum_l4(uint16_t flag)
 	return 0;
 }
 
-/* Returns outer L3 and L4 packet_type for tunneled packets */
+/* Returns outer L2, L3 and L4 packet_type for tunneled packets */
 static inline uint32_t qede_rx_cqe_to_pkt_type_outer(struct rte_mbuf *m)
 {
 	uint32_t packet_type = RTE_PTYPE_UNKNOWN;
 	struct ether_hdr *eth_hdr;
 	struct ipv4_hdr *ipv4_hdr;
 	struct ipv6_hdr *ipv6_hdr;
+	struct vlan_hdr *vlan_hdr;
+	uint16_t ethertype;
+	bool vlan_tagged = 0;
+	uint16_t len;
 
 	eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
-	if (eth_hdr->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
+	len = sizeof(struct ether_hdr);
+	ethertype = rte_cpu_to_be_16(eth_hdr->ether_type);
+
+	/* Note: Valid only if VLAN stripping is disabled */
+	if (ethertype == ETHER_TYPE_VLAN) {
+		vlan_tagged = 1;
+		vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
+		len += sizeof(struct vlan_hdr);
+		ethertype = rte_cpu_to_be_16(vlan_hdr->eth_proto);
+	}
+
+	if (ethertype == ETHER_TYPE_IPv4) {
 		packet_type |= RTE_PTYPE_L3_IPV4;
-		ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
-						   sizeof(struct ether_hdr));
+		ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, len);
 		if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
 			packet_type |= RTE_PTYPE_L4_TCP;
 		else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
 			packet_type |= RTE_PTYPE_L4_UDP;
-	} else if (eth_hdr->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
+	} else if (ethertype == ETHER_TYPE_IPv6) {
 		packet_type |= RTE_PTYPE_L3_IPV6;
-		ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
-						   sizeof(struct ether_hdr));
+		ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *, len);
 		if (ipv6_hdr->proto == IPPROTO_TCP)
 			packet_type |= RTE_PTYPE_L4_TCP;
 		else if (ipv6_hdr->proto == IPPROTO_UDP)
 			packet_type |= RTE_PTYPE_L4_UDP;
 	}
 
+	if (vlan_tagged)
+		packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
+	else
+		packet_type |= RTE_PTYPE_L2_ETHER;
+
 	return packet_type;
 }
 
@@ -1163,17 +1187,17 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 		[QEDE_PKT_TYPE_TUNN_GRE] = RTE_PTYPE_TUNNEL_GRE,
 		[QEDE_PKT_TYPE_TUNN_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
 		[QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE] =
-				RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L2_ETHER,
+				RTE_PTYPE_TUNNEL_GENEVE,
 		[QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE] =
-				RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L2_ETHER,
+				RTE_PTYPE_TUNNEL_GRE,
 		[QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN] =
-				RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L2_ETHER,
+				RTE_PTYPE_TUNNEL_VXLAN,
 		[QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE] =
-				RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L2_ETHER,
+				RTE_PTYPE_TUNNEL_GENEVE,
 		[QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE] =
-				RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L2_ETHER,
+				RTE_PTYPE_TUNNEL_GRE,
 		[QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN] =
-				RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L2_ETHER,
+				RTE_PTYPE_TUNNEL_VXLAN,
 		[QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE] =
 				RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
 		[QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE] =
@@ -1253,7 +1277,7 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 		 uint8_t bitfield)
 {
 	PMD_RX_LOG(INFO, rxq,
-		"len 0x%x bf 0x%x hash_val 0x%x"
+		"len 0x%04x bf 0x%04x hash_val 0x%x"
 		" ol_flags 0x%04lx l2=%s l3=%s l4=%s tunn=%s"
 		" inner_l2=%s inner_l3=%s inner_l4=%s\n",
 		m->data_len, bitfield, m->hash.rss,
@@ -1404,47 +1428,62 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 				ol_flags |= PKT_RX_L4_CKSUM_BAD;
 			} else {
 				ol_flags |= PKT_RX_L4_CKSUM_GOOD;
-				if (tpa_start_flg)
-					flags =
-					 cqe_start_tpa->tunnel_pars_flags.flags;
-				else
-					flags = fp_cqe->tunnel_pars_flags.flags;
-				tunn_parse_flag = flags;
-				/* Tunnel_type */
-				packet_type =
-				qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
-
-				/* Inner header */
-				packet_type |=
-				      qede_rx_cqe_to_pkt_type_inner(parse_flag);
-
-				/* Outer L3/L4 types is not available in CQE */
-				packet_type |=
-				      qede_rx_cqe_to_pkt_type_outer(rx_mb);
 			}
-		} else {
-			PMD_RX_LOG(INFO, rxq, "Rx non-tunneled packet\n");
-			if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
-				PMD_RX_LOG(ERR, rxq,
-					    "L4 csum failed, flags = 0x%x\n",
-					    parse_flag);
-				rxq->rx_hw_errors++;
-				ol_flags |= PKT_RX_L4_CKSUM_BAD;
-			} else {
-				ol_flags |= PKT_RX_L4_CKSUM_GOOD;
-			}
-			if (unlikely(qede_check_notunn_csum_l3(rx_mb,
-							parse_flag))) {
+
+			if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
 				PMD_RX_LOG(ERR, rxq,
-					   "IP csum failed, flags = 0x%x\n",
-					   parse_flag);
-				rxq->rx_hw_errors++;
-				ol_flags |= PKT_RX_IP_CKSUM_BAD;
+					"Outer L3 csum failed, flags = 0x%x\n",
+					parse_flag);
+				rxq->rx_hw_errors++;
+				ol_flags |= PKT_RX_EIP_CKSUM_BAD;
 			} else {
-				ol_flags |= PKT_RX_IP_CKSUM_GOOD;
-				packet_type =
-					qede_rx_cqe_to_pkt_type(parse_flag);
+				ol_flags |= PKT_RX_IP_CKSUM_GOOD;
 			}
+
+			if (tpa_start_flg)
+				flags = cqe_start_tpa->tunnel_pars_flags.flags;
+			else
+				flags = fp_cqe->tunnel_pars_flags.flags;
+			tunn_parse_flag = flags;
+
+			/* Tunnel_type */
+			packet_type =
+				qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
+
+			/* Inner header */
+			packet_type |=
+			      qede_rx_cqe_to_pkt_type_inner(parse_flag);
+
+			/* Outer L3/L4 types are not available in the CQE.
+			 * Need to add the offset to parse them correctly.
+			 */
+			rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
+			packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
+		}
+
+		/* Common handling for non-tunnel packets and for inner
+		 * headers in the tunnel case.
+		 */
+		if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
+			PMD_RX_LOG(ERR, rxq,
+				    "L4 csum failed, flags = 0x%x\n",
+				    parse_flag);
+			rxq->rx_hw_errors++;
+			ol_flags |= PKT_RX_L4_CKSUM_BAD;
+		} else {
+			ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+		}
+		if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
+			PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
+				   parse_flag);
+			rxq->rx_hw_errors++;
+			ol_flags |= PKT_RX_IP_CKSUM_BAD;
+		} else {
+			ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+			packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
 		}
 
 		if (CQE_HAS_VLAN(parse_flag) ||
-- 
1.7.10.3


* [dpdk-stable] [PATCH 04/11] net/qede: fix tunnel header size in Tx BD configuration
From: Rasesh Mody @ 2018-01-27 21:15 UTC (permalink / raw)
  To: dev; +Cc: Harish Patil, ferruh.yigit, Dept-EngDPDKDev, stable

From: Harish Patil <harish.patil@cavium.com>

- Fix incorrect header size. In the tunnel case, the outer L2/L3 lengths
must be included when calculating the tunnel header_size.
- In the TSO case, skip manipulating the TX BD1 and TX BD2 data buffer
fields, since those fields are already updated with the header and
payload lengths respectively.
- Update TX BD debug data collection.

Fixes: 3d4bb4411683 ("net/qede: add fastpath support for VXLAN tunneling")

Cc: stable@dpdk.org

Signed-off-by: Harish Patil <harish.patil@cavium.com>
---
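Notes: a sketch of the corrected header-size math, assuming the usual
rte_mbuf tunnel-TSO semantics (outer_l2_len/outer_l3_len cover the
outer headers; l2_len covers the tunnel header plus the inner L2):

static inline uint16_t example_tso_hdr_size(const struct rte_mbuf *m,
					    bool tunn_flg)
{
	uint16_t hdr_size = m->l2_len + m->l3_len + m->l4_len;

	if (tunn_flg) /* outer lengths were previously left out */
		hdr_size += m->outer_l2_len + m->outer_l3_len;
	return hdr_size;
}

With the full header length accounted for in BD1, BD2 onwards carry
payload only, which is why the TSO path no longer rewrites the BD1/BD2
data buffer fields.
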
 drivers/net/qede/qede_rxtx.c |   40 +++++++++++++++++++++++++---------------
 1 file changed, 25 insertions(+), 15 deletions(-)

diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 2af1bfc..df248cf 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -1588,7 +1588,8 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 /* Populate scatter gather buffer descriptor fields */
 static inline uint16_t
 qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
-		  struct eth_tx_2nd_bd **bd2, struct eth_tx_3rd_bd **bd3)
+		  struct eth_tx_2nd_bd **bd2, struct eth_tx_3rd_bd **bd3,
+		  uint16_t start_seg)
 {
 	struct qede_tx_queue *txq = p_txq;
 	struct eth_tx_bd *tx_bd = NULL;
@@ -1597,7 +1598,7 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 
 	/* Check for scattered buffers */
 	while (m_seg) {
-		if (nb_segs == 0) {
+		if (start_seg == 0) {
 			if (!*bd2) {
 				*bd2 = (struct eth_tx_2nd_bd *)
 					ecore_chain_produce(&txq->tx_pbl);
@@ -1607,7 +1608,7 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 			mapping = rte_mbuf_data_iova(m_seg);
 			QEDE_BD_SET_ADDR_LEN(*bd2, mapping, m_seg->data_len);
 			PMD_TX_LOG(DEBUG, txq, "BD2 len %04x", m_seg->data_len);
-		} else if (nb_segs == 1) {
+		} else if (start_seg == 1) {
 			if (!*bd3) {
 				*bd3 = (struct eth_tx_3rd_bd *)
 					ecore_chain_produce(&txq->tx_pbl);
@@ -1645,20 +1646,24 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 
 	if (bd1)
 		PMD_TX_LOG(INFO, txq,
-			   "BD1: nbytes=%u nbds=%u bd_flags=%04x bf=%04x",
-			   rte_cpu_to_le_16(bd1->nbytes), bd1->data.nbds,
-			   bd1->data.bd_flags.bitfields,
-			   rte_cpu_to_le_16(bd1->data.bitfields));
+		   "BD1: nbytes=0x%04x nbds=0x%04x bd_flags=0x%04x bf=0x%04x",
+		   rte_cpu_to_le_16(bd1->nbytes), bd1->data.nbds,
+		   bd1->data.bd_flags.bitfields,
+		   rte_cpu_to_le_16(bd1->data.bitfields));
 	if (bd2)
 		PMD_TX_LOG(INFO, txq,
-			   "BD2: nbytes=%u bf=%04x\n",
-			   rte_cpu_to_le_16(bd2->nbytes), bd2->data.bitfields1);
+		   "BD2: nbytes=0x%04x bf1=0x%04x bf2=0x%04x tunn_ip=0x%04x\n",
+		   rte_cpu_to_le_16(bd2->nbytes), bd2->data.bitfields1,
+		   bd2->data.bitfields2, bd2->data.tunn_ip_size);
 	if (bd3)
 		PMD_TX_LOG(INFO, txq,
-			   "BD3: nbytes=%u bf=%04x mss=%u\n",
-			   rte_cpu_to_le_16(bd3->nbytes),
-			   rte_cpu_to_le_16(bd3->data.bitfields),
-			   rte_cpu_to_le_16(bd3->data.lso_mss));
+		   "BD3: nbytes=0x%04x bf=0x%04x MSS=0x%04x "
+		   "tunn_l4_hdr_start_offset_w=0x%04x tunn_hdr_size=0x%04x\n",
+		   rte_cpu_to_le_16(bd3->nbytes),
+		   rte_cpu_to_le_16(bd3->data.bitfields),
+		   rte_cpu_to_le_16(bd3->data.lso_mss),
+		   bd3->data.tunn_l4_hdr_start_offset_w,
+		   bd3->data.tunn_hdr_size_w);
 
 	rte_get_tx_ol_flag_list(tx_ol_flags, ol_buf, sizeof(ol_buf));
 	PMD_TX_LOG(INFO, txq, "TX offloads = %s\n", ol_buf);
@@ -1938,6 +1943,10 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 			 * and BD2 onwards for data.
 			 */
 			hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
+			if (tunn_flg)
+				hdr_size += mbuf->outer_l2_len +
+					    mbuf->outer_l3_len;
+
 			bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
 			bd1_bd_flags_bf |=
 					1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
@@ -2054,9 +2063,11 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 
 		/* Handle fragmented MBUF */
 		m_seg = mbuf->next;
+
 		/* Encode scatter gather buffer descriptors if required */
-		nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3);
+		nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3, nbds - 1);
 		bd1->data.nbds = nbds + nb_frags;
+
 		txq->nb_tx_avail -= bd1->data.nbds;
 		txq->sw_tx_prod++;
 		rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf);
@@ -2064,7 +2075,6 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 		    rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
 		print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
-		PMD_TX_LOG(INFO, txq, "lso=%d tunn=%d", lso_flg, tunn_flg);
 #endif
 		nb_pkt_sent++;
 		txq->xmit_pkts++;
-- 
1.7.10.3


* [dpdk-stable] [PATCH 06/11] net/qede: fix MTU set and max Rx pkt len usage
From: Rasesh Mody @ 2018-01-27 21:15 UTC (permalink / raw)
  To: dev; +Cc: Rasesh Mody, ferruh.yigit, Dept-EngDPDKDev, stable

This patch fixes issues related to MTU set and max_rx_pkt_len usage.
 - Adjust the MTU during device configuration when jumbo frames are
   enabled.

 - In qede_set_mtu():
   Return -ENOTSUP for VFs, since MTU change is currently not supported
   for them.

   Cache the new MTU value in new_mtu for a proper update.

   Check that the RX queues are allocated before calculating the RX
   buffer size; if they are not, defer the calculation until RX queue
   setup.

   Check whether the device has been started before performing device
   stop/start.

 - Use max_rx_pkt_len appropriately.

 - Change the QEDE_ETH_OVERHEAD macro to account only for
   driver-specific overhead.

Fixes: 4c4bdadfa9e7 ("net/qede: refactoring multi-queue implementation")
Fixes: 9a6d30ae6d46 ("net/qede: refactoring vport handling code")
Fixes: 1ef4c3a5c1f7 ("net/qede: prevent crash while changing MTU dynamically")
Cc: stable@dpdk.org

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
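Notes: the size relationships this patch standardizes on, as a small
sketch (the ETHER_* constants are the standard rte_ether.h ones;
QEDE_ETH_OVERHEAD is the driver-private overhead from qede_rxtx.h):

/* e.g. mtu = 9000 gives max_rx_pkt_len = 9018, which exceeds
 * ETHER_MAX_LEN (1518), so jumbo_frame is enabled.
 */
static uint32_t example_max_rx_pkt_len(uint16_t mtu)
{
	return (uint32_t)mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
}

static uint32_t example_frame_size(uint16_t mtu)
{
	return example_max_rx_pkt_len(mtu) + QEDE_ETH_OVERHEAD;
}
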
 drivers/net/qede/qede_ethdev.c |   63 +++++++++++++++++++++++++++-------------
 drivers/net/qede/qede_rxtx.c   |    6 ++--
 drivers/net/qede/qede_rxtx.h   |    2 +-
 3 files changed, 47 insertions(+), 24 deletions(-)

diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 323e8ed..895a0da 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -1414,18 +1414,24 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 			return -ENOMEM;
 	}
 
+	/* If jumbo enabled adjust MTU */
+	if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
+		eth_dev->data->mtu =
+				eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
+				ETHER_HDR_LEN - ETHER_CRC_LEN;
+
 	/* VF's MTU has to be set using vport-start, whereas
 	 * PF's MTU can be updated via vport-update.
 	 */
 	if (IS_VF(edev)) {
-		if (qede_start_vport(qdev, rxmode->max_rx_pkt_len))
+		if (qede_start_vport(qdev, eth_dev->data->mtu))
 			return -1;
 	} else {
-		if (qede_update_mtu(eth_dev, rxmode->max_rx_pkt_len))
+		if (qede_update_mtu(eth_dev, eth_dev->data->mtu))
 			return -1;
 	}
 
-	qdev->mtu = rxmode->max_rx_pkt_len;
+	qdev->mtu = eth_dev->data->mtu;
 	qdev->new_mtu = qdev->mtu;
 
 	/* Enable VLAN offloads by default */
@@ -2306,16 +2312,23 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 	struct rte_eth_dev_info dev_info = {0};
 	struct qede_fastpath *fp;
+	uint32_t max_rx_pkt_len;
 	uint32_t frame_size;
 	uint16_t rx_buf_size;
 	uint16_t bufsz;
+	bool restart = false;
 	int i;
 
 	PMD_INIT_FUNC_TRACE(edev);
+	if (IS_VF(edev))
+		return -ENOTSUP;
 	qede_dev_info_get(dev, &dev_info);
-	frame_size = mtu + QEDE_ETH_OVERHEAD;
+	max_rx_pkt_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+	frame_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
 	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
-		DP_ERR(edev, "MTU %u out of range\n", mtu);
+		DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
+		       mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
+			ETHER_CRC_LEN - QEDE_ETH_OVERHEAD);
 		return -EINVAL;
 	}
 	if (!dev->data->scattered_rx &&
@@ -2329,29 +2342,39 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 	 */
 	dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
 	dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
-	qede_dev_stop(dev);
+	if (dev->data->dev_started) {
+		dev->data->dev_started = 0;
+		qede_dev_stop(dev);
+		restart = true;
+	}
 	rte_delay_ms(1000);
-	qdev->mtu = mtu;
+	qdev->new_mtu = mtu;
 	/* Fix up RX buf size for all queues of the port */
 	for_each_rss(i) {
 		fp = &qdev->fp_array[i];
-		bufsz = (uint16_t)rte_pktmbuf_data_room_size(
-			fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
-		if (dev->data->scattered_rx)
-			rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
-		else
-			rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
-		rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
-		fp->rxq->rx_buf_size = rx_buf_size;
-		DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
-	}
-	qede_dev_start(dev);
-	if (frame_size > ETHER_MAX_LEN)
+		if (fp->rxq != NULL) {
+			bufsz = (uint16_t)rte_pktmbuf_data_room_size(
+				fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+			if (dev->data->scattered_rx)
+				rx_buf_size = bufsz + ETHER_HDR_LEN +
+					      ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
+			else
+				rx_buf_size = frame_size;
+			rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
+			fp->rxq->rx_buf_size = rx_buf_size;
+			DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
+		}
+	}
+	if (max_rx_pkt_len > ETHER_MAX_LEN)
 		dev->data->dev_conf.rxmode.jumbo_frame = 1;
 	else
 		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+	if (!dev->data->dev_started && restart) {
+		qede_dev_start(dev);
+		dev->data->dev_started = 1;
+	}
 	/* update max frame size */
-	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
 	/* Reassign back */
 	dev->rx_pkt_burst = qede_recv_pkts;
 	dev->tx_pkt_burst = qede_xmit_pkts;
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index df248cf..810f0f3 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -84,7 +84,6 @@ static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
 	rxq->port_id = dev->data->port_id;
 
 	max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
-	qdev->mtu = max_rx_pkt_len;
 
 	/* Fix up RX buffer size */
 	bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
@@ -97,9 +96,10 @@ static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
 	}
 
 	if (dev->data->scattered_rx)
-		rxq->rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
+		rxq->rx_buf_size = bufsz + ETHER_HDR_LEN +
+				   ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
 	else
-		rxq->rx_buf_size = qdev->mtu + QEDE_ETH_OVERHEAD;
+		rxq->rx_buf_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
 	/* Align to cache-line size if needed */
 	rxq->rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rxq->rx_buf_size);
 
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index 6214c97..f1d3666 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -64,7 +64,7 @@
 #define QEDE_CEIL_TO_CACHE_LINE_SIZE(n) (((n) + (QEDE_FW_RX_ALIGN_END - 1)) & \
 					~(QEDE_FW_RX_ALIGN_END - 1))
 /* Note: QEDE_LLC_SNAP_HDR_LEN is optional */
-#define QEDE_ETH_OVERHEAD	((ETHER_HDR_LEN) + ((2 * QEDE_VLAN_TAG_SIZE)) \
+#define QEDE_ETH_OVERHEAD	(((2 * QEDE_VLAN_TAG_SIZE)) - (ETHER_CRC_LEN) \
 				+ (QEDE_LLC_SNAP_HDR_LEN))
 
 #define QEDE_RSS_OFFLOAD_ALL    (ETH_RSS_IPV4			|\
-- 
1.7.10.3


* [dpdk-stable] [PATCH 07/11] net/qede: fix clearing of queue stats
From: Rasesh Mody @ 2018-01-27 21:15 UTC (permalink / raw)
  To: dev; +Cc: Rasesh Mody, ferruh.yigit, Dept-EngDPDKDev, stable

Add support to clear the per-queue statistics, thereby clearing the
xstats counters.

Fixes: 7634c5f91569 ("net/qede: add queue statistics")
Cc: stable@dpdk.org

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
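Notes: the counters are cleared in place with OSAL_MEMSET at offsets
computed via offsetof(), so the same loop can also walk the xstats
string table, whose entries carry an offset into the queue structure.
A plain-C sketch of the idiom (generic, not the driver code itself):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Zero one 64-bit counter inside a stats-bearing struct by offset;
 * equivalent to rxq->rcv_pkts = 0, but usable generically with a
 * table of {name, offset} entries.
 */
static void example_clear_u64(void *stats, size_t off)
{
	memset((char *)stats + off, 0, sizeof(uint64_t));
}
/* usage: example_clear_u64(rxq, offsetof(struct qede_rx_queue, rcv_pkts)); */
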
 drivers/net/qede/qede_ethdev.c |   58 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 58 insertions(+)

diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 895a0da..cab5059 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -387,6 +387,60 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
 	DP_INFO(edev, "*********************************\n");
 }
 
+static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
+{
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	unsigned int i = 0, j = 0, qid;
+	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
+	struct qede_tx_queue *txq;
+
+	DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");
+
+	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+			       RTE_ETHDEV_QUEUE_STAT_CNTRS);
+	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+			       RTE_ETHDEV_QUEUE_STAT_CNTRS);
+
+	for_each_rss(qid) {
+		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+			     offsetof(struct qede_rx_queue, rcv_pkts), 0,
+			    sizeof(uint64_t));
+		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+			     offsetof(struct qede_rx_queue, rx_hw_errors), 0,
+			    sizeof(uint64_t));
+		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+			     offsetof(struct qede_rx_queue, rx_alloc_errors), 0,
+			    sizeof(uint64_t));
+
+		if (xstats)
+			for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++)
+				OSAL_MEMSET((((char *)
+					      (qdev->fp_array[qid].rxq)) +
+					     qede_rxq_xstats_strings[j].offset),
+					    0,
+					    sizeof(uint64_t));
+
+		i++;
+		if (i == rxq_stat_cntrs)
+			break;
+	}
+
+	i = 0;
+
+	for_each_tss(qid) {
+		txq = qdev->fp_array[qid].txq;
+
+		OSAL_MEMSET((uint64_t *)(uintptr_t)
+				(((uint64_t)(uintptr_t)(txq)) +
+				 offsetof(struct qede_tx_queue, xmit_pkts)), 0,
+			    sizeof(uint64_t));
+
+		i++;
+		if (i == txq_stat_cntrs)
+			break;
+	}
+}
+
 static int
 qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
 {
@@ -412,6 +466,8 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
 		}
 	}
 	ecore_reset_vport_stats(edev);
+	if (IS_PF(edev))
+		qede_reset_queue_stats(qdev, true);
 	DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);
 
 	return 0;
@@ -1885,6 +1941,7 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
 	struct ecore_dev *edev = &qdev->edev;
 
 	ecore_reset_vport_stats(edev);
+	qede_reset_queue_stats(qdev, true);
 }
 
 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
@@ -1920,6 +1977,7 @@ static void qede_reset_stats(struct rte_eth_dev *eth_dev)
 	struct ecore_dev *edev = &qdev->edev;
 
 	ecore_reset_vport_stats(edev);
+	qede_reset_queue_stats(qdev, false);
 }
 
 static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
-- 
1.7.10.3

