DPDK patches and discussions
* [dpdk-dev] [INTERNAL REVIEW 1/7] net/qede/base: fix recovery from previous ungraceful exit
@ 2017-07-24 10:10 Rasesh Mody
  2017-07-24 10:10 ` [dpdk-dev] [INTERNAL REVIEW 2/7] net/qede: fix incorrect queue id for 100G Rasesh Mody
                   ` (6 more replies)
  0 siblings, 7 replies; 9+ messages in thread
From: Rasesh Mody @ 2017-07-24 10:10 UTC
  To: dev; +Cc: Rasesh Mody, Dept-EngDPDKDev

This patch modifies the recovery flow to allow ongoing PCIe
transactions to be completed. To achieve this, the load sequence is
changed such that the "final_cleanup" notification is sent while the
FID_enable is cleared.
This change ensures that chip cleanup actions left over from the
previous driver instance take place if needed.
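
For reference, here is a condensed sketch of the new ordering inside
ecore_hw_init() (error handling and the FW_MSG_CODE_DRV_LOAD_ENGINE case
are elided; see the hunks below for the complete change):

	/* FID_enable is still cleared at this point, so the chip can
	 * complete cleanup on behalf of a previous, ungracefully stopped
	 * driver instance.
	 */
	if (load_code != FW_MSG_CODE_DRV_LOAD_ENGINE)
		rc = ecore_final_cleanup(p_hwfn, p_hwfn->p_main_ptt,
					 p_hwfn->rel_pf_id, false);

	/* Log and clear any pglue_b errors that instance left behind */
	ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt);
	ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);

	/* Only now set the PF's internal FID_enable (bus master) in the PXP */
	rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt, true);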

Fixes: ec94dbc57362 ("qede: add base driver")

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/ecore_dev.c     |  121 +++++++++++++++------------
 drivers/net/qede/base/ecore_dev_api.h |   12 +++
 drivers/net/qede/base/ecore_int.c     |  144 ++++++++++++++++-----------------
 drivers/net/qede/base/ecore_int.h     |    3 +
 drivers/net/qede/base/ecore_mcp.c     |   45 +++++++++++
 drivers/net/qede/base/ecore_mcp.h     |   11 +++
 drivers/net/qede/base/ecore_mcp_api.h |   11 +++
 7 files changed, 224 insertions(+), 123 deletions(-)

diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 4cfa668..65b89b8 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -1080,7 +1080,7 @@ enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
 	}
 
 	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-		   "Sending final cleanup for PFVF[%d] [Command %08x\n]",
+		   "Sending final cleanup for PFVF[%d] [Command %08x]\n",
 		   id, command);
 
 	ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
@@ -1776,13 +1776,6 @@ static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
 	/* perform debug configuration when chip is out of reset */
 	OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id);
 
-	/* Cleanup chip from previous driver if such remains exist */
-	rc = ecore_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
-	if (rc != ECORE_SUCCESS) {
-		ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);
-		return rc;
-	}
-
 	/* PF Init sequence */
 	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
 	if (rc)
@@ -1866,17 +1859,17 @@ static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
 	return rc;
 }
 
-static enum _ecore_status_t
-ecore_change_pci_hwfn(struct ecore_hwfn *p_hwfn,
-		      struct ecore_ptt *p_ptt, u8 enable)
+enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
+						  struct ecore_ptt *p_ptt,
+						  bool b_enable)
 {
-	u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
+	u32 delay_idx = 0, val, set_val = b_enable ? 1 : 0;
 
-	/* Change PF in PXP */
+	/* Configure the PF's internal FID_enable for master transactions */
 	ecore_wr(p_hwfn, p_ptt,
 		 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
 
-	/* wait until value is set - try for 1 second every 50us */
+	/* Wait until value is set - try for 1 second every 50us */
 	for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
 		val = ecore_rd(p_hwfn, p_ptt,
 			       PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
@@ -1918,14 +1911,21 @@ enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn,
 	return ECORE_SUCCESS;
 }
 
+static void ecore_pglueb_clear_err(struct ecore_hwfn *p_hwfn,
+				     struct ecore_ptt *p_ptt)
+{
+	ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
+		 1 << p_hwfn->abs_pf_id);
+}
+
 enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
 				   struct ecore_hw_init_params *p_params)
 {
 	struct ecore_load_req_params load_req_params;
-	u32 load_code, param, drv_mb_param;
+	u32 load_code, resp, param, drv_mb_param;
 	bool b_default_mtu = true;
 	struct ecore_hwfn *p_hwfn;
-	enum _ecore_status_t rc = ECORE_SUCCESS, mfw_rc;
+	enum _ecore_status_t rc = ECORE_SUCCESS;
 	int i;
 
 	if ((p_params->int_mode == ECORE_INT_MODE_MSI) &&
@@ -1942,7 +1942,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
 	}
 
 	for_each_hwfn(p_dev, i) {
-		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+		p_hwfn = &p_dev->hwfns[i];
 
 		/* If management didn't provide a default, set one of our own */
 		if (!p_hwfn->hw_info.mtu) {
@@ -1955,11 +1955,6 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
 			continue;
 		}
 
-		/* Enable DMAE in PXP */
-		rc = ecore_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
-		if (rc != ECORE_SUCCESS)
-			return rc;
-
 		rc = ecore_calc_hw_mode(p_hwfn);
 		if (rc != ECORE_SUCCESS)
 			return rc;
@@ -2009,6 +2004,30 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
 			qm_lock_init = true;
 		}
 
+		/* Clean up chip from previous driver if such remains exist.
+		 * This is not needed when the PF is the first one on the
+		 * engine, since afterwards we are going to init the FW.
+		 */
+		if (load_code != FW_MSG_CODE_DRV_LOAD_ENGINE) {
+			rc = ecore_final_cleanup(p_hwfn, p_hwfn->p_main_ptt,
+						 p_hwfn->rel_pf_id, false);
+			if (rc != ECORE_SUCCESS) {
+				ecore_hw_err_notify(p_hwfn,
+						    ECORE_HW_ERR_RAMROD_FAIL);
+				goto load_err;
+			}
+		}
+
+		/* Log and clean previous pglue_b errors if such exist */
+		ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt);
+		ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
+
+		/* Enable the PF's internal FID_enable in the PXP */
+		rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
+						  true);
+		if (rc != ECORE_SUCCESS)
+			goto load_err;
+
 		switch (load_code) {
 		case FW_MSG_CODE_DRV_LOAD_ENGINE:
 			rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
@@ -2037,35 +2056,28 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
 			break;
 		}
 
-		if (rc != ECORE_SUCCESS)
+		if (rc != ECORE_SUCCESS) {
 			DP_NOTICE(p_hwfn, true,
 				  "init phase failed for loadcode 0x%x (rc %d)\n",
 				  load_code, rc);
+			goto load_err;
+		}
 
-		/* ACK mfw regardless of success or failure of initialization */
-		mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
-				       DRV_MSG_CODE_LOAD_DONE,
-				       0, &load_code, &param);
+		rc = ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
 		if (rc != ECORE_SUCCESS)
 			return rc;
 
-		if (mfw_rc != ECORE_SUCCESS) {
-			DP_NOTICE(p_hwfn, true,
-				  "Failed sending a LOAD_DONE command\n");
-			return mfw_rc;
-		}
-
 		/* send DCBX attention request command */
 		DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
 			   "sending phony dcbx set command to trigger DCBx attention handling\n");
-		mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
-				       DRV_MSG_CODE_SET_DCBX,
-				       1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
-				       &load_code, &param);
-		if (mfw_rc != ECORE_SUCCESS) {
+		rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+				   DRV_MSG_CODE_SET_DCBX,
+				   1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT, &resp,
+				   &param);
+		if (rc != ECORE_SUCCESS) {
 			DP_NOTICE(p_hwfn, true,
 				  "Failed to send DCBX attention request\n");
-			return mfw_rc;
+			return rc;
 		}
 
 		p_hwfn->hw_init_done = true;
@@ -2076,7 +2088,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
 		drv_mb_param = STORM_FW_VERSION;
 		rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
 				   DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
-				   drv_mb_param, &load_code, &param);
+				   drv_mb_param, &resp, &param);
 		if (rc != ECORE_SUCCESS)
 			DP_INFO(p_hwfn, "Failed to update firmware version\n");
 
@@ -2094,6 +2106,14 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
 	}
 
 	return rc;
+
+load_err:
+	/* The MFW load lock should be released regardless of success or failure
+	 * of initialization.
+	 * TODO: replace this with an attempt to send cancel_load.
+	 */
+	ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
+	return rc;
 }
 
 #define ECORE_HW_STOP_RETRY_LIMIT	(10)
@@ -2261,18 +2281,20 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
 		}
 	} /* hwfn loop */
 
-	if (IS_PF(p_dev)) {
+	if (IS_PF(p_dev) && !p_dev->recov_in_prog) {
 		p_hwfn = ECORE_LEADING_HWFN(p_dev);
 		p_ptt = ECORE_LEADING_HWFN(p_dev)->p_main_ptt;
 
-		/* Disable DMAE in PXP - in CMT, this should only be done for
-		 * first hw-function, and only after all transactions have
-		 * stopped for all active hw-functions.
-		 */
-		rc = ecore_change_pci_hwfn(p_hwfn, p_ptt, false);
+		 /* Clear the PF's internal FID_enable in the PXP.
+		  * In CMT this should only be done for first hw-function, and
+		  * only after all transactions have stopped for all active
+		  * hw-functions.
+		  */
+		rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
+						  false);
 		if (rc != ECORE_SUCCESS) {
 			DP_NOTICE(p_hwfn, true,
-				  "ecore_change_pci_hwfn failed. rc = %d.\n",
+				  "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
 				  rc);
 			rc2 = ECORE_UNKNOWN_ERROR;
 		}
@@ -2370,9 +2392,8 @@ static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn)
 			 PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
 	}
 
-	/* Clean Previous errors if such exist */
-	ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
-		 PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id);
+	/* Clean previous pglue_b errors if such exist */
+	ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
 
 	/* enable internal target-read */
 	ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
diff --git a/drivers/net/qede/base/ecore_dev_api.h b/drivers/net/qede/base/ecore_dev_api.h
index 886407b..eea22e0 100644
--- a/drivers/net/qede/base/ecore_dev_api.h
+++ b/drivers/net/qede/base/ecore_dev_api.h
@@ -584,4 +584,16 @@ enum _ecore_status_t
 ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal,
 			 u16 tx_coal, void *p_handle);
 
+/**
+ * @brief ecore_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param b_enable - true/false
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
+						  struct ecore_ptt *p_ptt,
+						  bool b_enable);
 #endif
diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c
index 2afca29..b57c510 100644
--- a/drivers/net/qede/base/ecore_int.c
+++ b/drivers/net/qede/base/ecore_int.c
@@ -284,122 +284,119 @@ static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
 #define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23)
 #define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25)
 #define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23)
-static enum _ecore_status_t ecore_pglub_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
+
+enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
+						   struct ecore_ptt *p_ptt)
 {
 	u32 tmp;
 
-	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
-		       PGLUE_B_REG_TX_ERR_WR_DETAILS2);
+	tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
 	if (tmp & ECORE_PGLUE_ATTENTION_VALID) {
 		u32 addr_lo, addr_hi, details;
 
-		addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+		addr_lo = ecore_rd(p_hwfn, p_ptt,
 				   PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
-		addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+		addr_hi = ecore_rd(p_hwfn, p_ptt,
 				   PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
-		details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+		details = ecore_rd(p_hwfn, p_ptt,
 				   PGLUE_B_REG_TX_ERR_WR_DETAILS);
 
-		DP_INFO(p_hwfn,
-			"Illegal write by chip to [%08x:%08x] blocked."
-			"Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]"
-			" Details2 %08x [Was_error %02x BME deassert %02x"
-			" FID_enable deassert %02x]\n",
-			addr_hi, addr_lo, details,
-			(u8)((details &
-			      ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
-			     ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
-			(u8)((details &
-			      ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
-			     ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
-			(u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID)
-			     ? 1 : 0), tmp,
-			(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
-			     : 0),
-			(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 :
-			     0),
-			(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
-			     : 0));
+		DP_NOTICE(p_hwfn, false,
+			  "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
+			  addr_hi, addr_lo, details,
+			  (u8)((details &
+				ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
+			       ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
+			  (u8)((details &
+				ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
+			       ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
+			  (u8)((details &
+			       ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
+			  tmp,
+			  (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ?
+				1 : 0),
+			  (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ?
+				1 : 0),
+			  (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ?
+				1 : 0));
 	}
 
-	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
-		       PGLUE_B_REG_TX_ERR_RD_DETAILS2);
+	tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
 	if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) {
 		u32 addr_lo, addr_hi, details;
 
-		addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+		addr_lo = ecore_rd(p_hwfn, p_ptt,
 				   PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
-		addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+		addr_hi = ecore_rd(p_hwfn, p_ptt,
 				   PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
-		details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+		details = ecore_rd(p_hwfn, p_ptt,
 				   PGLUE_B_REG_TX_ERR_RD_DETAILS);
 
-		DP_INFO(p_hwfn,
-			"Illegal read by chip from [%08x:%08x] blocked."
-			" Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]"
-			" Details2 %08x [Was_error %02x BME deassert %02x"
-			" FID_enable deassert %02x]\n",
-			addr_hi, addr_lo, details,
-			(u8)((details &
-			      ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
-			     ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
-			(u8)((details &
-			      ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
-			     ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
-			(u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID)
-			     ? 1 : 0), tmp,
-			(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
-			     : 0),
-			(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 :
-			     0),
-			(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
-			     : 0));
+		DP_NOTICE(p_hwfn, false,
+			  "Illegal read by chip from [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
+			  addr_hi, addr_lo, details,
+			  (u8)((details &
+				ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
+			       ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
+			  (u8)((details &
+				ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
+			       ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
+			  (u8)((details &
+			       ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
+			  tmp,
+			  (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ?
+				1 : 0),
+			  (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ?
+				1 : 0),
+			  (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ?
+				1 : 0));
 	}
 
-	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
-		       PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
+	tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
 	if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID)
-		DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp);
+		DP_NOTICE(p_hwfn, false, "ICPL error - %08x\n", tmp);
 
-	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
-		       PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
+	tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
 	if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) {
 		u32 addr_hi, addr_lo;
 
-		addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+		addr_lo = ecore_rd(p_hwfn, p_ptt,
 				   PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
-		addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+		addr_hi = ecore_rd(p_hwfn, p_ptt,
 				   PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
 
-		DP_INFO(p_hwfn, "ICPL error - %08x [Address %08x:%08x]\n",
-			tmp, addr_hi, addr_lo);
+		DP_NOTICE(p_hwfn, false,
+			  "ICPL error - %08x [Address %08x:%08x]\n",
+			  tmp, addr_hi, addr_lo);
 	}
 
-	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
-		       PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
+	tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
 	if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) {
 		u32 addr_hi, addr_lo, details;
 
-		addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+		addr_lo = ecore_rd(p_hwfn, p_ptt,
 				   PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
-		addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+		addr_hi = ecore_rd(p_hwfn, p_ptt,
 				   PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
-		details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+		details = ecore_rd(p_hwfn, p_ptt,
 				   PGLUE_B_REG_VF_ILT_ERR_DETAILS);
 
-		DP_INFO(p_hwfn,
-			"ILT error - Details %08x Details2 %08x"
-			" [Address %08x:%08x]\n",
-			details, tmp, addr_hi, addr_lo);
+		DP_NOTICE(p_hwfn, false,
+			  "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
+			  details, tmp, addr_hi, addr_lo);
 	}
 
 	/* Clear the indications */
-	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
-		 PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
+	ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
 
 	return ECORE_SUCCESS;
 }
 
+static enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+	return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt);
+}
+
 static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
 {
 	DP_NOTICE(p_hwfn, false, "FW assertion!\n");
@@ -505,7 +502,7 @@ enum aeu_invert_reg_special_type {
 	 {			/* After Invert 2 */
 	  {"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
 	  {"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
-	  {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglub_rbc_attn_cb,
+	  {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglueb_rbc_attn_cb,
 	   BLOCK_PGLUE_B},
 	  {"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
 	  {"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
@@ -827,8 +824,9 @@ static void ecore_int_attn_print(struct ecore_hwfn *p_hwfn,
 				     ATTN_TYPE_INTERRUPT, !b_fatal);
 }
 
+	/* @DPDK */
 	/* Reach assertion if attention is fatal */
-	if (b_fatal) {
+	if (b_fatal || (strcmp(p_bit_name, "PGLUE B RBC") == 0)) {
 		DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
 			  p_bit_name);
 
diff --git a/drivers/net/qede/base/ecore_int.h b/drivers/net/qede/base/ecore_int.h
index 0c8929e..067ed60 100644
--- a/drivers/net/qede/base/ecore_int.h
+++ b/drivers/net/qede/base/ecore_int.h
@@ -208,4 +208,7 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
 #define ECORE_MAPPING_MEMORY_SIZE(dev) NUM_OF_SBS(dev)
 #endif
 
+enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
+						   struct ecore_ptt *p_ptt);
+
 #endif /* __ECORE_INT_H__ */
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index 03cc901..88c5ceb 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -893,6 +893,30 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
 	return ECORE_SUCCESS;
 }
 
+enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
+					 struct ecore_ptt *p_ptt)
+{
+	u32 resp = 0, param = 0;
+	enum _ecore_status_t rc;
+
+	rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
+			   &param);
+	if (rc != ECORE_SUCCESS) {
+		DP_NOTICE(p_hwfn, false,
+			  "Failed to send a LOAD_DONE command, rc = %d\n", rc);
+		return rc;
+	}
+
+#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR     (1 << 0)
+
+	/* Check if there is a DID mismatch between nvm-cfg/efuse */
+	if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
+		DP_NOTICE(p_hwfn, false,
+			  "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
+
+	return ECORE_SUCCESS;
+}
+
 enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
 					  struct ecore_ptt *p_ptt)
 {
@@ -2893,6 +2917,27 @@ struct ecore_resc_alloc_out_params {
 	u32 flags;
 };
 
+#define ECORE_RECOVERY_PROLOG_SLEEP_MS	100
+
+enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev)
+{
+	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+	struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+	enum _ecore_status_t rc;
+
+	/* Allow ongoing PCIe transactions to complete */
+	OSAL_MSLEEP(ECORE_RECOVERY_PROLOG_SLEEP_MS);
+
+	/* Clear the PF's internal FID_enable in the PXP */
+	rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
+	if (rc != ECORE_SUCCESS)
+		DP_NOTICE(p_hwfn, false,
+			  "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
+			  rc);
+
+	return rc;
+}
+
 static enum _ecore_status_t
 ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
 			      struct ecore_ptt *p_ptt,
diff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h
index 37d1835..77fb5a3 100644
--- a/drivers/net/qede/base/ecore_mcp.h
+++ b/drivers/net/qede/base/ecore_mcp.h
@@ -171,6 +171,17 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
 					struct ecore_load_req_params *p_params);
 
 /**
+ * @brief Sends a LOAD_DONE message to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
+					 struct ecore_ptt *p_ptt);
+
+/**
  * @brief Sends a UNLOAD_REQ message to the MFW
  *
  * @param p_hwfn
diff --git a/drivers/net/qede/base/ecore_mcp_api.h b/drivers/net/qede/base/ecore_mcp_api.h
index 190c135..abc190c 100644
--- a/drivers/net/qede/base/ecore_mcp_api.h
+++ b/drivers/net/qede/base/ecore_mcp_api.h
@@ -736,6 +736,17 @@ enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
 						  struct ecore_ptt *p_ptt);
 
 /**
+ * @brief A recovery handler must call this function as its first step.
+ *        It is assumed that the handler is not run from an interrupt context.
+ *
+ *  @param p_dev
+ *  @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev);
+
+/**
  * @brief Notify MFW about the change in base device properties
  *
  *  @param p_hwfn
-- 
1.7.10.3


* [dpdk-dev] [INTERNAL REVIEW 2/7] net/qede: fix incorrect queue id for 100G
  2017-07-24 10:10 [dpdk-dev] [INTERNAL REVIEW 1/7] net/qede/base: fix recovery from previous ungraceful exit Rasesh Mody
@ 2017-07-24 10:10 ` Rasesh Mody
  2017-07-24 10:10 ` [dpdk-dev] [INTERNAL REVIEW 3/7] net/qede/base: fix for adapter specific stats Rasesh Mody
                   ` (5 subsequent siblings)
  6 siblings, 0 replies; 9+ messages in thread
From: Rasesh Mody @ 2017-07-24 10:10 UTC
  To: dev; +Cc: Harish Patil, Dept-EngDPDKDev, Rasesh Mody

From: Harish Patil <harish.patil@cavium.com>

Commit 4c4bdadfa9e7 ("net/qede: refactoring multi-queue implementation")
introduced a regression where the default RSS configuration is incorrect
in 100G mode. Currently we pass absolute queue ids while creating RX/TX
queues, but in CMT mode the queue id must be relative to the engine it
belongs to. This fix therefore takes num_hwfns into account while
creating queues.
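
A worked example of the conversion (the round-robin spread of queues
across the two engines is an assumption here; only the division itself
appears in the diff below):

	num_hwfns             = 2          (100G / CMT adapter)
	absolute rx_queue_id  = 5
	servicing engine      = 5 % 2 = 1
	queue id, per engine  = 5 / 2 = 2  -> passed in params.queue_id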

Fixes: 4c4bdadfa9e7 ("net/qede: refactoring multi-queue implementation")

Signed-off-by: Harish Patil <harish.patil@cavium.com>
Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/qede_rxtx.c |    6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 0de28c7..a232d20 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -552,8 +552,9 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
 		ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
 		/* Prepare ramrod */
 		memset(&params, 0, sizeof(params));
-		params.queue_id = rx_queue_id;
+		params.queue_id = rx_queue_id / edev->num_hwfns;
 		params.vport_id = 0;
+		params.stats_id = params.vport_id;
 		params.sb = fp->sb_info->igu_sb_id;
 		DP_INFO(edev, "rxq %u igu_sb_id 0x%x\n",
 				fp->rxq->queue_id, fp->sb_info->igu_sb_id);
@@ -610,8 +611,9 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
 		txq = eth_dev->data->tx_queues[tx_queue_id];
 		fp = &qdev->fp_array[tx_queue_id];
 		memset(&params, 0, sizeof(params));
-		params.queue_id = tx_queue_id;
+		params.queue_id = tx_queue_id / edev->num_hwfns;
 		params.vport_id = 0;
+		params.stats_id = params.vport_id;
 		params.sb = fp->sb_info->igu_sb_id;
 		DP_INFO(edev, "txq %u igu_sb_id 0x%x\n",
 				fp->txq->queue_id, fp->sb_info->igu_sb_id);
-- 
1.7.10.3


* [dpdk-dev] [INTERNAL REVIEW 3/7] net/qede/base: fix for adapter specific stats
  2017-07-24 10:10 [dpdk-dev] [INTERNAL REVIEW 1/7] net/qede/base: fix recovery from previous ungraceful exit Rasesh Mody
  2017-07-24 10:10 ` [dpdk-dev] [INTERNAL REVIEW 2/7] net/qede: fix incorrect queue id for 100G Rasesh Mody
@ 2017-07-24 10:10 ` Rasesh Mody
  2017-07-24 10:10 ` [dpdk-dev] [INTERNAL REVIEW 4/7] net/qede: fix inner L3/L4 chksum offload for tunnel frames Rasesh Mody
                   ` (4 subsequent siblings)
  6 siblings, 0 replies; 9+ messages in thread
From: Rasesh Mody @ 2017-07-24 10:10 UTC
  To: dev; +Cc: Rasesh Mody, Dept-EngDPDKDev

Handle the differing MAC statistics fields of the two chip variants
(BB and AH) by reading the MAC counters from the statistics bins
appropriate to the adapter.
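
A condensed sketch of how the split is consumed in
__ecore_get_vport_port_stats() (taken from the ecore_l2.c hunk below;
most counters elided):

	struct ecore_eth_stats_common *p_common = &p_stats->common;

	/* Counters that exist on both chip variants */
	p_common->rx_64_byte_packets += port_stats.eth.r64;
	p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;

	if (ECORE_IS_BB(p_hwfn->p_dev)) {
		/* BB keeps per-range counters above 1518 bytes */
		p_stats->bb.rx_1519_to_2047_byte_packets +=
			port_stats.eth.u0.bb0.r2047;
	} else {
		/* AH folds them into a single 1519-to-max bin */
		p_stats->ah.rx_1519_to_max_byte_packets +=
			port_stats.eth.u0.ah0.r1519_to_max;
	}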

Fixes: ec94dbc57362 ("qede: add base driver")

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/bcm_osal.c      |    7 +-
 drivers/net/qede/base/ecore_dev_api.h |   40 +++--
 drivers/net/qede/base/ecore_l2.c      |  188 ++++++++++++--------
 drivers/net/qede/base/mcp_public.h    |   58 ++++--
 drivers/net/qede/qede_ethdev.c        |  312 ++++++++++++++++++++++-----------
 5 files changed, 400 insertions(+), 205 deletions(-)

diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
index 968fb76..2603a8b 100644
--- a/drivers/net/qede/base/bcm_osal.c
+++ b/drivers/net/qede/base/bcm_osal.c
@@ -249,8 +249,11 @@ u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
 
 	if (type == ECORE_MCP_LAN_STATS) {
 		ecore_get_vport_stats(edev, &lan_stats);
-		stats->lan_stats.ucast_rx_pkts = lan_stats.rx_ucast_pkts;
-		stats->lan_stats.ucast_tx_pkts = lan_stats.tx_ucast_pkts;
+
+		/* @DPDK */
+		stats->lan_stats.ucast_rx_pkts = lan_stats.common.rx_ucast_pkts;
+		stats->lan_stats.ucast_tx_pkts = lan_stats.common.tx_ucast_pkts;
+
 		stats->lan_stats.fcs_err = -1;
 	} else {
 		DP_INFO(edev, "Statistics request type %d not supported\n",
diff --git a/drivers/net/qede/base/ecore_dev_api.h b/drivers/net/qede/base/ecore_dev_api.h
index eea22e0..9126cf9 100644
--- a/drivers/net/qede/base/ecore_dev_api.h
+++ b/drivers/net/qede/base/ecore_dev_api.h
@@ -241,7 +241,7 @@ void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
 		       struct ecore_ptt *p_ptt);
 
 #ifndef __EXTRACT__LINUX__
-struct ecore_eth_stats {
+struct ecore_eth_stats_common {
 	u64 no_buff_discards;
 	u64 packet_too_big_discard;
 	u64 ttl0_discard;
@@ -273,11 +273,6 @@ struct ecore_eth_stats {
 	u64 rx_256_to_511_byte_packets;
 	u64 rx_512_to_1023_byte_packets;
 	u64 rx_1024_to_1518_byte_packets;
-	u64 rx_1519_to_1522_byte_packets;
-	u64 rx_1519_to_2047_byte_packets;
-	u64 rx_2048_to_4095_byte_packets;
-	u64 rx_4096_to_9216_byte_packets;
-	u64 rx_9217_to_16383_byte_packets;
 	u64 rx_crc_errors;
 	u64 rx_mac_crtl_frames;
 	u64 rx_pause_frames;
@@ -294,14 +289,8 @@ struct ecore_eth_stats {
 	u64 tx_256_to_511_byte_packets;
 	u64 tx_512_to_1023_byte_packets;
 	u64 tx_1024_to_1518_byte_packets;
-	u64 tx_1519_to_2047_byte_packets;
-	u64 tx_2048_to_4095_byte_packets;
-	u64 tx_4096_to_9216_byte_packets;
-	u64 tx_9217_to_16383_byte_packets;
 	u64 tx_pause_frames;
 	u64 tx_pfc_frames;
-	u64 tx_lpi_entry_count;
-	u64 tx_total_collisions;
 	u64 brb_truncates;
 	u64 brb_discards;
 	u64 rx_mac_bytes;
@@ -315,6 +304,33 @@ struct ecore_eth_stats {
 	u64 tx_mac_bc_packets;
 	u64 tx_mac_ctrl_frames;
 };
+
+struct ecore_eth_stats_bb {
+	u64 rx_1519_to_1522_byte_packets;
+	u64 rx_1519_to_2047_byte_packets;
+	u64 rx_2048_to_4095_byte_packets;
+	u64 rx_4096_to_9216_byte_packets;
+	u64 rx_9217_to_16383_byte_packets;
+	u64 tx_1519_to_2047_byte_packets;
+	u64 tx_2048_to_4095_byte_packets;
+	u64 tx_4096_to_9216_byte_packets;
+	u64 tx_9217_to_16383_byte_packets;
+	u64 tx_lpi_entry_count;
+	u64 tx_total_collisions;
+};
+
+struct ecore_eth_stats_ah {
+	u64 rx_1519_to_max_byte_packets;
+	u64 tx_1519_to_max_byte_packets;
+};
+
+struct ecore_eth_stats {
+	struct ecore_eth_stats_common common;
+	union {
+		struct ecore_eth_stats_bb bb;
+		struct ecore_eth_stats_ah ah;
+	};
+};
 #endif
 
 enum ecore_dmae_address_type_t {
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index 4ab8fd5..e58b8fa 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -1714,13 +1714,20 @@ static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn,
 	OSAL_MEMSET(&pstats, 0, sizeof(pstats));
 	ecore_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
 
-	p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
-	p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
-	p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
-	p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
-	p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
-	p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
-	p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
+	p_stats->common.tx_ucast_bytes +=
+		HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+	p_stats->common.tx_mcast_bytes +=
+		HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+	p_stats->common.tx_bcast_bytes +=
+		HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+	p_stats->common.tx_ucast_pkts +=
+		HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+	p_stats->common.tx_mcast_pkts +=
+		HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+	p_stats->common.tx_bcast_pkts +=
+		HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+	p_stats->common.tx_err_drop_pkts +=
+		HILO_64_REGPAIR(pstats.error_drop_pkts);
 }
 
 static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
@@ -1746,10 +1753,10 @@ static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
 	OSAL_MEMSET(&tstats, 0, sizeof(tstats));
 	ecore_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
 
-	p_stats->mftag_filter_discards +=
-	    HILO_64_REGPAIR(tstats.mftag_filter_discard);
-	p_stats->mac_filter_discards +=
-	    HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
+	p_stats->common.mftag_filter_discards +=
+		HILO_64_REGPAIR(tstats.mftag_filter_discard);
+	p_stats->common.mac_filter_discards +=
+		HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
 }
 
 static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn,
@@ -1783,12 +1790,18 @@ static void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn,
 	OSAL_MEMSET(&ustats, 0, sizeof(ustats));
 	ecore_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
 
-	p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
-	p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
-	p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
-	p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
-	p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
-	p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+	p_stats->common.rx_ucast_bytes +=
+		HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+	p_stats->common.rx_mcast_bytes +=
+		HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+	p_stats->common.rx_bcast_bytes +=
+		HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+	p_stats->common.rx_ucast_pkts +=
+		HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+	p_stats->common.rx_mcast_pkts +=
+		HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+	p_stats->common.rx_bcast_pkts +=
+		HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
 }
 
 static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn,
@@ -1822,23 +1835,27 @@ static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn,
 	OSAL_MEMSET(&mstats, 0, sizeof(mstats));
 	ecore_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
 
-	p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
-	p_stats->packet_too_big_discard +=
-	    HILO_64_REGPAIR(mstats.packet_too_big_discard);
-	p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
-	p_stats->tpa_coalesced_pkts +=
-	    HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
-	p_stats->tpa_coalesced_events +=
-	    HILO_64_REGPAIR(mstats.tpa_coalesced_events);
-	p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
-	p_stats->tpa_coalesced_bytes +=
-	    HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
+	p_stats->common.no_buff_discards +=
+		HILO_64_REGPAIR(mstats.no_buff_discard);
+	p_stats->common.packet_too_big_discard +=
+		HILO_64_REGPAIR(mstats.packet_too_big_discard);
+	p_stats->common.ttl0_discard +=
+		HILO_64_REGPAIR(mstats.ttl0_discard);
+	p_stats->common.tpa_coalesced_pkts +=
+		HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
+	p_stats->common.tpa_coalesced_events +=
+		HILO_64_REGPAIR(mstats.tpa_coalesced_events);
+	p_stats->common.tpa_aborts_num +=
+		HILO_64_REGPAIR(mstats.tpa_aborts_num);
+	p_stats->common.tpa_coalesced_bytes +=
+		HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
 }
 
 static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
 					 struct ecore_ptt *p_ptt,
 					 struct ecore_eth_stats *p_stats)
 {
+	struct ecore_eth_stats_common *p_common = &p_stats->common;
 	struct port_stats port_stats;
 	int j;
 
@@ -1849,54 +1866,75 @@ static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
 			  OFFSETOF(struct public_port, stats),
 			  sizeof(port_stats));
 
-	p_stats->rx_64_byte_packets += port_stats.eth.r64;
-	p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127;
-	p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255;
-	p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511;
-	p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
-	p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
-	p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522;
-	p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047;
-	p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095;
-	p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216;
-	p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383;
-	p_stats->rx_crc_errors += port_stats.eth.rfcs;
-	p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf;
-	p_stats->rx_pause_frames += port_stats.eth.rxpf;
-	p_stats->rx_pfc_frames += port_stats.eth.rxpp;
-	p_stats->rx_align_errors += port_stats.eth.raln;
-	p_stats->rx_carrier_errors += port_stats.eth.rfcr;
-	p_stats->rx_oversize_packets += port_stats.eth.rovr;
-	p_stats->rx_jabbers += port_stats.eth.rjbr;
-	p_stats->rx_undersize_packets += port_stats.eth.rund;
-	p_stats->rx_fragments += port_stats.eth.rfrg;
-	p_stats->tx_64_byte_packets += port_stats.eth.t64;
-	p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127;
-	p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255;
-	p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511;
-	p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
-	p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
-	p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047;
-	p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095;
-	p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216;
-	p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383;
-	p_stats->tx_pause_frames += port_stats.eth.txpf;
-	p_stats->tx_pfc_frames += port_stats.eth.txpp;
-	p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec;
-	p_stats->tx_total_collisions += port_stats.eth.tncl;
-	p_stats->rx_mac_bytes += port_stats.eth.rbyte;
-	p_stats->rx_mac_uc_packets += port_stats.eth.rxuca;
-	p_stats->rx_mac_mc_packets += port_stats.eth.rxmca;
-	p_stats->rx_mac_bc_packets += port_stats.eth.rxbca;
-	p_stats->rx_mac_frames_ok += port_stats.eth.rxpok;
-	p_stats->tx_mac_bytes += port_stats.eth.tbyte;
-	p_stats->tx_mac_uc_packets += port_stats.eth.txuca;
-	p_stats->tx_mac_mc_packets += port_stats.eth.txmca;
-	p_stats->tx_mac_bc_packets += port_stats.eth.txbca;
-	p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf;
+	p_common->rx_64_byte_packets += port_stats.eth.r64;
+	p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
+	p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
+	p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
+	p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
+	p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
+	p_common->rx_crc_errors += port_stats.eth.rfcs;
+	p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
+	p_common->rx_pause_frames += port_stats.eth.rxpf;
+	p_common->rx_pfc_frames += port_stats.eth.rxpp;
+	p_common->rx_align_errors += port_stats.eth.raln;
+	p_common->rx_carrier_errors += port_stats.eth.rfcr;
+	p_common->rx_oversize_packets += port_stats.eth.rovr;
+	p_common->rx_jabbers += port_stats.eth.rjbr;
+	p_common->rx_undersize_packets += port_stats.eth.rund;
+	p_common->rx_fragments += port_stats.eth.rfrg;
+	p_common->tx_64_byte_packets += port_stats.eth.t64;
+	p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
+	p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
+	p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
+	p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
+	p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
+	p_common->tx_pause_frames += port_stats.eth.txpf;
+	p_common->tx_pfc_frames += port_stats.eth.txpp;
+	p_common->rx_mac_bytes += port_stats.eth.rbyte;
+	p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
+	p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
+	p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
+	p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
+	p_common->tx_mac_bytes += port_stats.eth.tbyte;
+	p_common->tx_mac_uc_packets += port_stats.eth.txuca;
+	p_common->tx_mac_mc_packets += port_stats.eth.txmca;
+	p_common->tx_mac_bc_packets += port_stats.eth.txbca;
+	p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
 	for (j = 0; j < 8; j++) {
-		p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
-		p_stats->brb_discards += port_stats.brb.brb_discard[j];
+		p_common->brb_truncates += port_stats.brb.brb_truncate[j];
+		p_common->brb_discards += port_stats.brb.brb_discard[j];
+	}
+
+	if (ECORE_IS_BB(p_hwfn->p_dev)) {
+		struct ecore_eth_stats_bb *p_bb = &p_stats->bb;
+
+		p_bb->rx_1519_to_1522_byte_packets +=
+			port_stats.eth.u0.bb0.r1522;
+		p_bb->rx_1519_to_2047_byte_packets +=
+			port_stats.eth.u0.bb0.r2047;
+		p_bb->rx_2048_to_4095_byte_packets +=
+			port_stats.eth.u0.bb0.r4095;
+		p_bb->rx_4096_to_9216_byte_packets +=
+			port_stats.eth.u0.bb0.r9216;
+		p_bb->rx_9217_to_16383_byte_packets +=
+			port_stats.eth.u0.bb0.r16383;
+		p_bb->tx_1519_to_2047_byte_packets +=
+			port_stats.eth.u1.bb1.t2047;
+		p_bb->tx_2048_to_4095_byte_packets +=
+			port_stats.eth.u1.bb1.t4095;
+		p_bb->tx_4096_to_9216_byte_packets +=
+			port_stats.eth.u1.bb1.t9216;
+		p_bb->tx_9217_to_16383_byte_packets +=
+			port_stats.eth.u1.bb1.t16383;
+		p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
+		p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
+	} else {
+		struct ecore_eth_stats_ah *p_ah = &p_stats->ah;
+
+		p_ah->rx_1519_to_max_byte_packets +=
+			port_stats.eth.u0.ah0.r1519_to_max;
+		p_ah->tx_1519_to_max_byte_packets =
+			port_stats.eth.u1.ah1.t1519_to_max;
 	}
 }
 
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index fcf9847..1ad8a96 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -132,13 +132,28 @@ struct eth_stats {
 	u64 r1023; /* 0x04 (Offset 0x20 ) RX 512 to 1023 byte frame counter*/
 /* 0x05 (Offset 0x28 ) RX 1024 to 1518 byte frame counter */
 	u64 r1518;
+	union {
+		struct { /* bb */
 /* 0x06 (Offset 0x30 ) RX 1519 to 1522 byte VLAN-tagged frame counter */
-	u64 r1522;
-	u64 r2047; /* 0x07 (Offset 0x38 ) RX 1519 to 2047 byte frame counter*/
-	u64 r4095; /* 0x08 (Offset 0x40 ) RX 2048 to 4095 byte frame counter*/
-	u64 r9216; /* 0x09 (Offset 0x48 ) RX 4096 to 9216 byte frame counter*/
+			u64 r1522;
+/* 0x07 (Offset 0x38 ) RX 1519 to 2047 byte frame counter*/
+			u64 r2047;
+/* 0x08 (Offset 0x40 ) RX 2048 to 4095 byte frame counter*/
+			u64 r4095;
+/* 0x09 (Offset 0x48 ) RX 4096 to 9216 byte frame counter*/
+			u64 r9216;
 /* 0x0A (Offset 0x50 ) RX 9217 to 16383 byte frame counter */
-	u64 r16383;
+			u64 r16383;
+		} bb0;
+		struct { /* ah */
+			u64 unused1;
+/* 0x07 (Offset 0x38 ) RX 1519 to max byte frame counter*/
+			u64 r1519_to_max;
+			u64 unused2;
+			u64 unused3;
+			u64 unused4;
+		} ah0;
+	} u0;
 	u64 rfcs;       /* 0x0F (Offset 0x58 ) RX FCS error frame counter*/
 	u64 rxcf;       /* 0x10 (Offset 0x60 ) RX control frame counter*/
 	u64 rxpf;       /* 0x11 (Offset 0x68 ) RX pause frame counter*/
@@ -156,19 +171,40 @@ struct eth_stats {
 	u64 t1023; /* 0x44 (Offset 0xc8 ) TX 512 to 1023 byte frame counter*/
 /* 0x45 (Offset 0xd0 ) TX 1024 to 1518 byte frame counter */
 	u64 t1518;
+	union {
+		struct { /* bb */
 /* 0x47 (Offset 0xd8 ) TX 1519 to 2047 byte frame counter */
-	u64 t2047;
+			u64 t2047;
 /* 0x48 (Offset 0xe0 ) TX 2048 to 4095 byte frame counter */
-	u64 t4095;
+			u64 t4095;
 /* 0x49 (Offset 0xe8 ) TX 4096 to 9216 byte frame counter */
-	u64 t9216;
+			u64 t9216;
 /* 0x4A (Offset 0xf0 ) TX 9217 to 16383 byte frame counter */
-	u64 t16383;
+			u64 t16383;
+		} bb1;
+		struct { /* ah */
+/* 0x47 (Offset 0xd8 ) TX 1519 to max byte frame counter */
+			u64 t1519_to_max;
+			u64 unused6;
+			u64 unused7;
+			u64 unused8;
+		} ah1;
+	} u1;
 	u64 txpf;       /* 0x50 (Offset 0xf8 ) TX pause frame counter */
 	u64 txpp;       /* 0x51 (Offset 0x100) TX PFC frame counter */
 /* 0x6C (Offset 0x108) Transmit Logical Type LLFC message counter */
-	u64 tlpiec;
-	u64 tncl; /* 0x6E (Offset 0x110) Transmit Total Collision Counter */
+	union {
+		struct { /* bb */
+/* 0x6C (Offset 0x108) Transmit Logical Type LLFC message counter */
+			u64 tlpiec;
+/* 0x6E (Offset 0x110) Transmit Total Collision Counter */
+			u64 tncl;
+		} bb2;
+		struct { /* ah */
+			u64 unused9;
+			u64 unused10;
+		} ah2;
+	} u2;
 	u64 rbyte;      /* 0x3d (Offset 0x118) RX byte counter */
 	u64 rxuca;      /* 0x0c (Offset 0x120) RX UC frame counter */
 	u64 rxmca;      /* 0x0d (Offset 0x128) RX MC frame counter */
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index a0616a4..ac58ae5 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -125,143 +125,199 @@ struct rte_qede_xstats_name_off {
 };
 
 static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
-	{"rx_unicast_bytes", offsetof(struct ecore_eth_stats, rx_ucast_bytes)},
+	{"rx_unicast_bytes",
+		offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)},
 	{"rx_multicast_bytes",
-		offsetof(struct ecore_eth_stats, rx_mcast_bytes)},
+		offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)},
 	{"rx_broadcast_bytes",
-		offsetof(struct ecore_eth_stats, rx_bcast_bytes)},
-	{"rx_unicast_packets", offsetof(struct ecore_eth_stats, rx_ucast_pkts)},
+		offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)},
+	{"rx_unicast_packets",
+		offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)},
 	{"rx_multicast_packets",
-		offsetof(struct ecore_eth_stats, rx_mcast_pkts)},
+		offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)},
 	{"rx_broadcast_packets",
-		offsetof(struct ecore_eth_stats, rx_bcast_pkts)},
+		offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)},
 
-	{"tx_unicast_bytes", offsetof(struct ecore_eth_stats, tx_ucast_bytes)},
+	{"tx_unicast_bytes",
+		offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)},
 	{"tx_multicast_bytes",
-		offsetof(struct ecore_eth_stats, tx_mcast_bytes)},
+		offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)},
 	{"tx_broadcast_bytes",
-		offsetof(struct ecore_eth_stats, tx_bcast_bytes)},
-	{"tx_unicast_packets", offsetof(struct ecore_eth_stats, tx_ucast_pkts)},
+		offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)},
+	{"tx_unicast_packets",
+		offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)},
 	{"tx_multicast_packets",
-		offsetof(struct ecore_eth_stats, tx_mcast_pkts)},
+		offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)},
 	{"tx_broadcast_packets",
-		offsetof(struct ecore_eth_stats, tx_bcast_pkts)},
+		offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)},
 
 	{"rx_64_byte_packets",
-		offsetof(struct ecore_eth_stats, rx_64_byte_packets)},
+		offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)},
 	{"rx_65_to_127_byte_packets",
-		offsetof(struct ecore_eth_stats, rx_65_to_127_byte_packets)},
+		offsetof(struct ecore_eth_stats_common,
+			 rx_65_to_127_byte_packets)},
 	{"rx_128_to_255_byte_packets",
-		offsetof(struct ecore_eth_stats, rx_128_to_255_byte_packets)},
+		offsetof(struct ecore_eth_stats_common,
+			 rx_128_to_255_byte_packets)},
 	{"rx_256_to_511_byte_packets",
-		offsetof(struct ecore_eth_stats, rx_256_to_511_byte_packets)},
+		offsetof(struct ecore_eth_stats_common,
+			 rx_256_to_511_byte_packets)},
 	{"rx_512_to_1023_byte_packets",
-		offsetof(struct ecore_eth_stats, rx_512_to_1023_byte_packets)},
+		offsetof(struct ecore_eth_stats_common,
+			 rx_512_to_1023_byte_packets)},
 	{"rx_1024_to_1518_byte_packets",
-		offsetof(struct ecore_eth_stats, rx_1024_to_1518_byte_packets)},
-	{"rx_1519_to_1522_byte_packets",
-		offsetof(struct ecore_eth_stats, rx_1519_to_1522_byte_packets)},
-	{"rx_1519_to_2047_byte_packets",
-		offsetof(struct ecore_eth_stats, rx_1519_to_2047_byte_packets)},
-	{"rx_2048_to_4095_byte_packets",
-		offsetof(struct ecore_eth_stats, rx_2048_to_4095_byte_packets)},
-	{"rx_4096_to_9216_byte_packets",
-		offsetof(struct ecore_eth_stats, rx_4096_to_9216_byte_packets)},
-	{"rx_9217_to_16383_byte_packets",
-		offsetof(struct ecore_eth_stats,
-			 rx_9217_to_16383_byte_packets)},
+		offsetof(struct ecore_eth_stats_common,
+			 rx_1024_to_1518_byte_packets)},
 	{"tx_64_byte_packets",
-		offsetof(struct ecore_eth_stats, tx_64_byte_packets)},
+		offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)},
 	{"tx_65_to_127_byte_packets",
-		offsetof(struct ecore_eth_stats, tx_65_to_127_byte_packets)},
+		offsetof(struct ecore_eth_stats_common,
+			 tx_65_to_127_byte_packets)},
 	{"tx_128_to_255_byte_packets",
-		offsetof(struct ecore_eth_stats, tx_128_to_255_byte_packets)},
+		offsetof(struct ecore_eth_stats_common,
+			 tx_128_to_255_byte_packets)},
 	{"tx_256_to_511_byte_packets",
-		offsetof(struct ecore_eth_stats, tx_256_to_511_byte_packets)},
+		offsetof(struct ecore_eth_stats_common,
+			 tx_256_to_511_byte_packets)},
 	{"tx_512_to_1023_byte_packets",
-		offsetof(struct ecore_eth_stats, tx_512_to_1023_byte_packets)},
+		offsetof(struct ecore_eth_stats_common,
+			 tx_512_to_1023_byte_packets)},
 	{"tx_1024_to_1518_byte_packets",
-		offsetof(struct ecore_eth_stats, tx_1024_to_1518_byte_packets)},
-	{"trx_1519_to_1522_byte_packets",
-		offsetof(struct ecore_eth_stats, tx_1519_to_2047_byte_packets)},
-	{"tx_2048_to_4095_byte_packets",
-		offsetof(struct ecore_eth_stats, tx_2048_to_4095_byte_packets)},
-	{"tx_4096_to_9216_byte_packets",
-		offsetof(struct ecore_eth_stats, tx_4096_to_9216_byte_packets)},
-	{"tx_9217_to_16383_byte_packets",
-		offsetof(struct ecore_eth_stats,
-			 tx_9217_to_16383_byte_packets)},
+		offsetof(struct ecore_eth_stats_common,
+			 tx_1024_to_1518_byte_packets)},
 
 	{"rx_mac_crtl_frames",
-		offsetof(struct ecore_eth_stats, rx_mac_crtl_frames)},
+		offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)},
 	{"tx_mac_control_frames",
-		offsetof(struct ecore_eth_stats, tx_mac_ctrl_frames)},
-	{"rx_pause_frames", offsetof(struct ecore_eth_stats, rx_pause_frames)},
-	{"tx_pause_frames", offsetof(struct ecore_eth_stats, tx_pause_frames)},
+		offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)},
+	{"rx_pause_frames",
+		offsetof(struct ecore_eth_stats_common, rx_pause_frames)},
+	{"tx_pause_frames",
+		offsetof(struct ecore_eth_stats_common, tx_pause_frames)},
 	{"rx_priority_flow_control_frames",
-		offsetof(struct ecore_eth_stats, rx_pfc_frames)},
+		offsetof(struct ecore_eth_stats_common, rx_pfc_frames)},
 	{"tx_priority_flow_control_frames",
-		offsetof(struct ecore_eth_stats, tx_pfc_frames)},
+		offsetof(struct ecore_eth_stats_common, tx_pfc_frames)},
 
-	{"rx_crc_errors", offsetof(struct ecore_eth_stats, rx_crc_errors)},
-	{"rx_align_errors", offsetof(struct ecore_eth_stats, rx_align_errors)},
+	{"rx_crc_errors",
+		offsetof(struct ecore_eth_stats_common, rx_crc_errors)},
+	{"rx_align_errors",
+		offsetof(struct ecore_eth_stats_common, rx_align_errors)},
 	{"rx_carrier_errors",
-		offsetof(struct ecore_eth_stats, rx_carrier_errors)},
+		offsetof(struct ecore_eth_stats_common, rx_carrier_errors)},
 	{"rx_oversize_packet_errors",
-		offsetof(struct ecore_eth_stats, rx_oversize_packets)},
-	{"rx_jabber_errors", offsetof(struct ecore_eth_stats, rx_jabbers)},
+		offsetof(struct ecore_eth_stats_common, rx_oversize_packets)},
+	{"rx_jabber_errors",
+		offsetof(struct ecore_eth_stats_common, rx_jabbers)},
 	{"rx_undersize_packet_errors",
-		offsetof(struct ecore_eth_stats, rx_undersize_packets)},
-	{"rx_fragments", offsetof(struct ecore_eth_stats, rx_fragments)},
+		offsetof(struct ecore_eth_stats_common, rx_undersize_packets)},
+	{"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)},
 	{"rx_host_buffer_not_available",
-		offsetof(struct ecore_eth_stats, no_buff_discards)},
+		offsetof(struct ecore_eth_stats_common, no_buff_discards)},
 	/* Number of packets discarded because they are bigger than MTU */
 	{"rx_packet_too_big_discards",
-		offsetof(struct ecore_eth_stats, packet_too_big_discard)},
+		offsetof(struct ecore_eth_stats_common,
+			 packet_too_big_discard)},
 	{"rx_ttl_zero_discards",
-		offsetof(struct ecore_eth_stats, ttl0_discard)},
+		offsetof(struct ecore_eth_stats_common, ttl0_discard)},
 	{"rx_multi_function_tag_filter_discards",
-		offsetof(struct ecore_eth_stats, mftag_filter_discards)},
+		offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
 	{"rx_mac_filter_discards",
-		offsetof(struct ecore_eth_stats, mac_filter_discards)},
+		offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
 	{"rx_hw_buffer_truncates",
-		offsetof(struct ecore_eth_stats, brb_truncates)},
+		offsetof(struct ecore_eth_stats_common, brb_truncates)},
 	{"rx_hw_buffer_discards",
-		offsetof(struct ecore_eth_stats, brb_discards)},
-	{"tx_lpi_entry_count",
-		offsetof(struct ecore_eth_stats, tx_lpi_entry_count)},
-	{"tx_total_collisions",
-		offsetof(struct ecore_eth_stats, tx_total_collisions)},
+		offsetof(struct ecore_eth_stats_common, brb_discards)},
 	{"tx_error_drop_packets",
-		offsetof(struct ecore_eth_stats, tx_err_drop_pkts)},
+		offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)},
 
-	{"rx_mac_bytes", offsetof(struct ecore_eth_stats, rx_mac_bytes)},
+	{"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)},
 	{"rx_mac_unicast_packets",
-		offsetof(struct ecore_eth_stats, rx_mac_uc_packets)},
+		offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)},
 	{"rx_mac_multicast_packets",
-		offsetof(struct ecore_eth_stats, rx_mac_mc_packets)},
+		offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)},
 	{"rx_mac_broadcast_packets",
-		offsetof(struct ecore_eth_stats, rx_mac_bc_packets)},
+		offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)},
 	{"rx_mac_frames_ok",
-		offsetof(struct ecore_eth_stats, rx_mac_frames_ok)},
-	{"tx_mac_bytes", offsetof(struct ecore_eth_stats, tx_mac_bytes)},
+		offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)},
+	{"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)},
 	{"tx_mac_unicast_packets",
-		offsetof(struct ecore_eth_stats, tx_mac_uc_packets)},
+		offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)},
 	{"tx_mac_multicast_packets",
-		offsetof(struct ecore_eth_stats, tx_mac_mc_packets)},
+		offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)},
 	{"tx_mac_broadcast_packets",
-		offsetof(struct ecore_eth_stats, tx_mac_bc_packets)},
+		offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)},
 
 	{"lro_coalesced_packets",
-		offsetof(struct ecore_eth_stats, tpa_coalesced_pkts)},
+		offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)},
 	{"lro_coalesced_events",
-		offsetof(struct ecore_eth_stats, tpa_coalesced_events)},
+		offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)},
 	{"lro_aborts_num",
-		offsetof(struct ecore_eth_stats, tpa_aborts_num)},
+		offsetof(struct ecore_eth_stats_common, tpa_aborts_num)},
 	{"lro_not_coalesced_packets",
-		offsetof(struct ecore_eth_stats, tpa_not_coalesced_pkts)},
+		offsetof(struct ecore_eth_stats_common,
+			 tpa_not_coalesced_pkts)},
 	{"lro_coalesced_bytes",
-		offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)},
+		offsetof(struct ecore_eth_stats_common,
+			 tpa_coalesced_bytes)},
+};
+
+static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = {
+	{"rx_1519_to_1522_byte_packets",
+		offsetof(struct ecore_eth_stats, bb) +
+		offsetof(struct ecore_eth_stats_bb,
+			 rx_1519_to_1522_byte_packets)},
+	{"rx_1519_to_2047_byte_packets",
+		offsetof(struct ecore_eth_stats, bb) +
+		offsetof(struct ecore_eth_stats_bb,
+			 rx_1519_to_2047_byte_packets)},
+	{"rx_2048_to_4095_byte_packets",
+		offsetof(struct ecore_eth_stats, bb) +
+		offsetof(struct ecore_eth_stats_bb,
+			 rx_2048_to_4095_byte_packets)},
+	{"rx_4096_to_9216_byte_packets",
+		offsetof(struct ecore_eth_stats, bb) +
+		offsetof(struct ecore_eth_stats_bb,
+			 rx_4096_to_9216_byte_packets)},
+	{"rx_9217_to_16383_byte_packets",
+		offsetof(struct ecore_eth_stats, bb) +
+		offsetof(struct ecore_eth_stats_bb,
+			 rx_9217_to_16383_byte_packets)},
+
+	{"tx_1519_to_2047_byte_packets",
+		offsetof(struct ecore_eth_stats, bb) +
+		offsetof(struct ecore_eth_stats_bb,
+			 tx_1519_to_2047_byte_packets)},
+	{"tx_2048_to_4095_byte_packets",
+		offsetof(struct ecore_eth_stats, bb) +
+		offsetof(struct ecore_eth_stats_bb,
+			 tx_2048_to_4095_byte_packets)},
+	{"tx_4096_to_9216_byte_packets",
+		offsetof(struct ecore_eth_stats, bb) +
+		offsetof(struct ecore_eth_stats_bb,
+			 tx_4096_to_9216_byte_packets)},
+	{"tx_9217_to_16383_byte_packets",
+		offsetof(struct ecore_eth_stats, bb) +
+		offsetof(struct ecore_eth_stats_bb,
+			 tx_9217_to_16383_byte_packets)},
+
+	{"tx_lpi_entry_count",
+		offsetof(struct ecore_eth_stats, bb) +
+		offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)},
+	{"tx_total_collisions",
+		offsetof(struct ecore_eth_stats, bb) +
+		offsetof(struct ecore_eth_stats_bb, tx_total_collisions)},
+};
+
+static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = {
+	{"rx_1519_to_max_byte_packets",
+		offsetof(struct ecore_eth_stats, ah) +
+		offsetof(struct ecore_eth_stats_ah,
+			 rx_1519_to_max_byte_packets)},
+	{"tx_1519_to_max_byte_packets",
+		offsetof(struct ecore_eth_stats, ah) +
+		offsetof(struct ecore_eth_stats_ah,
+			 tx_1519_to_max_byte_packets)},
 };
 
 static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
@@ -1416,32 +1472,33 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
 	ecore_get_vport_stats(edev, &stats);
 
 	/* RX Stats */
-	eth_stats->ipackets = stats.rx_ucast_pkts +
-	    stats.rx_mcast_pkts + stats.rx_bcast_pkts;
+	eth_stats->ipackets = stats.common.rx_ucast_pkts +
+	    stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts;
 
-	eth_stats->ibytes = stats.rx_ucast_bytes +
-	    stats.rx_mcast_bytes + stats.rx_bcast_bytes;
+	eth_stats->ibytes = stats.common.rx_ucast_bytes +
+	    stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes;
 
-	eth_stats->ierrors = stats.rx_crc_errors +
-	    stats.rx_align_errors +
-	    stats.rx_carrier_errors +
-	    stats.rx_oversize_packets +
-	    stats.rx_jabbers + stats.rx_undersize_packets;
+	eth_stats->ierrors = stats.common.rx_crc_errors +
+	    stats.common.rx_align_errors +
+	    stats.common.rx_carrier_errors +
+	    stats.common.rx_oversize_packets +
+	    stats.common.rx_jabbers + stats.common.rx_undersize_packets;
 
-	eth_stats->rx_nombuf = stats.no_buff_discards;
+	eth_stats->rx_nombuf = stats.common.no_buff_discards;
 
-	eth_stats->imissed = stats.mftag_filter_discards +
-	    stats.mac_filter_discards +
-	    stats.no_buff_discards + stats.brb_truncates + stats.brb_discards;
+	eth_stats->imissed = stats.common.mftag_filter_discards +
+	    stats.common.mac_filter_discards +
+	    stats.common.no_buff_discards +
+	    stats.common.brb_truncates + stats.common.brb_discards;
 
 	/* TX stats */
-	eth_stats->opackets = stats.tx_ucast_pkts +
-	    stats.tx_mcast_pkts + stats.tx_bcast_pkts;
+	eth_stats->opackets = stats.common.tx_ucast_pkts +
+	    stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts;
 
-	eth_stats->obytes = stats.tx_ucast_bytes +
-	    stats.tx_mcast_bytes + stats.tx_bcast_bytes;
+	eth_stats->obytes = stats.common.tx_ucast_bytes +
+	    stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes;
 
-	eth_stats->oerrors = stats.tx_err_drop_pkts;
+	eth_stats->oerrors = stats.common.tx_err_drop_pkts;
 
 	/* Queue stats */
 	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
@@ -1490,10 +1547,18 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
 
 static unsigned
 qede_get_xstats_count(struct qede_dev *qdev) {
-	return RTE_DIM(qede_xstats_strings) +
-		(RTE_DIM(qede_rxq_xstats_strings) *
-		 RTE_MIN(QEDE_RSS_COUNT(qdev),
-			 RTE_ETHDEV_QUEUE_STAT_CNTRS));
+	if (ECORE_IS_BB(&qdev->edev))
+		return RTE_DIM(qede_xstats_strings) +
+		       RTE_DIM(qede_bb_xstats_strings) +
+		       (RTE_DIM(qede_rxq_xstats_strings) *
+			RTE_MIN(QEDE_RSS_COUNT(qdev),
+				RTE_ETHDEV_QUEUE_STAT_CNTRS));
+	else
+		return RTE_DIM(qede_xstats_strings) +
+		       RTE_DIM(qede_ah_xstats_strings) +
+		       (RTE_DIM(qede_rxq_xstats_strings) *
+			RTE_MIN(QEDE_RSS_COUNT(qdev),
+				RTE_ETHDEV_QUEUE_STAT_CNTRS));
 }
 
 static int
@@ -1502,6 +1567,7 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
 		      __rte_unused unsigned int limit)
 {
 	struct qede_dev *qdev = dev->data->dev_private;
+	struct ecore_dev *edev = &qdev->edev;
 	const unsigned int stat_cnt = qede_get_xstats_count(qdev);
 	unsigned int i, qid, stat_idx = 0;
 	unsigned int rxq_stat_cntrs;
@@ -1515,6 +1581,24 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
 			stat_idx++;
 		}
 
+		if (ECORE_IS_BB(edev)) {
+			for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
+				snprintf(xstats_names[stat_idx].name,
+					sizeof(xstats_names[stat_idx].name),
+					"%s",
+					qede_bb_xstats_strings[i].name);
+				stat_idx++;
+			}
+		} else {
+			for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
+				snprintf(xstats_names[stat_idx].name,
+					sizeof(xstats_names[stat_idx].name),
+					"%s",
+					qede_ah_xstats_strings[i].name);
+				stat_idx++;
+			}
+		}
+
 		rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
 					 RTE_ETHDEV_QUEUE_STAT_CNTRS);
 		for (qid = 0; qid < rxq_stat_cntrs; qid++) {
@@ -1555,6 +1639,24 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
 		stat_idx++;
 	}
 
+	if (ECORE_IS_BB(edev)) {
+		for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
+			xstats[stat_idx].value =
+					*(uint64_t *)(((char *)&stats) +
+					qede_bb_xstats_strings[i].offset);
+			xstats[stat_idx].id = stat_idx;
+			stat_idx++;
+		}
+	} else {
+		for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
+			xstats[stat_idx].value =
+					*(uint64_t *)(((char *)&stats) +
+					qede_ah_xstats_strings[i].offset);
+			xstats[stat_idx].id = stat_idx;
+			stat_idx++;
+		}
+	}
+
 	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
 				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
 	for (qid = 0; qid < rxq_stat_cntrs; qid++) {
-- 
1.7.10.3

^ permalink raw reply	[flat|nested] 9+ messages in thread
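
The BB/AH split above follows the driver's existing name/offset convention: each xstats entry stores a byte offset into struct ecore_eth_stats (the offset of the bb/ah sub-struct plus the inner field offset), and the value is later fetched by adding that offset to the address of the stats structure. A minimal stand-alone sketch of the same pattern, using hypothetical stand-in structures rather than the real ecore definitions:

#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the common/BB-specific stats blocks. */
struct demo_stats_common { uint64_t rx_ucast_pkts; };
struct demo_stats_bb     { uint64_t tx_total_collisions; };
struct demo_stats {
	struct demo_stats_common common;
	struct demo_stats_bb bb;
};

struct demo_xstats_name_off {
	const char *name;
	size_t offset;	/* byte offset from the start of struct demo_stats */
};

static const struct demo_xstats_name_off demo_bb_strings[] = {
	{"tx_total_collisions",
	 offsetof(struct demo_stats, bb) +
	 offsetof(struct demo_stats_bb, tx_total_collisions)},
};

int main(void)
{
	struct demo_stats stats = { .bb = { .tx_total_collisions = 7 } };
	size_t i;

	for (i = 0; i < sizeof(demo_bb_strings) / sizeof(demo_bb_strings[0]); i++) {
		/* Same lookup the xstats get path performs: base + offset. */
		uint64_t val = *(const uint64_t *)((const char *)&stats +
						   demo_bb_strings[i].offset);
		printf("%s = %" PRIu64 "\n", demo_bb_strings[i].name, val);
	}
	return 0;
}

Keeping separate per-chip tables lets qede_get_xstats_count() and the names/values callbacks pick the right set at run time without duplicating the common entries.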

* [dpdk-dev] [INTERNAL REVIEW 4/7] net/qede: fix inner L3/L4 chksum offload for tunnel frames
  2017-07-24 10:10 [dpdk-dev] [INTERNAL REVIEW 1/7] net/qede/base: fix recovery from previous ungraceful exit Rasesh Mody
  2017-07-24 10:10 ` [dpdk-dev] [INTERNAL REVIEW 2/7] net/qede: fix incorrect queue id for 100G Rasesh Mody
  2017-07-24 10:10 ` [dpdk-dev] [INTERNAL REVIEW 3/7] net/qede/base: fix for adapter specific stats Rasesh Mody
@ 2017-07-24 10:10 ` Rasesh Mody
  2017-07-24 10:10 ` [dpdk-dev] [INTERNAL REVIEW 5/7] doc: list NPAR as supported feature in qede Rasesh Mody
                   ` (3 subsequent siblings)
  6 siblings, 0 replies; 9+ messages in thread
From: Rasesh Mody @ 2017-07-24 10:10 UTC (permalink / raw)
  To: dev; +Cc: Harish Patil, Dept-EngDPDKDev

From: Harish Patil <harish.patil@cavium.com>

Force recalculation of the tunnel L4 checksum when the inner L3/L4
checksum gets updated due to HW offload.

Fixes: aab21617502e ("net/qede: add Tx offloads for MPLS-in-UDP packets")

Signed-off-by: Harish Patil <harish.patil@cavium.com>
---
 drivers/net/qede/qede_rxtx.c |   29 ++++++++++++++++++++++-------
 1 file changed, 22 insertions(+), 7 deletions(-)

diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index a232d20..5c3613c 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -1681,12 +1681,6 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 				inner_l4_hdr_offset = (mbuf->l2_len -
 					MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2;
 
-				/* TODO: There's no DPDK flag to request outer
-				 * L4 checksum offload, so we don't do it.
-				 * bd1_bd_flags_bf |=
-				 *      ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
-				 *      ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
-				 */
 				/* Inner L2 size and address type */
 				bd2_bf1 |= (inner_l2_hdr_size &
 					ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK) <<
@@ -1765,15 +1759,36 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
 		}
 
 		/* Offload the IP checksum in the hardware */
-		if (tx_ol_flags & PKT_TX_IP_CKSUM)
+		if (tx_ol_flags & PKT_TX_IP_CKSUM) {
 			bd1_bd_flags_bf |=
 				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+			/* There's no DPDK flag to request outer-L4 csum
+			 * offload. But in the case of tunnel if inner L3 or L4
+			 * csum offload is requested then we need to force
+			 * recalculation of L4 tunnel header csum also.
+			 */
+			if (tunn_flg) {
+				bd1_bd_flags_bf |=
+					ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
+					ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
+			}
+		}
 
 		/* L4 checksum offload (tcp or udp) */
 		if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
 		    (tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM))) {
 			bd1_bd_flags_bf |=
 				1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+			/* There's no DPDK flag to request outer-L4 csum
+			 * offload. But in the case of tunnel if inner L3 or L4
+			 * csum offload is requested then we need to force
+			 * recalculation of L4 tunnel header csum also.
+			 */
+			if (tunn_flg) {
+				bd1_bd_flags_bf |=
+					ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
+					ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
+			}
 		}
 
 		/* Fill the entry in the SW ring and the BDs in the FW ring */
-- 
1.7.10.3

^ permalink raw reply	[flat|nested] 9+ messages in thread
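
The fix reduces to one rule: when the frame is tunneled (tunn_flg) and an inner IP or L4 checksum offload was requested, the first Tx BD must also carry the tunnel L4 checksum flag so the hardware recomputes the tunnel (outer) L4 checksum it would otherwise leave stale. A stand-alone sketch of that decision, using made-up flag values in place of the real PKT_TX_* and ETH_TX_1ST_BD_FLAGS_* constants:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in offload request bits (the real ones are PKT_TX_* in rte_mbuf.h). */
#define DEMO_TX_IP_CKSUM	(1u << 0)
#define DEMO_TX_TCP_CKSUM	(1u << 1)
#define DEMO_TX_UDP_CKSUM	(1u << 2)

/* Stand-in BD flag bits (the real ones are ETH_TX_1ST_BD_FLAGS_*). */
#define DEMO_BD_IP_CSUM		(1u << 0)
#define DEMO_BD_L4_CSUM		(1u << 1)
#define DEMO_BD_TUNN_L4_CSUM	(1u << 2)

static uint16_t demo_tx_bd_flags(uint32_t ol_flags, bool tunn)
{
	uint16_t bd_flags = 0;

	if (ol_flags & DEMO_TX_IP_CKSUM) {
		bd_flags |= DEMO_BD_IP_CSUM;
		/* Inner IP csum on a tunneled frame: also refresh the
		 * tunnel (outer) L4 checksum, as the fix above does.
		 */
		if (tunn)
			bd_flags |= DEMO_BD_TUNN_L4_CSUM;
	}

	if (ol_flags & (DEMO_TX_TCP_CKSUM | DEMO_TX_UDP_CKSUM)) {
		bd_flags |= DEMO_BD_L4_CSUM;
		if (tunn)
			bd_flags |= DEMO_BD_TUNN_L4_CSUM;
	}

	return bd_flags;
}

int main(void)
{
	/* Tunneled frame with inner IP + TCP checksum offload requested. */
	printf("bd flags = 0x%x\n",
	       demo_tx_bd_flags(DEMO_TX_IP_CKSUM | DEMO_TX_TCP_CKSUM, true));
	return 0;
}

In the driver the same condition appears in both the IP-checksum and the L4-checksum branches, as the diff above shows.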

* [dpdk-dev] [INTERNAL REVIEW 5/7] doc: list NPAR as supported feature in qede
  2017-07-24 10:10 [dpdk-dev] [INTERNAL REVIEW 1/7] net/qede/base: fix recovery from previous ungraceful exit Rasesh Mody
                   ` (2 preceding siblings ...)
  2017-07-24 10:10 ` [dpdk-dev] [INTERNAL REVIEW 4/7] net/qede: fix inner L3/L4 chksum offload for tunnel frames Rasesh Mody
@ 2017-07-24 10:10 ` Rasesh Mody
  2017-07-24 10:10 ` [dpdk-dev] [INTERNAL REVIEW 6/7] net/qede: fix chip details print Rasesh Mody
                   ` (2 subsequent siblings)
  6 siblings, 0 replies; 9+ messages in thread
From: Rasesh Mody @ 2017-07-24 10:10 UTC (permalink / raw)
  To: dev; +Cc: Shahed Shaikh, Dept-EngDPDKDev

From: Shahed Shaikh <Shahed.Shaikh@cavium.com>

The existing qede PMD code already supports the NPAR feature, so add it
to the "Supported Features" section after testing it with the latest DPDK.

Also, add myself to the list of maintainers of the qede PMD.

Signed-off-by: Shahed Shaikh <Shahed.Shaikh@cavium.com>
---
 MAINTAINERS              |    1 +
 doc/guides/nics/qede.rst |    2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/MAINTAINERS b/MAINTAINERS
index dc52760..fc26870 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -423,6 +423,7 @@ F: doc/guides/nics/features/bnx2x*.ini
 QLogic qede PMD
 M: Rasesh Mody <rasesh.mody@cavium.com>
 M: Harish Patil <harish.patil@cavium.com>
+M: Shahed Shaikh <shahed.shaikh@cavium.com>
 F: drivers/net/qede/
 F: doc/guides/nics/qede.rst
 F: doc/guides/nics/features/qede*.ini
diff --git a/doc/guides/nics/qede.rst b/doc/guides/nics/qede.rst
index afe2df8..09a10be 100644
--- a/doc/guides/nics/qede.rst
+++ b/doc/guides/nics/qede.rst
@@ -62,13 +62,13 @@ Supported Features
 - VXLAN tunneling offload
 - N-tuple filter and flow director (limited support)
 - LRO/TSO
+- NPAR (NIC Partitioning)
 
 Non-supported Features
 ----------------------
 
 - SR-IOV PF
 - GENEVE and NVGRE Tunneling offloads
-- NPAR
 
 Supported QLogic Adapters
 -------------------------
-- 
1.7.10.3

^ permalink raw reply	[flat|nested] 9+ messages in thread

* [dpdk-dev] [INTERNAL REVIEW 6/7] net/qede: fix chip details print
  2017-07-24 10:10 [dpdk-dev] [INTERNAL REVIEW 1/7] net/qede/base: fix recovery from previous ungraceful exit Rasesh Mody
                   ` (3 preceding siblings ...)
  2017-07-24 10:10 ` [dpdk-dev] [INTERNAL REVIEW 5/7] doc: list NPAR as supported feature in qede Rasesh Mody
@ 2017-07-24 10:10 ` Rasesh Mody
  2017-07-24 10:10 ` [dpdk-dev] [INTERNAL REVIEW 7/7] net/qede: update PMD version 2.5.2.1 Rasesh Mody
  2017-07-24 10:18 ` [dpdk-dev] [INTERNAL REVIEW 1/7] net/qede/base: fix recovery from previous ungraceful exit Mody, Rasesh
  6 siblings, 0 replies; 9+ messages in thread
From: Rasesh Mody @ 2017-07-24 10:10 UTC (permalink / raw)
  To: dev; +Cc: Rasesh Mody, Dept-EngDPDKDev

Fix the chip details printed as part of the adapter info printout.

Fixes: 2ea6f76aff40 ("qede: add core driver")

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/qede_ethdev.c |    5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index ac58ae5..0e05989 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -363,9 +363,10 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
 
 	DP_INFO(edev, "*********************************\n");
 	DP_INFO(edev, " DPDK version:%s\n", rte_version());
-	DP_INFO(edev, " Chip details : %s%d\n",
+	DP_INFO(edev, " Chip details : %s %c%d\n",
 		  ECORE_IS_BB(edev) ? "BB" : "AH",
-		  CHIP_REV_IS_A0(edev) ? 0 : 1);
+		  'A' + edev->chip_rev,
+		  (int)edev->chip_metal);
 	snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
 		 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
 	snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
-- 
1.7.10.3

^ permalink raw reply	[flat|nested] 9+ messages in thread
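
After the fix the revision is printed as a letter/number pair derived from the chip revision and metal fields, e.g. "BB B0" instead of the old "BB0"/"BB1". A small sketch of that formatting, assuming chip_rev counts revisions from zero (so 0 maps to 'A') and chip_metal is the numeric metal spin:

#include <stdio.h>

/* Hypothetical values standing in for edev->chip_rev / edev->chip_metal. */
static void demo_print_chip(int is_bb, unsigned int chip_rev,
			    unsigned int chip_metal)
{
	/* Family, then revision letter ('A' + chip_rev) and metal number. */
	printf("Chip details : %s %c%u\n",
	       is_bb ? "BB" : "AH", (char)('A' + chip_rev), chip_metal);
}

int main(void)
{
	demo_print_chip(1, 1, 0);	/* prints "Chip details : BB B0" */
	return 0;
}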

* [dpdk-dev] [INTERNAL REVIEW 7/7] net/qede: update PMD version 2.5.2.1
  2017-07-24 10:10 [dpdk-dev] [INTERNAL REVIEW 1/7] net/qede/base: fix recovery from previous ungraceful exit Rasesh Mody
                   ` (4 preceding siblings ...)
  2017-07-24 10:10 ` [dpdk-dev] [INTERNAL REVIEW 6/7] net/qede: fix chip details print Rasesh Mody
@ 2017-07-24 10:10 ` Rasesh Mody
  2017-07-24 10:18 ` [dpdk-dev] [INTERNAL REVIEW 1/7] net/qede/base: fix recovery from previous ungraceful exit Mody, Rasesh
  6 siblings, 0 replies; 9+ messages in thread
From: Rasesh Mody @ 2017-07-24 10:10 UTC (permalink / raw)
  To: dev; +Cc: Rasesh Mody, Dept-EngDPDKDev

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/qede_ethdev.h |    2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
index 510b6e2..a3254b1 100644
--- a/drivers/net/qede/qede_ethdev.h
+++ b/drivers/net/qede/qede_ethdev.h
@@ -50,7 +50,7 @@
 #define QEDE_PMD_VER_PREFIX		"QEDE PMD"
 #define QEDE_PMD_VERSION_MAJOR		2
 #define QEDE_PMD_VERSION_MINOR	        5
-#define QEDE_PMD_VERSION_REVISION       1
+#define QEDE_PMD_VERSION_REVISION       2
 #define QEDE_PMD_VERSION_PATCH	        1
 
 #define QEDE_PMD_VERSION qede_stringify(QEDE_PMD_VERSION_MAJOR) "."     \
-- 
1.7.10.3

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [dpdk-dev] [INTERNAL REVIEW 1/7] net/qede/base: fix recovery from previous ungraceful exit
  2017-07-24 10:10 [dpdk-dev] [INTERNAL REVIEW 1/7] net/qede/base: fix recovery from previous ungraceful exit Rasesh Mody
                   ` (5 preceding siblings ...)
  2017-07-24 10:10 ` [dpdk-dev] [INTERNAL REVIEW 7/7] net/qede: update PMD version 2.5.2.1 Rasesh Mody
@ 2017-07-24 10:18 ` Mody, Rasesh
  2017-07-24 15:00   ` Thomas Monjalon
  6 siblings, 1 reply; 9+ messages in thread
From: Mody, Rasesh @ 2017-07-24 10:18 UTC (permalink / raw)
  To: Mody, Rasesh, dev; +Cc: Dept-Eng DPDK Dev

Please ignore this patch-set with the 'INTERNAL REVIEW' subject prefix. I am sending a follow-up with the proper subject prefix.

Thanks!
-Rasesh

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [dpdk-dev] [INTERNAL REVIEW 1/7] net/qede/base: fix recovery from previous ungraceful exit
  2017-07-24 10:18 ` [dpdk-dev] [INTERNAL REVIEW 1/7] net/qede/base: fix recovery from previous ungraceful exit Mody, Rasesh
@ 2017-07-24 15:00   ` Thomas Monjalon
  0 siblings, 0 replies; 9+ messages in thread
From: Thomas Monjalon @ 2017-07-24 15:00 UTC (permalink / raw)
  To: Mody, Rasesh; +Cc: dev, Dept-Eng DPDK Dev

24/07/2017 13:18, Mody, Rasesh:
> Please ignore this patch-set with the 'INTERNAL REVIEW' subject prefix. I am sending a follow-up with the proper subject prefix.

Please update patchwork as well.

^ permalink raw reply	[flat|nested] 9+ messages in thread

end of thread, other threads:[~2017-07-24 15:00 UTC | newest]

Thread overview: 9+ messages
2017-07-24 10:10 [dpdk-dev] [INTERNAL REVIEW 1/7] net/qede/base: fix recovery from previous ungraceful exit Rasesh Mody
2017-07-24 10:10 ` [dpdk-dev] [INTERNAL REVIEW 2/7] net/qede: fix incorrect queue id for 100G Rasesh Mody
2017-07-24 10:10 ` [dpdk-dev] [INTERNAL REVIEW 3/7] net/qede/base: fix for adapter specific stats Rasesh Mody
2017-07-24 10:10 ` [dpdk-dev] [INTERNAL REVIEW 4/7] net/qede: fix inner L3/L4 chksum offload for tunnel frames Rasesh Mody
2017-07-24 10:10 ` [dpdk-dev] [INTERNAL REVIEW 5/7] doc: list NPAR as supported feature in qede Rasesh Mody
2017-07-24 10:10 ` [dpdk-dev] [INTERNAL REVIEW 6/7] net/qede: fix chip details print Rasesh Mody
2017-07-24 10:10 ` [dpdk-dev] [INTERNAL REVIEW 7/7] net/qede: update PMD version 2.5.2.1 Rasesh Mody
2017-07-24 10:18 ` [dpdk-dev] [INTERNAL REVIEW 1/7] net/qede/base: fix recovery from previous ungraceful exit Mody, Rasesh
2017-07-24 15:00   ` Thomas Monjalon
