DPDK patches and discussions
 help / color / mirror / Atom feed
* [dpdk-dev] [PATCH 00/11] updates for hns3 PMD driver
@ 2020-08-25 11:52 Wei Hu (Xavier)
  2020-08-25 11:52 ` [dpdk-dev] [PATCH 01/11] net/hns3: get device capability from firmware Wei Hu (Xavier)
                   ` (11 more replies)
  0 siblings, 12 replies; 23+ messages in thread
From: Wei Hu (Xavier) @ 2020-08-25 11:52 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit, xavier.huwei

This series are features and fixes for hns3 PMD driver.

Huisong Li (3):
  net/hns3: replace private macro with RTE MAX
  net/hns3: fix default MAC addr from firmware
  net/hns3: fix some incomplete command structures

Wei Hu (Xavier) (8):
  net/hns3: get device capability from firmware
  net/hns3: get dev specifications from firmware
  net/hns3: compatibility issues about Rx interrupts
  net/hns3: compatibility issues about Tx padding short frame
  net/hns3: add more hardware error types
  net/hns3: support a maximum of 256 FDIR counters
  net/hns3: change the log level to INFO
  net/hns3: fix Rx/Tx queue offload capability

 drivers/net/hns3/hns3_cmd.c       |   36 +-
 drivers/net/hns3/hns3_cmd.h       |   94 ++-
 drivers/net/hns3/hns3_dcb.c       |    1 -
 drivers/net/hns3/hns3_dcb.h       |   14 +-
 drivers/net/hns3/hns3_ethdev.c    |  187 ++++-
 drivers/net/hns3/hns3_ethdev.h    |  138 +++-
 drivers/net/hns3/hns3_ethdev_vf.c |  124 ++-
 drivers/net/hns3/hns3_fdir.c      |    5 +
 drivers/net/hns3/hns3_intr.c      | 1236 ++++++++++++++++++++++++-----
 drivers/net/hns3/hns3_intr.h      |   40 +-
 drivers/net/hns3/hns3_regs.h      |    7 +
 drivers/net/hns3/hns3_rxtx.c      |   31 +-
 drivers/net/hns3/hns3_rxtx.h      |   11 +-
 drivers/net/hns3/hns3_stats.c     |   78 +-
 drivers/net/hns3/hns3_stats.h     |    2 +
 15 files changed, 1666 insertions(+), 338 deletions(-)

-- 
2.27.0


^ permalink raw reply	[flat|nested] 23+ messages in thread

* [dpdk-dev] [PATCH 01/11] net/hns3: get device capability from firmware
  2020-08-25 11:52 [dpdk-dev] [PATCH 00/11] updates for hns3 PMD driver Wei Hu (Xavier)
@ 2020-08-25 11:52 ` Wei Hu (Xavier)
  2020-08-25 11:52 ` [dpdk-dev] [PATCH 02/11] net/hns3: get dev specifications " Wei Hu (Xavier)
                   ` (10 subsequent siblings)
  11 siblings, 0 replies; 23+ messages in thread
From: Wei Hu (Xavier) @ 2020-08-25 11:52 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit, xavier.huwei

From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>

This patch adds getting device capabilities from firmware, so the driver can
supply different capabilities and specifications to upper level
applications based on different versions of the hardware network engine.

Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
---
 drivers/net/hns3/hns3_cmd.c    | 36 ++++++++++++++++++++++++++++------
 drivers/net/hns3/hns3_cmd.h    | 19 +++++++++++++++++-
 drivers/net/hns3/hns3_ethdev.c |  3 ---
 drivers/net/hns3/hns3_ethdev.h | 29 +++++++++++++++++++++++++++
 4 files changed, 77 insertions(+), 10 deletions(-)

diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
index cbb09887c..0299072ef 100644
--- a/drivers/net/hns3/hns3_cmd.c
+++ b/drivers/net/hns3/hns3_cmd.c
@@ -426,8 +426,29 @@ hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num)
 	return retval;
 }
 
+static void hns3_parse_capability(struct hns3_hw *hw,
+				  struct hns3_query_version_cmd *cmd)
+{
+	uint32_t caps = rte_le_to_cpu_32(cmd->caps[0]);
+
+	if (hns3_get_bit(caps, HNS3_CAPS_UDP_GSO_B))
+		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_UDP_GSO_B, 1);
+	if (hns3_get_bit(caps, HNS3_CAPS_ADQ_B))
+		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_ADQ_B, 1);
+	if (hns3_get_bit(caps, HNS3_CAPS_PTP_B))
+		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_PTP_B, 1);
+	if (hns3_get_bit(caps, HNS3_CAPS_TX_PUSH_B))
+		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_TX_PUSH_B, 1);
+	if (hns3_get_bit(caps, HNS3_CAPS_PHY_IMP_B))
+		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_COPPER_B, 1);
+	if (hns3_get_bit(caps, HNS3_CAPS_TQP_TXRX_INDEP_B))
+		hns3_set_bit(hw->capability, HNS3_CAPS_TQP_TXRX_INDEP_B, 1);
+	if (hns3_get_bit(caps, HNS3_CAPS_STASH_B))
+		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_STASH_B, 1);
+}
+
 static enum hns3_cmd_status
-hns3_cmd_query_firmware_version(struct hns3_hw *hw, uint32_t *version)
+hns3_cmd_query_firmware_version_and_capability(struct hns3_hw *hw)
 {
 	struct hns3_query_version_cmd *resp;
 	struct hns3_cmd_desc desc;
@@ -438,10 +459,13 @@ hns3_cmd_query_firmware_version(struct hns3_hw *hw, uint32_t *version)
 
 	/* Initialize the cmd function */
 	ret = hns3_cmd_send(hw, &desc, 1);
-	if (ret == 0)
-		*version = rte_le_to_cpu_32(resp->firmware);
+	if (ret)
+		return ret;
 
-	return ret;
+	hw->fw_version = rte_le_to_cpu_32(resp->firmware);
+	hns3_parse_capability(hw, resp);
+
+	return 0;
 }
 
 int
@@ -519,13 +543,13 @@ hns3_cmd_init(struct hns3_hw *hw)
 	}
 	rte_atomic16_clear(&hw->reset.disable_cmd);
 
-	ret = hns3_cmd_query_firmware_version(hw, &version);
+	ret = hns3_cmd_query_firmware_version_and_capability(hw);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "firmware version query failed %d", ret);
 		goto err_cmd_init;
 	}
 
-	hw->fw_version = version;
+	version = hw->fw_version;
 	PMD_INIT_LOG(INFO, "The firmware version is %lu.%lu.%lu.%lu",
 		     hns3_get_field(version, HNS3_FW_VERSION_BYTE3_M,
 				    HNS3_FW_VERSION_BYTE3_S),
diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
index d70f42e5d..a13b799f4 100644
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -264,9 +264,26 @@ struct hns3_rx_priv_buff_cmd {
 #define HNS3_FW_VERSION_BYTE1_M		GENMASK(15, 8)
 #define HNS3_FW_VERSION_BYTE0_S		0
 #define HNS3_FW_VERSION_BYTE0_M		GENMASK(7, 0)
+
+enum HNS3_CAPS_BITS {
+	HNS3_CAPS_UDP_GSO_B,
+	HNS3_CAPS_ATR_B,
+	HNS3_CAPS_ADQ_B,
+	HNS3_CAPS_PTP_B,
+	HNS3_CAPS_INT_QL_B,
+	HNS3_CAPS_SIMPLE_BD_B,
+	HNS3_CAPS_TX_PUSH_B,
+	HNS3_CAPS_PHY_IMP_B,
+	HNS3_CAPS_TQP_TXRX_INDEP_B,
+	HNS3_CAPS_HW_PAD_B,
+	HNS3_CAPS_STASH_B,
+};
+#define HNS3_QUERY_CAP_LENGTH		3
 struct hns3_query_version_cmd {
 	uint32_t firmware;
-	uint32_t firmware_rsv[5];
+	uint32_t hardware;
+	uint32_t rsv;
+	uint32_t caps[HNS3_QUERY_CAP_LENGTH]; /* capabilities of device */
 };
 
 #define HNS3_RX_PRIV_EN_B	15
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index fab1914c3..44fd69fa1 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2837,9 +2837,6 @@ hns3_get_capability(struct hns3_hw *hw)
 	}
 	hw->revision = revision;
 
-	if (revision >= PCI_REVISION_ID_HIP09_A)
-		hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_COPPER_B, 1);
-
 	return 0;
 }
 
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 0e665e59b..1914e588d 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -535,13 +535,42 @@ struct hns3_adapter {
 
 #define HNS3_DEV_SUPPORT_DCB_B			0x0
 #define HNS3_DEV_SUPPORT_COPPER_B		0x1
+#define HNS3_DEV_SUPPORT_UDP_GSO_B		0x2
+#define HNS3_DEV_SUPPORT_ADQ_B			0x3
+#define HNS3_DEV_SUPPORT_PTP_B			0x4
+#define HNS3_DEV_SUPPORT_TX_PUSH_B		0x5
+#define HNS3_DEV_SUPPORT_INDEP_TXRX_B		0x6
+#define HNS3_DEV_SUPPORT_STASH_B		0x7
 
 #define hns3_dev_dcb_supported(hw) \
 	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_DCB_B)
 
+/* Support copper media type */
 #define hns3_dev_copper_supported(hw) \
 	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_COPPER_B)
 
+/* Support UDP GSO offload */
+#define hns3_dev_udp_gso_supported(hw) \
+	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_UDP_GSO_B)
+
+/* Support Application Device Queue */
+#define hns3_dev_adq_supported(hw) \
+	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_ADQ_B)
+
+/* Support PTP timestamp offload */
+#define hns3_dev_ptp_supported(hw) \
+	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_PTP_B)
+
+#define hns3_dev_tx_push_supported(hw) \
+	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_TX_PUSH_B)
+
+/* Support to Independently enable/disable/reset Tx or Rx queues */
+#define hns3_dev_indep_txrx_supported(hw) \
+	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_INDEP_TXRX_B)
+
+#define hns3_dev_stash_supported(hw) \
+	hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_STASH_B)
+
 #define HNS3_DEV_PRIVATE_TO_HW(adapter) \
 	(&((struct hns3_adapter *)adapter)->hw)
 #define HNS3_DEV_PRIVATE_TO_ADAPTER(adapter) \
-- 
2.27.0


^ permalink raw reply	[flat|nested] 23+ messages in thread

* [dpdk-dev] [PATCH 02/11] net/hns3: get dev specifications from firmware
  2020-08-25 11:52 [dpdk-dev] [PATCH 00/11] updates for hns3 PMD driver Wei Hu (Xavier)
  2020-08-25 11:52 ` [dpdk-dev] [PATCH 01/11] net/hns3: get device capability from firmware Wei Hu (Xavier)
@ 2020-08-25 11:52 ` Wei Hu (Xavier)
  2020-08-25 11:52 ` [dpdk-dev] [PATCH 03/11] net/hns3: compatibility issues about Rx interrupts Wei Hu (Xavier)
                   ` (9 subsequent siblings)
  11 siblings, 0 replies; 23+ messages in thread
From: Wei Hu (Xavier) @ 2020-08-25 11:52 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit, xavier.huwei

From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>

This patch adds getting PF/VF device specifications from firmware.

Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Hongbo Zheng <zhenghongbo3@huawei.com>
---
 drivers/net/hns3/hns3_cmd.h       | 15 ++++++++
 drivers/net/hns3/hns3_dcb.c       |  1 -
 drivers/net/hns3/hns3_dcb.h       |  2 +
 drivers/net/hns3/hns3_ethdev.c    | 61 ++++++++++++++++++++++++++++++-
 drivers/net/hns3/hns3_ethdev.h    |  6 +++
 drivers/net/hns3/hns3_ethdev_vf.c | 59 +++++++++++++++++++++++++++++-
 6 files changed, 141 insertions(+), 3 deletions(-)

diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
index a13b799f4..65aa8bad8 100644
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -93,6 +93,8 @@ enum hns3_opcode_type {
 	HNS3_OPC_QUERY_32_BIT_REG       = 0x0041,
 	HNS3_OPC_QUERY_64_BIT_REG       = 0x0042,
 
+	HNS3_OPC_QUERY_DEV_SPECS        = 0x0050,
+
 	/* MAC command */
 	HNS3_OPC_CONFIG_MAC_MODE        = 0x0301,
 	HNS3_OPC_QUERY_LINK_STATUS      = 0x0307,
@@ -805,6 +807,19 @@ struct hns3_reset_cmd {
 	uint8_t rsv[22];
 };
 
+#define HNS3_QUERY_DEV_SPECS_BD_NUM		4
+struct hns3_dev_specs_0_cmd {
+	uint32_t rsv0;
+	uint32_t mac_entry_num;
+	uint32_t mng_entry_num;
+	uint16_t rss_ind_tbl_size;
+	uint16_t rss_key_size;
+	uint16_t intr_ql_max;
+	uint8_t max_non_tso_bd_num;
+	uint8_t rsv1;
+	uint32_t max_tm_rate;
+};
+
 #define HNS3_MAX_TQP_NUM_PER_FUNC	64
 #define HNS3_DEFAULT_TX_BUF		0x4000    /* 16k  bytes */
 #define HNS3_TOTAL_PKT_BUF		0x108000  /* 1.03125M bytes */
diff --git a/drivers/net/hns3/hns3_dcb.c b/drivers/net/hns3/hns3_dcb.c
index 02628b6b6..c1be49e0f 100644
--- a/drivers/net/hns3/hns3_dcb.c
+++ b/drivers/net/hns3/hns3_dcb.c
@@ -19,7 +19,6 @@
 #define HNS3_SHAPER_BS_U_DEF	5
 #define HNS3_SHAPER_BS_S_DEF	20
 #define BW_MAX_PERCENT		100
-#define HNS3_ETHER_MAX_RATE	100000
 
 /*
  * hns3_shaper_para_calc: calculate ir parameter for the shaper
diff --git a/drivers/net/hns3/hns3_dcb.h b/drivers/net/hns3/hns3_dcb.h
index 9c2c5f21c..1636c5ae8 100644
--- a/drivers/net/hns3/hns3_dcb.h
+++ b/drivers/net/hns3/hns3_dcb.h
@@ -5,6 +5,8 @@
 #ifndef _HNS3_DCB_H_
 #define _HNS3_DCB_H_
 
+#define HNS3_ETHER_MAX_RATE		100000
+
 /* MAC Pause */
 #define HNS3_TX_MAC_PAUSE_EN_MSK	BIT(0)
 #define HNS3_RX_MAC_PAUSE_EN_MSK	BIT(1)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 44fd69fa1..951f26d42 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2809,6 +2809,51 @@ hns3_parse_speed(int speed_cmd, uint32_t *speed)
 	return 0;
 }
 
+static void
+hns3_set_default_dev_specifications(struct hns3_hw *hw)
+{
+	hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT;
+	hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE;
+	hw->rss_key_size = HNS3_RSS_KEY_SIZE;
+	hw->max_tm_rate = HNS3_ETHER_MAX_RATE;
+}
+
+static void
+hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc)
+{
+	struct hns3_dev_specs_0_cmd *req0;
+
+	req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data;
+
+	hw->max_non_tso_bd_num = req0->max_non_tso_bd_num;
+	hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size);
+	hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size);
+	hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate);
+}
+
+static int
+hns3_query_dev_specifications(struct hns3_hw *hw)
+{
+	struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM];
+	int ret;
+	int i;
+
+	for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
+		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS,
+					  true);
+		desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
+	}
+	hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true);
+
+	ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM);
+	if (ret)
+		return ret;
+
+	hns3_parse_dev_specifications(hw, desc);
+
+	return 0;
+}
+
 static int
 hns3_get_capability(struct hns3_hw *hw)
 {
@@ -2832,11 +2877,25 @@ hns3_get_capability(struct hns3_hw *hw)
 	ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
 				  HNS3_PCI_REVISION_ID);
 	if (ret != HNS3_PCI_REVISION_ID_LEN) {
-		PMD_INIT_LOG(ERR, "failed to read pci revision id: %d", ret);
+		PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d",
+			     ret);
 		return -EIO;
 	}
 	hw->revision = revision;
 
+	if (revision < PCI_REVISION_ID_HIP09_A) {
+		hns3_set_default_dev_specifications(hw);
+		return 0;
+	}
+
+	ret = hns3_query_dev_specifications(hw);
+	if (ret) {
+		PMD_INIT_LOG(ERR,
+			     "failed to query dev specifications, ret = %d",
+			     ret);
+		return ret;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 1914e588d..1810cf0ed 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -381,6 +381,8 @@ struct hns3_hw {
 	uint16_t rss_size_max;      /* HW defined max RSS task queue */
 	uint16_t num_tx_desc;       /* desc num of per tx queue */
 	uint16_t num_rx_desc;       /* desc num of per rx queue */
+	uint32_t mng_entry_num;     /* number of manager table entry */
+	uint32_t mac_entry_num;     /* number of mac-vlan table entry */
 
 	struct rte_ether_addr mc_addrs[HNS3_MC_MACADDR_NUM];
 	int mc_addrs_num; /* Multicast mac addresses number */
@@ -388,6 +390,8 @@ struct hns3_hw {
 	/* The configuration info of RSS */
 	struct hns3_rss_conf rss_info;
 	bool rss_dis_flag; /* disable rss flag. true: disable, false: enable */
+	uint16_t rss_ind_tbl_size;
+	uint16_t rss_key_size;
 
 	uint8_t num_tc;             /* Total number of enabled TCs */
 	uint8_t hw_tc_map;
@@ -406,6 +410,8 @@ struct hns3_hw {
 	uint16_t tx_qnum_per_tc;    /* TX queue number per TC */
 
 	uint32_t capability;
+	uint32_t max_tm_rate;
+	uint8_t max_non_tso_bd_num; /* max BD number of one non-TSO packet */
 
 	struct hns3_port_base_vlan_config port_base_vlan_cfg;
 	/*
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 60b576b02..19a077209 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -1068,6 +1068,49 @@ hns3vf_interrupt_handler(void *param)
 	hns3vf_enable_irq0(hw);
 }
 
+static void
+hns3vf_set_default_dev_specifications(struct hns3_hw *hw)
+{
+	hw->max_non_tso_bd_num = HNS3_MAX_NON_TSO_BD_PER_PKT;
+	hw->rss_ind_tbl_size = HNS3_RSS_IND_TBL_SIZE;
+	hw->rss_key_size = HNS3_RSS_KEY_SIZE;
+}
+
+static void
+hns3vf_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc)
+{
+	struct hns3_dev_specs_0_cmd *req0;
+
+	req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data;
+
+	hw->max_non_tso_bd_num = req0->max_non_tso_bd_num;
+	hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size);
+	hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size);
+}
+
+static int
+hns3vf_query_dev_specifications(struct hns3_hw *hw)
+{
+	struct hns3_cmd_desc desc[HNS3_QUERY_DEV_SPECS_BD_NUM];
+	int ret;
+	int i;
+
+	for (i = 0; i < HNS3_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
+		hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS,
+					  true);
+		desc[i].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
+	}
+	hns3_cmd_setup_basic_desc(&desc[i], HNS3_OPC_QUERY_DEV_SPECS, true);
+
+	ret = hns3_cmd_send(hw, desc, HNS3_QUERY_DEV_SPECS_BD_NUM);
+	if (ret)
+		return ret;
+
+	hns3vf_parse_dev_specifications(hw, desc);
+
+	return 0;
+}
+
 static int
 hns3vf_get_capability(struct hns3_hw *hw)
 {
@@ -1083,11 +1126,25 @@ hns3vf_get_capability(struct hns3_hw *hw)
 	ret = rte_pci_read_config(pci_dev, &revision, HNS3_PCI_REVISION_ID_LEN,
 				  HNS3_PCI_REVISION_ID);
 	if (ret != HNS3_PCI_REVISION_ID_LEN) {
-		PMD_INIT_LOG(ERR, "failed to read pci revision id: %d", ret);
+		PMD_INIT_LOG(ERR, "failed to read pci revision id, ret = %d",
+			     ret);
 		return -EIO;
 	}
 	hw->revision = revision;
 
+	if (revision < PCI_REVISION_ID_HIP09_A) {
+		hns3vf_set_default_dev_specifications(hw);
+		return 0;
+	}
+
+	ret = hns3vf_query_dev_specifications(hw);
+	if (ret) {
+		PMD_INIT_LOG(ERR,
+			     "failed to query dev specifications, ret = %d",
+			     ret);
+		return ret;
+	}
+
 	return 0;
 }
 
-- 
2.27.0


^ permalink raw reply	[flat|nested] 23+ messages in thread

* [dpdk-dev] [PATCH 03/11] net/hns3: compatibility issues about Rx interrupts
  2020-08-25 11:52 [dpdk-dev] [PATCH 00/11] updates for hns3 PMD driver Wei Hu (Xavier)
  2020-08-25 11:52 ` [dpdk-dev] [PATCH 01/11] net/hns3: get device capability from firmware Wei Hu (Xavier)
  2020-08-25 11:52 ` [dpdk-dev] [PATCH 02/11] net/hns3: get dev specifications " Wei Hu (Xavier)
@ 2020-08-25 11:52 ` Wei Hu (Xavier)
  2020-08-25 11:52 ` [dpdk-dev] [PATCH 04/11] net/hns3: compatibility issues about Tx padding short frame Wei Hu (Xavier)
                   ` (8 subsequent siblings)
  11 siblings, 0 replies; 23+ messages in thread
From: Wei Hu (Xavier) @ 2020-08-25 11:52 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit, xavier.huwei

From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>

There are difference about queue's interrupt configurations for different
versions of hardware network engine, such as queue's interrupt mapping
mode, coalesce configuration, etc.

The following uses the configuration differences of the interrupt mapping
mode as an example.
1) For some versions of hardware network engine, such as kunpeng 920,
   because of the hardware constraint, we need to implement clearing the
   mapping relationship configurations by binding all queues to the last
   interrupt vector and reserving the last interrupt vector. This results
   in a decrease of the maximum queues when upper applications call the
   rte_eth_dev_configure API function to enable Rx interrupt.
2) And for other versions, such as kunpeng 930, the hns3 PMD driver can
   map/unmap all interrupt vectors with queues when Rx interrupt is
   enabled.

This patch resolves configuration differences about Rx interrupts based on
kunpeng 920 and kunpeng 930.

Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
---
 drivers/net/hns3/hns3_cmd.h       | 14 +++++---
 drivers/net/hns3/hns3_ethdev.c    | 31 +++++++++--------
 drivers/net/hns3/hns3_ethdev.h    | 56 +++++++++++++++++++++++++++++++
 drivers/net/hns3/hns3_ethdev_vf.c | 29 +++++++++-------
 drivers/net/hns3/hns3_regs.h      |  4 +++
 drivers/net/hns3/hns3_rxtx.c      | 20 ++++++++++-
 drivers/net/hns3/hns3_rxtx.h      |  2 ++
 7 files changed, 123 insertions(+), 33 deletions(-)

diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
index 65aa8bad8..c2b0361b6 100644
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -368,21 +368,25 @@ struct hns3_func_status_cmd {
 	uint8_t rsv[2];
 };
 
-#define HNS3_VEC_NUM_S		0
-#define HNS3_VEC_NUM_M		GENMASK(7, 0)
+#define HNS3_PF_VEC_NUM_S	0
+#define HNS3_PF_VEC_NUM_M	GENMASK(15, 0)
 #define HNS3_MIN_VECTOR_NUM	2 /* one for msi-x, another for IO */
 struct hns3_pf_res_cmd {
 	uint16_t tqp_num;
 	uint16_t buf_size;
 	uint16_t msixcap_localid_ba_nic;
-	uint16_t msixcap_localid_ba_rocee;
-	uint16_t pf_intr_vector_number;
+	uint16_t nic_pf_intr_vector_number;
+	uint16_t roce_pf_intr_vector_number;
 	uint16_t pf_own_fun_number;
 	uint16_t tx_buf_size;
 	uint16_t dv_buf_size;
-	uint32_t rsv[2];
+	uint16_t tqp_num_ext;
+	uint16_t roh_pf_intr_vector_number;
+	uint32_t rsv[1];
 };
 
+#define HNS3_VF_VEC_NUM_S	0
+#define HNS3_VF_VEC_NUM_M	GENMASK(7, 0)
 struct hns3_vf_res_cmd {
 	uint16_t tqp_num;
 	uint16_t reserved;
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 951f26d42..3cc1fbc32 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2208,7 +2208,7 @@ hns3_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id, bool mmap,
 static int
 hns3_init_ring_with_vector(struct hns3_hw *hw)
 {
-	uint8_t vec;
+	uint16_t vec;
 	int ret;
 	int i;
 
@@ -2219,27 +2219,23 @@ hns3_init_ring_with_vector(struct hns3_hw *hw)
 	 * vector. In the initialization clearing the all hardware mapping
 	 * relationship configurations between queues and interrupt vectors is
 	 * needed, so some error caused by the residual configurations, such as
-	 * the unexpected Tx interrupt, can be avoid. Because of the hardware
-	 * constraints in hns3 hardware engine, we have to implement clearing
-	 * the mapping relationship configurations by binding all queues to the
-	 * last interrupt vector and reserving the last interrupt vector. This
-	 * method results in a decrease of the maximum queues when upper
-	 * applications call the rte_eth_dev_configure API function to enable
-	 * Rx interrupt.
+	 * the unexpected Tx interrupt, can be avoid.
 	 */
 	vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
-	/* vec - 1: the last interrupt is reserved */
-	hw->intr_tqps_num = vec > hw->tqps_num ? hw->tqps_num : vec - 1;
+	if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE)
+		vec = vec - 1; /* the last interrupt is reserved */
+	hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);
 	for (i = 0; i < hw->intr_tqps_num; i++) {
 		/*
-		 * Set gap limiter and rate limiter configuration of queue's
-		 * interrupt.
+		 * Set gap limiter/rate limiter/quanity limiter algorithm
+		 * configuration for interrupt coalesce of queue's interrupt.
 		 */
 		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
 				       HNS3_TQP_INTR_GL_DEFAULT);
 		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
 				       HNS3_TQP_INTR_GL_DEFAULT);
 		hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
+		hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT);
 
 		ret = hns3_bind_ring_with_vector(hw, vec, false,
 						 HNS3_RING_TYPE_TX, i);
@@ -2669,8 +2665,8 @@ hns3_query_pf_resource(struct hns3_hw *hw)
 	pf->dv_buf_size = roundup(pf->dv_buf_size, HNS3_BUF_SIZE_UNIT);
 
 	hw->num_msi =
-	    hns3_get_field(rte_le_to_cpu_16(req->pf_intr_vector_number),
-			   HNS3_VEC_NUM_M, HNS3_VEC_NUM_S);
+		hns3_get_field(rte_le_to_cpu_16(req->nic_pf_intr_vector_number),
+			       HNS3_PF_VEC_NUM_M, HNS3_PF_VEC_NUM_S);
 
 	return 0;
 }
@@ -2885,6 +2881,9 @@ hns3_get_capability(struct hns3_hw *hw)
 
 	if (revision < PCI_REVISION_ID_HIP09_A) {
 		hns3_set_default_dev_specifications(hw);
+		hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
+		hw->intr.coalesce_mode = HNS3_INTR_COALESCE_NON_QL;
+		hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
 		return 0;
 	}
 
@@ -2896,6 +2895,10 @@ hns3_get_capability(struct hns3_hw *hw)
 		return ret;
 	}
 
+	hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
+	hw->intr.coalesce_mode = HNS3_INTR_COALESCE_QL;
+	hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
+
 	return 0;
 }
 
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 1810cf0ed..b8eb7ddc1 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -359,6 +359,59 @@ struct hns3_reset_data {
 	struct hns3_wait_data *wait_data;
 };
 
+#define HNS3_INTR_MAPPING_VEC_RSV_ONE		0
+#define HNS3_INTR_MAPPING_VEC_ALL		1
+
+#define HNS3_INTR_COALESCE_NON_QL		0
+#define HNS3_INTR_COALESCE_QL			1
+
+#define HNS3_INTR_COALESCE_GL_UINT_2US		0
+#define HNS3_INTR_COALESCE_GL_UINT_1US		1
+
+struct hns3_queue_intr {
+	/*
+	 * interrupt mapping mode.
+	 * value range:
+	 *      HNS3_INTR_MAPPING_VEC_RSV_ONE/HNS3_INTR_MAPPING_VEC_ALL
+	 *
+	 *  - HNS3_INTR_MAPPING_VEC_RSV_ONE
+	 *     For some versions of hardware network engine, because of the
+	 *     hardware constraint, we need implement clearing the mapping
+	 *     relationship configurations by binding all queues to the last
+	 *     interrupt vector and reserving the last interrupt vector. This
+	 *     method results in a decrease of the maximum queues when upper
+	 *     applications call the rte_eth_dev_configure API function to
+	 *     enable Rx interrupt.
+	 *
+	 *  - HNS3_INTR_MAPPING_VEC_ALL
+	 *     PMD driver can map/unmmap all interrupt vectors with queues When
+	 *     Rx interrupt in enabled.
+	 */
+	uint8_t mapping_mode;
+	/*
+	 * interrupt coalesce mode.
+	 * value range:
+	 *      HNS3_INTR_COALESCE_NON_QL/HNS3_INTR_COALESCE_QL
+	 *
+	 *  - HNS3_INTR_COALESCE_NON_QL
+	 *     For some versions of hardware network engine, hardware doesn't
+	 *     support QL(quanity limiter) algorithm for interrupt coalesce
+	 *     of queue's interrupt.
+	 *
+	 *  - HNS3_INTR_COALESCE_QL
+	 *     In this mode, hardware support QL(quanity limiter) algorithm for
+	 *     interrupt coalesce of queue's interrupt.
+	 */
+	uint8_t coalesce_mode;
+	/*
+	 * The unit of GL(gap limiter) configuration for interrupt coalesce of
+	 * queue's interrupt.
+	 * value range:
+	 *      HNS3_INTR_COALESCE_GL_UINT_2US/HNS3_INTR_COALESCE_GL_UINT_1US
+	 */
+	uint8_t gl_unit;
+};
+
 struct hns3_hw {
 	struct rte_eth_dev_data *data;
 	void *io_base;
@@ -411,6 +464,9 @@ struct hns3_hw {
 
 	uint32_t capability;
 	uint32_t max_tm_rate;
+
+	struct hns3_queue_intr intr;
+
 	uint8_t max_non_tso_bd_num; /* max BD number of one non-TSO packet */
 
 	struct hns3_port_base_vlan_config port_base_vlan_cfg;
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 19a077209..44657d362 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -693,7 +693,7 @@ hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint8_t vector_id,
 static int
 hns3vf_init_ring_with_vector(struct hns3_hw *hw)
 {
-	uint8_t vec;
+	uint16_t vec;
 	int ret;
 	int i;
 
@@ -704,27 +704,23 @@ hns3vf_init_ring_with_vector(struct hns3_hw *hw)
 	 * vector. In the initialization clearing the all hardware mapping
 	 * relationship configurations between queues and interrupt vectors is
 	 * needed, so some error caused by the residual configurations, such as
-	 * the unexpected Tx interrupt, can be avoid. Because of the hardware
-	 * constraints in hns3 hardware engine, we have to implement clearing
-	 * the mapping relationship configurations by binding all queues to the
-	 * last interrupt vector and reserving the last interrupt vector. This
-	 * method results in a decrease of the maximum queues when upper
-	 * applications call the rte_eth_dev_configure API function to enable
-	 * Rx interrupt.
+	 * the unexpected Tx interrupt, can be avoid.
 	 */
 	vec = hw->num_msi - 1; /* vector 0 for misc interrupt, not for queue */
-	/* vec - 1: the last interrupt is reserved */
-	hw->intr_tqps_num = vec > hw->tqps_num ? hw->tqps_num : vec - 1;
+	if (hw->intr.mapping_mode == HNS3_INTR_MAPPING_VEC_RSV_ONE)
+		vec = vec - 1; /* the last interrupt is reserved */
+	hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);
 	for (i = 0; i < hw->intr_tqps_num; i++) {
 		/*
-		 * Set gap limiter and rate limiter configuration of queue's
-		 * interrupt.
+		 * Set gap limiter/rate limiter/quanity limiter algorithm
+		 * configuration for interrupt coalesce of queue's interrupt.
 		 */
 		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
 				       HNS3_TQP_INTR_GL_DEFAULT);
 		hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_TX,
 				       HNS3_TQP_INTR_GL_DEFAULT);
 		hns3_set_queue_intr_rl(hw, i, HNS3_TQP_INTR_RL_DEFAULT);
+		hns3_set_queue_intr_ql(hw, i, HNS3_TQP_INTR_QL_DEFAULT);
 
 		ret = hns3vf_bind_ring_with_vector(hw, vec, false,
 						   HNS3_RING_TYPE_TX, i);
@@ -1134,6 +1130,9 @@ hns3vf_get_capability(struct hns3_hw *hw)
 
 	if (revision < PCI_REVISION_ID_HIP09_A) {
 		hns3vf_set_default_dev_specifications(hw);
+		hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
+		hw->intr.coalesce_mode = HNS3_INTR_COALESCE_NON_QL;
+		hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
 		return 0;
 	}
 
@@ -1145,6 +1144,10 @@ hns3vf_get_capability(struct hns3_hw *hw)
 		return ret;
 	}
 
+	hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
+	hw->intr.coalesce_mode = HNS3_INTR_COALESCE_QL;
+	hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
+
 	return 0;
 }
 
@@ -1616,7 +1619,7 @@ hns3_query_vf_resource(struct hns3_hw *hw)
 
 	req = (struct hns3_vf_res_cmd *)desc.data;
 	num_msi = hns3_get_field(rte_le_to_cpu_16(req->vf_intr_vector_number),
-				 HNS3_VEC_NUM_M, HNS3_VEC_NUM_S);
+				 HNS3_VF_VEC_NUM_M, HNS3_VF_VEC_NUM_S);
 	if (num_msi < HNS3_MIN_VECTOR_NUM) {
 		hns3_err(hw, "Just %u msi resources, not enough for vf(min:%d)",
 			 num_msi, HNS3_MIN_VECTOR_NUM);
diff --git a/drivers/net/hns3/hns3_regs.h b/drivers/net/hns3/hns3_regs.h
index 64bd6931b..bf6df6300 100644
--- a/drivers/net/hns3/hns3_regs.h
+++ b/drivers/net/hns3/hns3_regs.h
@@ -92,13 +92,17 @@
 #define HNS3_TQP_INTR_GL1_REG			0x20200
 #define HNS3_TQP_INTR_GL2_REG			0x20300
 #define HNS3_TQP_INTR_RL_REG			0x20900
+#define HNS3_TQP_INTR_TX_QL_REG			0x20e00
+#define HNS3_TQP_INTR_RX_QL_REG			0x20f00
 
 #define HNS3_TQP_INTR_REG_SIZE			4
 #define HNS3_TQP_INTR_GL_MAX			0x1FE0
 #define HNS3_TQP_INTR_GL_DEFAULT		20
+#define HNS3_TQP_INTR_GL_UNIT_1US		BIT(31)
 #define HNS3_TQP_INTR_RL_MAX			0xEC
 #define HNS3_TQP_INTR_RL_ENABLE_MASK		0x40
 #define HNS3_TQP_INTR_RL_DEFAULT		0
+#define HNS3_TQP_INTR_QL_DEFAULT		0
 
 /* gl_usec convert to hardware count, as writing each 1 represents 2us */
 #define HNS3_GL_USEC_TO_REG(gl_usec)		((gl_usec) >> 1)
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index fc1a256f3..d39576621 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -536,7 +536,10 @@ hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
 		return;
 
 	addr = offset[gl_idx] + queue_id * HNS3_TQP_INTR_REG_SIZE;
-	value = HNS3_GL_USEC_TO_REG(gl_value);
+	if (hw->intr.gl_unit == HNS3_INTR_COALESCE_GL_UINT_1US)
+		value = gl_value | HNS3_TQP_INTR_GL_UNIT_1US;
+	else
+		value = HNS3_GL_USEC_TO_REG(gl_value);
 
 	hns3_write_dev(hw, addr, value);
 }
@@ -557,6 +560,21 @@ hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id, uint16_t rl_value)
 	hns3_write_dev(hw, addr, value);
 }
 
+void
+hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id, uint16_t ql_value)
+{
+	uint32_t addr;
+
+	if (hw->intr.coalesce_mode == HNS3_INTR_COALESCE_NON_QL)
+		return;
+
+	addr = HNS3_TQP_INTR_TX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+	hns3_write_dev(hw, addr, ql_value);
+
+	addr = HNS3_TQP_INTR_RX_QL_REG + queue_id * HNS3_TQP_INTR_REG_SIZE;
+	hns3_write_dev(hw, addr, ql_value);
+}
+
 static void
 hns3_queue_intr_enable(struct hns3_hw *hw, uint16_t queue_id, bool en)
 {
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index f1fb3b56a..15c609ceb 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -397,6 +397,8 @@ void hns3_set_queue_intr_gl(struct hns3_hw *hw, uint16_t queue_id,
 			    uint8_t gl_idx, uint16_t gl_value);
 void hns3_set_queue_intr_rl(struct hns3_hw *hw, uint16_t queue_id,
 			    uint16_t rl_value);
+void hns3_set_queue_intr_ql(struct hns3_hw *hw, uint16_t queue_id,
+			    uint16_t ql_value);
 int hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
 				  uint16_t nb_tx_q);
 int hns3_config_gro(struct hns3_hw *hw, bool en);
-- 
2.27.0


^ permalink raw reply	[flat|nested] 23+ messages in thread

* [dpdk-dev] [PATCH 04/11] net/hns3: compatibility issues about Tx padding short frame
  2020-08-25 11:52 [dpdk-dev] [PATCH 00/11] updates for hns3 PMD driver Wei Hu (Xavier)
                   ` (2 preceding siblings ...)
  2020-08-25 11:52 ` [dpdk-dev] [PATCH 03/11] net/hns3: compatibility issues about Rx interrupts Wei Hu (Xavier)
@ 2020-08-25 11:52 ` Wei Hu (Xavier)
  2020-08-25 11:52 ` [dpdk-dev] [PATCH 05/11] net/hns3: add more hardware error types Wei Hu (Xavier)
                   ` (7 subsequent siblings)
  11 siblings, 0 replies; 23+ messages in thread
From: Wei Hu (Xavier) @ 2020-08-25 11:52 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit, xavier.huwei

From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>

There are differences in padding ultra-short frames in Tx processing for
different versions of the hardware network engine.

If the packet length is less than the minimum packet length supported by
the hardware in the Tx direction, the driver needs to pad it to avoid an
error. The minimum packet length in the Tx direction is 33 bytes on
Kunpeng 920 and 9 bytes on Kunpeng 930.

Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
---
 drivers/net/hns3/hns3_ethdev.c    |  2 ++
 drivers/net/hns3/hns3_ethdev.h    |  8 +++++++-
 drivers/net/hns3/hns3_ethdev_vf.c |  2 ++
 drivers/net/hns3/hns3_rxtx.c      | 11 +++++++----
 drivers/net/hns3/hns3_rxtx.h      |  9 ++++++++-
 5 files changed, 26 insertions(+), 6 deletions(-)

diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 3cc1fbc32..4797cfb2f 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2884,6 +2884,7 @@ hns3_get_capability(struct hns3_hw *hw)
 		hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
 		hw->intr.coalesce_mode = HNS3_INTR_COALESCE_NON_QL;
 		hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
+		hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
 		return 0;
 	}
 
@@ -2898,6 +2899,7 @@ hns3_get_capability(struct hns3_hw *hw)
 	hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
 	hw->intr.coalesce_mode = HNS3_INTR_COALESCE_QL;
 	hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
+	hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN;
 
 	return 0;
 }
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index b8eb7ddc1..a5405147d 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -58,7 +58,8 @@
 #define HNS3_MAX_MTU	(HNS3_MAX_FRAME_LEN - HNS3_ETH_OVERHEAD)
 #define HNS3_DEFAULT_MTU		1500UL
 #define HNS3_DEFAULT_FRAME_LEN		(HNS3_DEFAULT_MTU + HNS3_ETH_OVERHEAD)
-#define HNS3_MIN_PKT_SIZE		60
+#define HNS3_HIP08_MIN_TX_PKT_LEN	33
+#define HNS3_HIP09_MIN_TX_PKT_LEN	9
 
 #define HNS3_4_TCS			4
 #define HNS3_8_TCS			8
@@ -464,6 +465,11 @@ struct hns3_hw {
 
 	uint32_t capability;
 	uint32_t max_tm_rate;
+	/*
+	 * The minimun length of the packet supported by hardware in the Tx
+	 * direction.
+	 */
+	uint32_t min_tx_pkt_len;
 
 	struct hns3_queue_intr intr;
 
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 44657d362..3b2ba69bb 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -1133,6 +1133,7 @@ hns3vf_get_capability(struct hns3_hw *hw)
 		hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
 		hw->intr.coalesce_mode = HNS3_INTR_COALESCE_NON_QL;
 		hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
+		hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
 		return 0;
 	}
 
@@ -1147,6 +1148,7 @@ hns3vf_get_capability(struct hns3_hw *hw)
 	hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
 	hw->intr.coalesce_mode = HNS3_INTR_COALESCE_QL;
 	hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
+	hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN;
 
 	return 0;
 }
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index d39576621..308d0a671 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -1915,6 +1915,7 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
 	txq->configured = true;
 	txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
 				idx * HNS3_TQP_REG_SIZE);
+	txq->min_tx_pkt_len = hw->min_tx_pkt_len;
 	txq->over_length_pkt_cnt = 0;
 	txq->exceed_limit_bd_pkt_cnt = 0;
 	txq->exceed_limit_bd_reassem_fail = 0;
@@ -2743,14 +2744,16 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		}
 
 		/*
-		 * If packet length is less than minimum packet size, driver
-		 * need to pad it.
+		 * If packet length is less than minimum packet length supported
+		 * by hardware in Tx direction, driver need to pad it to avoid
+		 * error.
 		 */
-		if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) < HNS3_MIN_PKT_SIZE)) {
+		if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) <
+						txq->min_tx_pkt_len)) {
 			uint16_t add_len;
 			char *appended;
 
-			add_len = HNS3_MIN_PKT_SIZE -
+			add_len = txq->min_tx_pkt_len -
 					 rte_pktmbuf_pkt_len(tx_pkt);
 			appended = rte_pktmbuf_append(tx_pkt, add_len);
 			if (appended == NULL) {
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index 15c609ceb..c365a2925 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -291,6 +291,12 @@ struct hns3_tx_queue {
 	 */
 	uint16_t pvid_state;
 
+	/*
+	 * The minimun length of the packet supported by hardware in the Tx
+	 * direction.
+	 */
+	uint32_t min_tx_pkt_len;
+
 	bool tx_deferred_start; /* don't start this queue in dev start */
 	bool configured;        /* indicate if tx queue has been configured */
 
@@ -333,7 +339,8 @@ struct hns3_tx_queue {
 	 *
 	 * - pkt_padding_fail_cnt
 	 *     Total count which the packet length is less than minimum packet
-	 *     size HNS3_MIN_PKT_SIZE and fail to be appended with 0.
+	 *     length(struct hns3_tx_queue::min_tx_pkt_len) supported by
+	 *     hardware in Tx direction and fail to be appended with 0.
 	 */
 	uint64_t over_length_pkt_cnt;
 	uint64_t exceed_limit_bd_pkt_cnt;
-- 
2.27.0


^ permalink raw reply	[flat|nested] 23+ messages in thread

* [dpdk-dev] [PATCH 05/11] net/hns3: add more hardware error types
  2020-08-25 11:52 [dpdk-dev] [PATCH 00/11] updates for hns3 PMD driver Wei Hu (Xavier)
                   ` (3 preceding siblings ...)
  2020-08-25 11:52 ` [dpdk-dev] [PATCH 04/11] net/hns3: compatibility issues about Tx padding short frame Wei Hu (Xavier)
@ 2020-08-25 11:52 ` Wei Hu (Xavier)
  2020-09-04 10:34   ` Ferruh Yigit
  2020-08-25 11:53 ` [dpdk-dev] [PATCH 06/11] net/hns3: support a maximun 256 FDIR counter Wei Hu (Xavier)
                   ` (6 subsequent siblings)
  11 siblings, 1 reply; 23+ messages in thread
From: Wei Hu (Xavier) @ 2020-08-25 11:52 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit, xavier.huwei

From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>

The new firmware adds the hardware error types reported by MSI-x mode.
These errors are defined as RAS errors in hardware and belong to a
different type from the MSI-x errors processed by the driver.

When the hardware detects a hardware error that needs to be handled by
the driver — otherwise the device cannot run properly — it reports the
error information through the MSI-x interrupt. After receiving the
interrupt reported by the hardware, the driver queries the error
information, identifies the error level, and then rectifies the error.
All errors are logged. In addition, the hardware may be reset at the
function or global level depending on the error level. After the reset is
complete, the hardware recovers to its normal status.

Signed-off-by: Hongbo Zheng <zhenghongbo3@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
---
 drivers/net/hns3/hns3_cmd.h    |   46 +-
 drivers/net/hns3/hns3_ethdev.c |   67 +-
 drivers/net/hns3/hns3_ethdev.h |   34 +-
 drivers/net/hns3/hns3_intr.c   | 1236 ++++++++++++++++++++++++++------
 drivers/net/hns3/hns3_intr.h   |   40 +-
 drivers/net/hns3/hns3_regs.h   |    3 +
 drivers/net/hns3/hns3_stats.c  |   78 +-
 drivers/net/hns3/hns3_stats.h  |    2 +
 8 files changed, 1249 insertions(+), 257 deletions(-)

diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
index c2b0361b6..87d60539d 100644
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -100,7 +100,6 @@ enum hns3_opcode_type {
 	HNS3_OPC_QUERY_LINK_STATUS      = 0x0307,
 	HNS3_OPC_CONFIG_MAX_FRM_SIZE    = 0x0308,
 	HNS3_OPC_CONFIG_SPEED_DUP       = 0x0309,
-	HNS3_MAC_COMMON_INT_EN          = 0x030E,
 
 	/* PFC/Pause commands */
 	HNS3_OPC_CFG_MAC_PAUSE_EN       = 0x0701,
@@ -153,10 +152,6 @@ enum hns3_opcode_type {
 	HNS3_OPC_RX_COM_THRD_ALLOC      = 0x0904,
 	HNS3_OPC_RX_COM_WL_ALLOC        = 0x0905,
 
-	/* SSU module INT commands */
-	HNS3_SSU_ECC_INT_CMD            = 0x0989,
-	HNS3_SSU_COMMON_INT_CMD         = 0x098C,
-
 	/* TQP management command */
 	HNS3_OPC_SET_TQP_MAP            = 0x0A01,
 
@@ -166,11 +161,6 @@ enum hns3_opcode_type {
 	HNS3_OPC_CFG_COM_TQP_QUEUE      = 0x0B20,
 	HNS3_OPC_RESET_TQP_QUEUE        = 0x0B22,
 
-	/* PPU module intr commands */
-	HNS3_PPU_MPF_ECC_INT_CMD        = 0x0B40,
-	HNS3_PPU_MPF_OTHER_INT_CMD      = 0x0B41,
-	HNS3_PPU_PF_OTHER_INT_CMD       = 0x0B42,
-
 	/* TSO command */
 	HNS3_OPC_TSO_GENERIC_CONFIG     = 0x0C01,
 	HNS3_OPC_GRO_GENERIC_CONFIG     = 0x0C10,
@@ -216,17 +206,30 @@ enum hns3_opcode_type {
 	HNS3_OPC_SFP_GET_SPEED          = 0x7104,
 
 	/* Interrupts commands */
-	HNS3_OPC_ADD_RING_TO_VECTOR	= 0x1503,
-	HNS3_OPC_DEL_RING_TO_VECTOR	= 0x1504,
+	HNS3_OPC_ADD_RING_TO_VECTOR     = 0x1503,
+	HNS3_OPC_DEL_RING_TO_VECTOR     = 0x1504,
 
 	/* Error INT commands */
-	HNS3_QUERY_MSIX_INT_STS_BD_NUM          = 0x1513,
-	HNS3_QUERY_CLEAR_ALL_MPF_MSIX_INT       = 0x1514,
-	HNS3_QUERY_CLEAR_ALL_PF_MSIX_INT        = 0x1515,
-
-	/* PPP module intr commands */
-	HNS3_PPP_CMD0_INT_CMD                   = 0x2100,
-	HNS3_PPP_CMD1_INT_CMD                   = 0x2101,
+	HNS3_OPC_MAC_COMMON_INT_EN              = 0x030E,
+	HNS3_OPC_TM_SCH_ECC_INT_EN              = 0x0829,
+	HNS3_OPC_SSU_ECC_INT_CMD                = 0x0989,
+	HNS3_OPC_SSU_COMMON_INT_CMD             = 0x098C,
+	HNS3_OPC_PPU_MPF_ECC_INT_CMD            = 0x0B40,
+	HNS3_OPC_PPU_MPF_OTHER_INT_CMD          = 0x0B41,
+	HNS3_OPC_PPU_PF_OTHER_INT_CMD           = 0x0B42,
+	HNS3_OPC_COMMON_ECC_INT_CFG             = 0x1505,
+	HNS3_OPC_QUERY_RAS_INT_STS_BD_NUM       = 0x1510,
+	HNS3_OPC_QUERY_CLEAR_MPF_RAS_INT        = 0x1511,
+	HNS3_OPC_QUERY_CLEAR_PF_RAS_INT         = 0x1512,
+	HNS3_OPC_QUERY_MSIX_INT_STS_BD_NUM      = 0x1513,
+	HNS3_OPC_QUERY_CLEAR_ALL_MPF_MSIX_INT   = 0x1514,
+	HNS3_OPC_QUERY_CLEAR_ALL_PF_MSIX_INT    = 0x1515,
+	HNS3_OPC_IGU_EGU_TNL_INT_EN             = 0x1803,
+	HNS3_OPC_IGU_COMMON_INT_EN              = 0x1806,
+	HNS3_OPC_TM_QCN_MEM_INT_CFG             = 0x1A14,
+	HNS3_OPC_PPP_CMD0_INT_CMD               = 0x2100,
+	HNS3_OPC_PPP_CMD1_INT_CMD               = 0x2101,
+	HNS3_OPC_NCSI_INT_EN                    = 0x2401,
 };
 
 #define HNS3_CMD_FLAG_IN	BIT(0)
@@ -236,6 +239,11 @@ enum hns3_opcode_type {
 #define HNS3_CMD_FLAG_NO_INTR	BIT(4)
 #define HNS3_CMD_FLAG_ERR_INTR	BIT(5)
 
+#define HNS3_MPF_RAS_INT_MIN_BD_NUM	10
+#define HNS3_PF_RAS_INT_MIN_BD_NUM	4
+#define HNS3_MPF_MSIX_INT_MIN_BD_NUM	10
+#define HNS3_PF_MSIX_INT_MIN_BD_NUM	4
+
 #define HNS3_BUF_SIZE_UNIT	256
 #define HNS3_BUF_MUL_BY		2
 #define HNS3_BUF_DIV_BY		2
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 4797cfb2f..b9ee11413 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -56,6 +56,9 @@
 #define HNS3_FUN_RST_ING_B		0
 
 #define HNS3_VECTOR0_IMP_RESET_INT_B	1
+#define HNS3_VECTOR0_IMP_CMDQ_ERR_B	4U
+#define HNS3_VECTOR0_IMP_RD_POISON_B	5U
+#define HNS3_VECTOR0_ALL_MSIX_ERR_B	6U
 
 #define HNS3_RESET_WAIT_MS	100
 #define HNS3_RESET_WAIT_CNT	200
@@ -97,12 +100,14 @@ hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
 	struct hns3_hw *hw = &hns->hw;
 	uint32_t vector0_int_stats;
 	uint32_t cmdq_src_val;
+	uint32_t hw_err_src_reg;
 	uint32_t val;
 	enum hns3_evt_cause ret;
 
 	/* fetch the events from their corresponding regs */
 	vector0_int_stats = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG);
 	cmdq_src_val = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG);
+	hw_err_src_reg = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);
 
 	/*
 	 * Assumption: If by any chance reset and mailbox events are reported
@@ -145,8 +150,9 @@ hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
 	}
 
 	/* check for vector0 msix event source */
-	if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK) {
-		val = vector0_int_stats;
+	if (vector0_int_stats & HNS3_VECTOR0_REG_MSIX_MASK ||
+	    hw_err_src_reg & HNS3_RAS_REG_NFE_MASK) {
+		val = vector0_int_stats | hw_err_src_reg;
 		ret = HNS3_VECTOR0_EVENT_ERR;
 		goto out;
 	}
@@ -159,9 +165,9 @@ hns3_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
 		goto out;
 	}
 
-	if (clearval && (vector0_int_stats || cmdq_src_val))
-		hns3_warn(hw, "surprise irq ector0_int_stats:0x%x cmdq_src_val:0x%x",
-			  vector0_int_stats, cmdq_src_val);
+	if (clearval && (vector0_int_stats || cmdq_src_val || hw_err_src_reg))
+		hns3_warn(hw, "vector0_int_stats:0x%x cmdq_src_val:0x%x hw_err_src_reg:0x%x",
+			  vector0_int_stats, cmdq_src_val, hw_err_src_reg);
 	val = vector0_int_stats;
 	ret = HNS3_VECTOR0_EVENT_OTHER;
 out:
@@ -215,11 +221,14 @@ hns3_interrupt_handler(void *param)
 
 	/* vector 0 interrupt is shared with reset and mailbox source events. */
 	if (event_cause == HNS3_VECTOR0_EVENT_ERR) {
+		hns3_warn(hw, "Received err interrupt");
 		hns3_handle_msix_error(hns, &hw->reset.request);
+		hns3_handle_ras_error(hns, &hw->reset.request);
 		hns3_schedule_reset(hns);
-	} else if (event_cause == HNS3_VECTOR0_EVENT_RST)
+	} else if (event_cause == HNS3_VECTOR0_EVENT_RST) {
+		hns3_warn(hw, "Received reset interrupt");
 		hns3_schedule_reset(hns);
-	else if (event_cause == HNS3_VECTOR0_EVENT_MBX)
+	} else if (event_cause == HNS3_VECTOR0_EVENT_MBX)
 		hns3_dev_handle_mbx_msg(hw);
 	else
 		hns3_err(hw, "Received unknown event");
@@ -4425,6 +4434,24 @@ hns3_clear_hw(struct hns3_hw *hw)
 	return 0;
 }
 
+static void
+hns3_config_all_msix_error(struct hns3_hw *hw, bool enable)
+{
+	uint32_t val;
+
+	/*
+	 * The new firmware support report more hardware error types by
+	 * msix mode. These errors are defined as RAS errors in hardware
+	 * and belong to a different type from the MSI-x errors processed
+	 * by the network driver.
+	 *
+	 * Network driver should open the new error report on initialition
+	 */
+	val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
+	hns3_set_bit(val, HNS3_VECTOR0_ALL_MSIX_ERR_B, enable ? 1 : 0);
+	hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, val);
+}
+
 static int
 hns3_init_pf(struct rte_eth_dev *eth_dev)
 {
@@ -4467,6 +4494,8 @@ hns3_init_pf(struct rte_eth_dev *eth_dev)
 		goto err_cmd_init;
 	}
 
+	hns3_config_all_msix_error(hw, true);
+
 	ret = rte_intr_callback_register(&pci_dev->intr_handle,
 					 hns3_interrupt_handler,
 					 eth_dev);
@@ -4550,6 +4579,7 @@ hns3_uninit_pf(struct rte_eth_dev *eth_dev)
 	rte_intr_disable(&pci_dev->intr_handle);
 	hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler,
 			     eth_dev);
+	hns3_config_all_msix_error(hw, false);
 	hns3_cmd_uninit(hw);
 	hns3_cmd_destroy_queue(hw);
 	hw->io_base = NULL;
@@ -5234,6 +5264,28 @@ hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels)
 	return reset_level;
 }
 
+static void
+hns3_record_imp_error(struct hns3_adapter *hns)
+{
+	struct hns3_hw *hw = &hns->hw;
+	uint32_t reg_val;
+
+	reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
+	if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B)) {
+		hns3_warn(hw, "Detected IMP RD poison!");
+		hns3_error_int_stats_add(hns, "IMP_RD_POISON_INT_STS");
+		hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B, 0);
+		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
+	}
+
+	if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B)) {
+		hns3_warn(hw, "Detected IMP CMDQ error!");
+		hns3_error_int_stats_add(hns, "CMDQ_MEM_ECC_INT_STS");
+		hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B, 0);
+		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
+	}
+}
+
 static int
 hns3_prepare_reset(struct hns3_adapter *hns)
 {
@@ -5257,6 +5309,7 @@ hns3_prepare_reset(struct hns3_adapter *hns)
 		hw->reset.stats.request_cnt++;
 		break;
 	case HNS3_IMP_RESET:
+		hns3_record_imp_error(hns);
 		reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
 		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val |
 			       BIT(HNS3_VECTOR0_IMP_RESET_INT_B));
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index a5405147d..ca4cade42 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -491,11 +491,35 @@ struct hns3_hw {
 #define HNS3_FLAG_VNET_BASE_SCH_MODE		2
 
 struct hns3_err_msix_intr_stats {
-	uint64_t mac_afifo_tnl_intr_cnt;
-	uint64_t ppu_mpf_abnormal_intr_st2_cnt;
-	uint64_t ssu_port_based_pf_intr_cnt;
-	uint64_t ppp_pf_abnormal_intr_cnt;
-	uint64_t ppu_pf_abnormal_intr_cnt;
+	uint64_t mac_afifo_tnl_int_cnt;
+	uint64_t ppu_mpf_abn_int_st2_msix_cnt;
+	uint64_t ssu_port_based_pf_int_cnt;
+	uint64_t ppp_pf_abnormal_int_cnt;
+	uint64_t ppu_pf_abnormal_int_msix_cnt;
+
+	uint64_t imp_tcm_ecc_int_cnt;
+	uint64_t cmdq_mem_ecc_int_cnt;
+	uint64_t imp_rd_poison_int_cnt;
+	uint64_t tqp_int_ecc_int_cnt;
+	uint64_t msix_ecc_int_cnt;
+	uint64_t ssu_ecc_multi_bit_int_0_cnt;
+	uint64_t ssu_ecc_multi_bit_int_1_cnt;
+	uint64_t ssu_common_ecc_int_cnt;
+	uint64_t igu_int_cnt;
+	uint64_t ppp_mpf_abnormal_int_st1_cnt;
+	uint64_t ppp_mpf_abnormal_int_st3_cnt;
+	uint64_t ppu_mpf_abnormal_int_st1_cnt;
+	uint64_t ppu_mpf_abn_int_st2_ras_cnt;
+	uint64_t ppu_mpf_abnormal_int_st3_cnt;
+	uint64_t tm_sch_int_cnt;
+	uint64_t qcn_fifo_int_cnt;
+	uint64_t qcn_ecc_int_cnt;
+	uint64_t ncsi_ecc_int_cnt;
+	uint64_t ssu_port_based_err_int_cnt;
+	uint64_t ssu_fifo_overflow_int_cnt;
+	uint64_t ssu_ets_tcg_int_cnt;
+	uint64_t igu_egu_tnl_int_cnt;
+	uint64_t ppu_pf_abnormal_int_ras_cnt;
 };
 
 /* vlan entry information. */
diff --git a/drivers/net/hns3/hns3_intr.c b/drivers/net/hns3/hns3_intr.c
index 46d617c68..3b5cee34e 100644
--- a/drivers/net/hns3/hns3_intr.c
+++ b/drivers/net/hns3/hns3_intr.c
@@ -20,13 +20,6 @@
 
 #define SWITCH_CONTEXT_US	10
 
-/* offset in MSIX bd */
-#define MAC_ERROR_OFFSET	1
-#define PPP_PF_ERROR_OFFSET	2
-#define PPU_PF_ERROR_OFFSET	3
-#define RCB_ERROR_OFFSET	5
-#define RCB_ERROR_STATUS_OFFSET	2
-
 #define HNS3_CHECK_MERGE_CNT(val)			\
 	do {						\
 		if (val)				\
@@ -34,11 +27,11 @@
 	} while (0)
 
 static const char *reset_string[HNS3_MAX_RESET] = {
-	"none",	"vf_func", "vf_pf_func", "vf_full", "flr",
+	"none", "vf_func", "vf_pf_func", "vf_full", "flr",
 	"vf_global", "pf_func", "global", "IMP",
 };
 
-const struct hns3_hw_error mac_afifo_tnl_int[] = {
+static const struct hns3_hw_error mac_afifo_tnl_int[] = {
 	{ .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err",
 	  .reset_level = HNS3_NONE_RESET },
 	{ .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err",
@@ -71,7 +64,14 @@ const struct hns3_hw_error mac_afifo_tnl_int[] = {
 	  .reset_level = HNS3_NONE_RESET}
 };
 
-const struct hns3_hw_error ppu_mpf_abnormal_int_st2[] = {
+static const struct hns3_hw_error ppu_mpf_abnormal_int_st1[] = {
+	{ .int_msk = 0xFFFFFFFF, .msg = "rpu_rx_pkt_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = 0, .msg = NULL,
+	  .reset_level = HNS3_NONE_RESET}
+};
+
+static const struct hns3_hw_error ppu_mpf_abnormal_int_st2_ras[] = {
 	{ .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err",
 	  .reset_level = HNS3_GLOBAL_RESET },
 	{ .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err",
@@ -102,10 +102,6 @@ const struct hns3_hw_error ppu_mpf_abnormal_int_st2[] = {
 	  .reset_level = HNS3_GLOBAL_RESET },
 	{ .int_msk = BIT(27), .msg = "wr_bus_err",
 	  .reset_level = HNS3_GLOBAL_RESET },
-	{ .int_msk = BIT(28), .msg = "reg_search_miss",
-	  .reset_level = HNS3_GLOBAL_RESET },
-	{ .int_msk = BIT(29), .msg = "rx_q_search_miss",
-	  .reset_level = HNS3_NONE_RESET },
 	{ .int_msk = BIT(30), .msg = "ooo_ecc_err_detect",
 	  .reset_level = HNS3_NONE_RESET },
 	{ .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl",
@@ -114,18 +110,23 @@ const struct hns3_hw_error ppu_mpf_abnormal_int_st2[] = {
 	  .reset_level = HNS3_NONE_RESET}
 };
 
-const struct hns3_hw_error ssu_port_based_pf_int[] = {
+static const struct hns3_hw_error ppu_mpf_abnormal_int_st2_msix[] = {
+	{ .int_msk = BIT(29), .msg = "rx_q_search_miss",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = 0, .msg = NULL,
+	  .reset_level = HNS3_NONE_RESET}
+};
+
+static const struct hns3_hw_error ssu_port_based_pf_int[] = {
 	{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
 	  .reset_level = HNS3_GLOBAL_RESET },
 	{ .int_msk = BIT(9), .msg = "low_water_line_err_port",
 	  .reset_level = HNS3_NONE_RESET },
-	{ .int_msk = BIT(10), .msg = "hi_water_line_err_port",
-	  .reset_level = HNS3_GLOBAL_RESET },
 	{ .int_msk = 0, .msg = NULL,
 	  .reset_level = HNS3_NONE_RESET}
 };
 
-const struct hns3_hw_error ppp_pf_abnormal_int[] = {
+static const struct hns3_hw_error ppp_pf_abnormal_int[] = {
 	{ .int_msk = BIT(0), .msg = "tx_vlan_tag_err",
 	  .reset_level = HNS3_NONE_RESET },
 	{ .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err",
@@ -134,23 +135,712 @@ const struct hns3_hw_error ppp_pf_abnormal_int[] = {
 	  .reset_level = HNS3_NONE_RESET}
 };
 
-const struct hns3_hw_error ppu_pf_abnormal_int[] = {
-	{ .int_msk = BIT(0), .msg = "over_8bd_no_fe",
-	  .reset_level = HNS3_FUNC_RESET },
-	{ .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err",
-	  .reset_level = HNS3_NONE_RESET },
-	{ .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err",
-	  .reset_level = HNS3_NONE_RESET },
-	{ .int_msk = BIT(3), .msg = "tx_rd_fbd_poison",
-	  .reset_level = HNS3_FUNC_RESET },
-	{ .int_msk = BIT(4), .msg = "rx_rd_ebd_poison",
-	  .reset_level = HNS3_FUNC_RESET },
-	{ .int_msk = BIT(5), .msg = "buf_wait_timeout",
-	  .reset_level = HNS3_NONE_RESET },
-	{ .int_msk = 0, .msg = NULL,
-	  .reset_level = HNS3_NONE_RESET}
+static const struct hns3_hw_error ppu_pf_abnormal_int_ras[] = {
+	{ .int_msk = BIT(3), .msg = "tx_rd_fbd_poison",
+	  .reset_level = HNS3_FUNC_RESET },
+	{ .int_msk = BIT(4), .msg = "rx_rd_ebd_poison",
+	  .reset_level = HNS3_FUNC_RESET },
+	{ .int_msk = 0, .msg = NULL,
+	  .reset_level = HNS3_NONE_RESET}
+};
+
+static const struct hns3_hw_error ppu_pf_abnormal_int_msix[] = {
+	{ .int_msk = BIT(0), .msg = "over_8bd_no_fe",
+	  .reset_level = HNS3_FUNC_RESET },
+	{ .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = BIT(5), .msg = "buf_wait_timeout",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = 0, .msg = NULL,
+	  .reset_level = HNS3_NONE_RESET}
+};
+
+static const struct hns3_hw_error imp_tcm_ecc_int[] = {
+	{ .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = 0, .msg = NULL,
+	  .reset_level = HNS3_NONE_RESET}
+};
+
+static const struct hns3_hw_error cmdq_mem_ecc_int[] = {
+	{ .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = 0, .msg = NULL,
+	  .reset_level = HNS3_NONE_RESET}
+};
+
+static const struct hns3_hw_error tqp_int_ecc_int[] = {
+	{ .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = 0, .msg = NULL,
+	  .reset_level = HNS3_NONE_RESET}
+};
+
+static const struct hns3_hw_error imp_rd_poison_int[] = {
+	{ .int_msk = BIT(0), .msg = "imp_rd_poison_int",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = 0, .msg = NULL,
+	  .reset_level = HNS3_NONE_RESET}
+};
+
+#define HNS3_SSU_MEM_ECC_ERR(x) \
+	{ .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err", \
+	  .reset_level = HNS3_GLOBAL_RESET }
+
+static const struct hns3_hw_error ssu_ecc_multi_bit_int_0[] = {
+	HNS3_SSU_MEM_ECC_ERR(0),
+	HNS3_SSU_MEM_ECC_ERR(1),
+	HNS3_SSU_MEM_ECC_ERR(2),
+	HNS3_SSU_MEM_ECC_ERR(3),
+	HNS3_SSU_MEM_ECC_ERR(4),
+	HNS3_SSU_MEM_ECC_ERR(5),
+	HNS3_SSU_MEM_ECC_ERR(6),
+	HNS3_SSU_MEM_ECC_ERR(7),
+	HNS3_SSU_MEM_ECC_ERR(8),
+	HNS3_SSU_MEM_ECC_ERR(9),
+	HNS3_SSU_MEM_ECC_ERR(10),
+	HNS3_SSU_MEM_ECC_ERR(11),
+	HNS3_SSU_MEM_ECC_ERR(12),
+	HNS3_SSU_MEM_ECC_ERR(13),
+	HNS3_SSU_MEM_ECC_ERR(14),
+	HNS3_SSU_MEM_ECC_ERR(15),
+	HNS3_SSU_MEM_ECC_ERR(16),
+	HNS3_SSU_MEM_ECC_ERR(17),
+	HNS3_SSU_MEM_ECC_ERR(18),
+	HNS3_SSU_MEM_ECC_ERR(19),
+	HNS3_SSU_MEM_ECC_ERR(20),
+	HNS3_SSU_MEM_ECC_ERR(21),
+	HNS3_SSU_MEM_ECC_ERR(22),
+	HNS3_SSU_MEM_ECC_ERR(23),
+	HNS3_SSU_MEM_ECC_ERR(24),
+	HNS3_SSU_MEM_ECC_ERR(25),
+	HNS3_SSU_MEM_ECC_ERR(26),
+	HNS3_SSU_MEM_ECC_ERR(27),
+	HNS3_SSU_MEM_ECC_ERR(28),
+	HNS3_SSU_MEM_ECC_ERR(29),
+	HNS3_SSU_MEM_ECC_ERR(30),
+	HNS3_SSU_MEM_ECC_ERR(31),
+	{ .int_msk = 0, .msg = NULL,
+	  .reset_level = HNS3_NONE_RESET}
+};
+
+static const struct hns3_hw_error ssu_ecc_multi_bit_int_1[] = {
+	{ .int_msk = BIT(0), .msg = "ssu_mem32_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = 0, .msg = NULL,
+	  .reset_level = HNS3_NONE_RESET}
+};
+
+static const struct hns3_hw_error ssu_common_ecc_int[] = {
+	{ .int_msk = BIT(0), .msg = "buf_sum_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = BIT(1), .msg = "ppp_mb_num_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = BIT(2), .msg = "ppp_mbid_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(3), .msg = "ppp_rlt_mac_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(4), .msg = "ppp_rlt_host_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(5), .msg = "cks_edit_position_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(6), .msg = "cks_edit_condition_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(7), .msg = "vlan_edit_condition_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(8), .msg = "vlan_num_ot_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(9), .msg = "vlan_num_in_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = 0, .msg = NULL,
+	  .reset_level = HNS3_NONE_RESET}
+};
+
+static const struct hns3_hw_error igu_int[] = {
+	{ .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = 0, .msg = NULL,
+	  .reset_level = HNS3_NONE_RESET}
+};
+
+static const struct hns3_hw_error msix_ecc_int[] = {
+	{ .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = 0, .msg = NULL,
+	  .reset_level = HNS3_NONE_RESET}
+};
+
+static const struct hns3_hw_error ppp_mpf_abnormal_int_st1[] = {
+	{ .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(27), .msg = "flow_director_ad_mem0_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(28), .msg = "flow_director_ad_mem1_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(29), .msg = "rx_vlan_tag_memory_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(30), .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = 0, .msg = NULL,
+	  .reset_level = HNS3_NONE_RESET}
+};
+
+/*
+ * PPP MPF abnormal interrupt status 3: maps each status bit to an error
+ * message and the reset level used to recover from it.  The
+ * { .int_msk = 0 } entry is the end-of-table sentinel.
+ */
+static const struct hns3_hw_error ppp_mpf_abnormal_int_st3[] = {
+	{ .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = 0, .msg = NULL,
+	  .reset_level = HNS3_NONE_RESET}
+};
+
+/*
+ * PPU MPF abnormal interrupt status 3.  Only bits 4-7 are decoded here;
+ * bits 0-3 are presumably reserved or handled elsewhere -- confirm
+ * against the hardware error specification.
+ */
+static const struct hns3_hw_error ppu_mpf_abnormal_int_st3[] = {
+	{ .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = 0, .msg = NULL,
+	  .reset_level = HNS3_NONE_RESET}
+};
+
+/*
+ * TM scheduler interrupt status: an ECC multi-bit error (bit 1) plus
+ * per-FIFO read/write errors for the port/PG/priority shaper, offset,
+ * RQ/NQ, RoCE and byte FIFOs.  All are recovered with a global reset.
+ * Bit 0 is not decoded here -- presumably reserved; confirm.
+ */
+static const struct hns3_hw_error tm_sch_int[] = {
+	{ .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(12), .msg = "tm_sch_port_shap_offset_fifo_wr_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(13), .msg = "tm_sch_port_shap_offset_fifo_rd_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(14), .msg = "tm_sch_pg_pshap_offset_fifo_wr_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(15), .msg = "tm_sch_pg_pshap_offset_fifo_rd_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(16), .msg = "tm_sch_pg_cshap_offset_fifo_wr_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(17), .msg = "tm_sch_pg_cshap_offset_fifo_rd_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(18), .msg = "tm_sch_pri_pshap_offset_fifo_wr_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(19), .msg = "tm_sch_pri_pshap_offset_fifo_rd_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(20), .msg = "tm_sch_pri_cshap_offset_fifo_wr_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(21), .msg = "tm_sch_pri_cshap_offset_fifo_rd_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = 0, .msg = NULL,
+	  .reset_level = HNS3_NONE_RESET}
+};
+
+/*
+ * QCN FIFO interrupt status: read/write errors in the per-group (gp0-3)
+ * scheduler and offset FIFOs and the byte-info FIFO.  All entries
+ * request a global reset; table is NULL-terminated.
+ */
+static const struct hns3_hw_error qcn_fifo_int[] = {
+	{ .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = 0, .msg = NULL,
+	  .reset_level = HNS3_NONE_RESET}
+};
+
+/*
+ * QCN ECC interrupt status.  Only odd-numbered bits (multi-bit errors)
+ * are decoded; the even bits presumably report correctable single-bit
+ * errors and are intentionally ignored -- confirm against the hardware
+ * error specification.
+ */
+static const struct hns3_hw_error qcn_ecc_int[] = {
+	{ .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = 0, .msg = NULL,
+	  .reset_level = HNS3_NONE_RESET}
+};
+
+/*
+ * NCSI ECC interrupt status: the single multi-bit error is reported
+ * with HNS3_NONE_RESET, i.e. it is logged but triggers no reset.
+ */
+static const struct hns3_hw_error ncsi_ecc_int[] = {
+	{ .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err",
+	  .reset_level = HNS3_NONE_RESET },
+	{ .int_msk = 0, .msg = NULL,
+	  .reset_level = HNS3_NONE_RESET}
+};
+
+/*
+ * SSU FIFO overflow interrupt status: ingress/egress buffer and FIFO
+ * overflow plus bitmap-empty conditions.  Every entry requests a
+ * global reset; table is NULL-terminated.
+ */
+static const struct hns3_hw_error ssu_fifo_overflow_int[] = {
+	{ .int_msk = BIT(0), .msg = "ig_mac_inf_int",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(1), .msg = "ig_host_inf_int",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(2), .msg = "ig_roc_buf_int",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(3), .msg = "ig_host_data_fifo_int",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(4), .msg = "ig_host_key_fifo_int",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(5), .msg = "tx_qcn_fifo_int",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(6), .msg = "rx_qcn_fifo_int",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(9), .msg = "qm_eof_fifo_int",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(10), .msg = "mb_rlt_fifo_int",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(15), .msg = "host_cmd_fifo_int",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(16), .msg = "mac_cmd_fifo_int",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(19), .msg = "dup_bitmap_empty_int",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = 0, .msg = NULL,
+	  .reset_level = HNS3_NONE_RESET}
+};
+
+/*
+ * SSU ETS TCG interrupt status: ETS read/write errors on the Rx and Tx
+ * TCG paths, all recovered with a global reset.
+ */
+static const struct hns3_hw_error ssu_ets_tcg_int[] = {
+	{ .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = 0, .msg = NULL,
+	  .reset_level = HNS3_NONE_RESET}
+};
+
+/*
+ * IGU/EGU tunnel interrupt status: Rx/Tx buffer and STP FIFO
+ * overflow/underflow conditions, all recovered with a global reset.
+ */
+static const struct hns3_hw_error igu_egu_tnl_int[] = {
+	{ .int_msk = BIT(0), .msg = "rx_buf_overflow",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(2), .msg = "rx_stp_fifo_underflow",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(3), .msg = "tx_buf_overflow",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(4), .msg = "tx_buf_underrun",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(5), .msg = "rx_stp_buf_overflow",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = 0, .msg = NULL,
+	  .reset_level = HNS3_NONE_RESET}
+};
+
+/*
+ * SSU port-based error interrupt status.  Note the asymmetric recovery:
+ * bit 0 (roc_pkt_without_key_port) only needs a function-level reset,
+ * every other error requests a global reset.  Bits 9-10 are not decoded
+ * here -- presumably reserved; confirm.
+ */
+static const struct hns3_hw_error ssu_port_based_err_int[] = {
+	{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
+	  .reset_level = HNS3_FUNC_RESET },
+	{ .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(2), .msg = "igu_pkt_without_key_port",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(3), .msg = "roc_eof_mis_match_port",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(5), .msg = "igu_eof_mis_match_port",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(6), .msg = "roc_sof_mis_match_port",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(8), .msg = "igu_sof_mis_match_port",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(11), .msg = "ets_rd_int_rx_port",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(12), .msg = "ets_wr_int_rx_port",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(13), .msg = "ets_rd_int_tx_port",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = BIT(14), .msg = "ets_wr_int_tx_port",
+	  .reset_level = HNS3_GLOBAL_RESET },
+	{ .int_msk = 0, .msg = NULL,
+	  .reset_level = HNS3_NONE_RESET}
+};
+
+/*
+ * MPF RAS error summary: each entry names one hardware status register,
+ * locates its value within the queried command result (desc_offset
+ * selects the descriptor, data_offset the data word -- the consumer is
+ * not visible in this chunk, so confirm the indexing convention) and
+ * links the per-bit decode table defined above.  Terminated by the
+ * .msg == NULL entry.
+ */
+static const struct hns3_hw_error_desc mpf_ras_err_tbl[] = {
+	{ .desc_offset = 0, .data_offset = 0,
+	  .msg = "IMP_TCM_ECC_INT_STS",
+	  .hw_err = imp_tcm_ecc_int },
+	{ .desc_offset = 0, .data_offset = 1,
+	  .msg = "CMDQ_MEM_ECC_INT_STS",
+	  .hw_err = cmdq_mem_ecc_int },
+	{ .desc_offset = 0, .data_offset = 2,
+	  .msg = "IMP_RD_POISON_INT_STS",
+	  .hw_err = imp_rd_poison_int },
+	{ .desc_offset = 0, .data_offset = 3,
+	  .msg = "TQP_INT_ECC_INT_STS",
+	  .hw_err = tqp_int_ecc_int },
+	{ .desc_offset = 0, .data_offset = 4,
+	  .msg = "MSIX_ECC_INT_STS",
+	  .hw_err = msix_ecc_int },
+	{ .desc_offset = 2, .data_offset = 2,
+	  .msg = "SSU_ECC_MULTI_BIT_INT_0",
+	  .hw_err = ssu_ecc_multi_bit_int_0 },
+	{ .desc_offset = 2, .data_offset = 3,
+	  .msg = "SSU_ECC_MULTI_BIT_INT_1",
+	  .hw_err = ssu_ecc_multi_bit_int_1 },
+	{ .desc_offset = 2, .data_offset = 4,
+	  .msg = "SSU_COMMON_ERR_INT",
+	  .hw_err = ssu_common_ecc_int },
+	{ .desc_offset = 3, .data_offset = 0,
+	  .msg = "IGU_INT_STS",
+	  .hw_err = igu_int },
+	{ .desc_offset = 4, .data_offset = 1,
+	  .msg = "PPP_MPF_ABNORMAL_INT_ST1",
+	  .hw_err = ppp_mpf_abnormal_int_st1 },
+	{ .desc_offset = 4, .data_offset = 3,
+	  .msg = "PPP_MPF_ABNORMAL_INT_ST3",
+	  .hw_err = ppp_mpf_abnormal_int_st3 },
+	{ .desc_offset = 5, .data_offset = 1,
+	  .msg = "PPU_MPF_ABNORMAL_INT_ST1",
+	  .hw_err = ppu_mpf_abnormal_int_st1 },
+	{ .desc_offset = 5, .data_offset = 2,
+	  .msg = "PPU_MPF_ABNORMAL_INT_ST2_RAS",
+	  .hw_err = ppu_mpf_abnormal_int_st2_ras },
+	{ .desc_offset = 5, .data_offset = 3,
+	  .msg = "PPU_MPF_ABNORMAL_INT_ST3",
+	  .hw_err = ppu_mpf_abnormal_int_st3 },
+	{ .desc_offset = 6, .data_offset = 0,
+	  .msg = "TM_SCH_RINT",
+	  .hw_err = tm_sch_int },
+	{ .desc_offset = 7, .data_offset = 0,
+	  .msg = "QCN_FIFO_RINT",
+	  .hw_err = qcn_fifo_int },
+	{ .desc_offset = 7, .data_offset = 1,
+	  .msg = "QCN_ECC_RINT",
+	  .hw_err = qcn_ecc_int },
+	{ .desc_offset = 9, .data_offset = 0,
+	  .msg = "NCSI_ECC_INT_RPT",
+	  .hw_err = ncsi_ecc_int },
+	{ .desc_offset = 0, .data_offset = 0,
+	  .msg = NULL,
+	  .hw_err = NULL }
+};
+
+/*
+ * Per-PF RAS error summary, same layout as mpf_ras_err_tbl:
+ * (descriptor, data word) location of each status register plus its
+ * per-bit decode table; NULL-terminated.
+ */
+static const struct hns3_hw_error_desc pf_ras_err_tbl[] = {
+	{ .desc_offset = 0, .data_offset = 0,
+	  .msg = "SSU_PORT_BASED_ERR_INT_RAS",
+	  .hw_err = ssu_port_based_err_int },
+	{ .desc_offset = 0, .data_offset = 1,
+	  .msg = "SSU_FIFO_OVERFLOW_INT",
+	  .hw_err = ssu_fifo_overflow_int },
+	{ .desc_offset = 0, .data_offset = 2,
+	  .msg = "SSU_ETS_TCG_INT",
+	  .hw_err = ssu_ets_tcg_int },
+	{ .desc_offset = 1, .data_offset = 0,
+	  .msg = "IGU_EGU_TNL_INT_STS",
+	  .hw_err = igu_egu_tnl_int },
+	{ .desc_offset = 3, .data_offset = 0,
+	  .msg = "PPU_PF_ABNORMAL_INT_ST_RAS",
+	  .hw_err = ppu_pf_abnormal_int_ras },
+	{ .desc_offset = 0, .data_offset = 0,
+	  .msg = NULL,
+	  .hw_err = NULL }
+};
+
+/*
+ * MPF MSIX error summary, same layout as mpf_ras_err_tbl;
+ * NULL-terminated.
+ */
+static const struct hns3_hw_error_desc mpf_msix_err_tbl[] = {
+	{ .desc_offset = 1, .data_offset = 0,
+	  .msg = "MAC_AFIFO_TNL_INT_R",
+	  .hw_err = mac_afifo_tnl_int },
+	{ .desc_offset = 5, .data_offset = 2,
+	  .msg = "PPU_MPF_ABNORMAL_INT_ST2_MSIX",
+	  .hw_err = ppu_mpf_abnormal_int_st2_msix },
+	{ .desc_offset = 0, .data_offset = 0,
+	  .msg = NULL,
+	  .hw_err = NULL }
+};
+
+/*
+ * Per-PF MSIX error summary, same layout as mpf_ras_err_tbl;
+ * NULL-terminated.
+ */
+static const struct hns3_hw_error_desc pf_msix_err_tbl[] = {
+	{ .desc_offset = 0, .data_offset = 0,
+	  .msg = "SSU_PORT_BASED_ERR_INT_MSIX",
+	  .hw_err = ssu_port_based_pf_int },
+	{ .desc_offset = 2, .data_offset = 0,
+	  .msg = "PPP_PF_ABNORMAL_INT_ST0",
+	  .hw_err = ppp_pf_abnormal_int },
+	{ .desc_offset = 3, .data_offset = 0,
+	  .msg = "PPU_PF_ABNORMAL_INT_ST_MSIX",
+	  .hw_err = ppu_pf_abnormal_int_msix },
+	{ .desc_offset = 0, .data_offset = 0,
+	  .msg = NULL,
+	  .hw_err = NULL }
+};
+
+/*
+ * Hardware error report categories: MPF vs per-PF, for the MSIX and
+ * RAS reporting paths.  NOTE(review): the consumers of this enum are
+ * outside this chunk and "MPF" is inferred from the table names above
+ * (errors common to all functions) -- confirm against usage.
+ */
+enum hns3_hw_err_type {
+	MPF_MSIX_ERR,
+	PF_MSIX_ERR,
+	MPF_RAS_ERR,
+	PF_RAS_ERR,
 };
 
+/*
+ * Enable or disable the NCSI hardware error interrupt via the
+ * HNS3_OPC_NCSI_INT_EN firmware command.  When disabling, data[0] is
+ * left at the value prepared by hns3_cmd_setup_basic_desc() --
+ * presumably zero, which clears the enable bit; confirm against the
+ * command-setup helper.  Returns the hns3_cmd_send() result
+ * (0 on success).
+ */
+static int
+hns3_config_ncsi_hw_err_int(struct hns3_adapter *hns, bool en)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_cmd_desc desc;
+	int ret;
+
+	/* configure NCSI error interrupts */
+	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_NCSI_INT_EN, false);
+	if (en)
+		desc.data[0] = rte_cpu_to_le_32(HNS3_NCSI_ERR_INT_EN);
+
+	ret = hns3_cmd_send(hw, &desc, 1);
+	if (ret)
+		hns3_err(hw, "fail to %s NCSI error interrupts, ret = %d",
+			 en ? "enable" : "disable", ret);
+
+	return ret;
+}
+
+/*
+ * Enable/disable IGU/EGU hardware error interrupts in three steps:
+ * IGU common errors, IGU-EGU tunnel errors, and finally the NCSI error
+ * interrupt via hns3_config_ncsi_hw_err_int().  Stops at and returns
+ * the error code of the first failing firmware command.
+ */
+static int
+enable_igu_egu_err_intr(struct hns3_adapter *hns, bool en)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_cmd_desc desc;
+	int ret;
+
+	/* configure IGU,EGU error interrupts */
+	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_IGU_COMMON_INT_EN, false);
+	if (en)
+		desc.data[0] = rte_cpu_to_le_32(HNS3_IGU_ERR_INT_ENABLE);
+	else
+		desc.data[0] = rte_cpu_to_le_32(HNS3_IGU_ERR_INT_DISABLE);
+
+	desc.data[1] = rte_cpu_to_le_32(HNS3_IGU_ERR_INT_EN_MASK);
+
+	ret = hns3_cmd_send(hw, &desc, 1);
+	if (ret) {
+		hns3_err(hw, "fail to %s IGU common interrupts, ret = %d",
+			 en ? "enable" : "disable", ret);
+		return ret;
+	}
+
+	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_IGU_EGU_TNL_INT_EN, false);
+	if (en)
+		desc.data[0] = rte_cpu_to_le_32(HNS3_IGU_TNL_ERR_INT_EN);
+
+	desc.data[1] = rte_cpu_to_le_32(HNS3_IGU_TNL_ERR_INT_EN_MASK);
+
+	ret = hns3_cmd_send(hw, &desc, 1);
+	if (ret) {
+		hns3_err(hw, "fail to %s IGU-EGU TNL interrupts, ret = %d",
+			 en ? "enable" : "disable", ret);
+		return ret;
+	}
+
+	return hns3_config_ncsi_hw_err_int(hns, en);
+}
+
 static int
 config_ppp_err_intr(struct hns3_adapter *hns, uint32_t cmd, bool en)
 {
@@ -163,7 +853,7 @@ config_ppp_err_intr(struct hns3_adapter *hns, uint32_t cmd, bool en)
 	desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
 	hns3_cmd_setup_basic_desc(&desc[1], cmd, false);
 
-	if (cmd == HNS3_PPP_CMD0_INT_CMD) {
+	if (cmd == HNS3_OPC_PPP_CMD0_INT_CMD) {
 		if (en) {
 			desc[0].data[0] =
 				rte_cpu_to_le_32(HNS3_PPP_MPF_ECC_ERR_INT0_EN);
@@ -179,7 +869,7 @@ config_ppp_err_intr(struct hns3_adapter *hns, uint32_t cmd, bool en)
 			rte_cpu_to_le_32(HNS3_PPP_MPF_ECC_ERR_INT1_EN_MASK);
 		desc[1].data[2] =
 			rte_cpu_to_le_32(HNS3_PPP_PF_ERR_INT_EN_MASK);
-	} else if (cmd == HNS3_PPP_CMD1_INT_CMD) {
+	} else if (cmd == HNS3_OPC_PPP_CMD1_INT_CMD) {
 		if (en) {
 			desc[0].data[0] =
 				rte_cpu_to_le_32(HNS3_PPP_MPF_ECC_ERR_INT2_EN);
@@ -195,7 +885,8 @@ config_ppp_err_intr(struct hns3_adapter *hns, uint32_t cmd, bool en)
 
 	ret = hns3_cmd_send(hw, &desc[0], 2);
 	if (ret)
-		hns3_err(hw, "fail to configure PPP error int: %d", ret);
+		hns3_err(hw, "fail to %s PPP error int, ret = %d",
+		en ? "enable" : "disable", ret);
 
 	return ret;
 }
@@ -205,11 +896,11 @@ enable_ppp_err_intr(struct hns3_adapter *hns, bool en)
 {
 	int ret;
 
-	ret = config_ppp_err_intr(hns, HNS3_PPP_CMD0_INT_CMD, en);
+	ret = config_ppp_err_intr(hns, HNS3_OPC_PPP_CMD0_INT_CMD, en);
 	if (ret)
 		return ret;
 
-	return config_ppp_err_intr(hns, HNS3_PPP_CMD1_INT_CMD, en);
+	return config_ppp_err_intr(hns, HNS3_OPC_PPP_CMD1_INT_CMD, en);
 }
 
 static int
@@ -220,9 +911,9 @@ enable_ssu_err_intr(struct hns3_adapter *hns, bool en)
 	int ret;
 
 	/* configure SSU ecc error interrupts */
-	hns3_cmd_setup_basic_desc(&desc[0], HNS3_SSU_ECC_INT_CMD, false);
+	hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_SSU_ECC_INT_CMD, false);
 	desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
-	hns3_cmd_setup_basic_desc(&desc[1], HNS3_SSU_ECC_INT_CMD, false);
+	hns3_cmd_setup_basic_desc(&desc[1], HNS3_OPC_SSU_ECC_INT_CMD, false);
 	if (en) {
 		desc[0].data[0] =
 			rte_cpu_to_le_32(HNS3_SSU_1BIT_ECC_ERR_INT_EN);
@@ -239,15 +930,15 @@ enable_ssu_err_intr(struct hns3_adapter *hns, bool en)
 
 	ret = hns3_cmd_send(hw, &desc[0], 2);
 	if (ret) {
-		hns3_err(hw, "fail to configure SSU ECC error interrupt: %d",
-			 ret);
+		hns3_err(hw, "fail to %s SSU ECC error interrupt, ret = %d",
+			 en ? "enable" : "disable", ret);
 		return ret;
 	}
 
 	/* configure SSU common error interrupts */
-	hns3_cmd_setup_basic_desc(&desc[0], HNS3_SSU_COMMON_INT_CMD, false);
+	hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_SSU_COMMON_INT_CMD, false);
 	desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
-	hns3_cmd_setup_basic_desc(&desc[1], HNS3_SSU_COMMON_INT_CMD, false);
+	hns3_cmd_setup_basic_desc(&desc[1], HNS3_OPC_SSU_COMMON_INT_CMD, false);
 
 	if (en) {
 		desc[0].data[0] = rte_cpu_to_le_32(HNS3_SSU_COMMON_INT_EN);
@@ -264,8 +955,8 @@ enable_ssu_err_intr(struct hns3_adapter *hns, bool en)
 
 	ret = hns3_cmd_send(hw, &desc[0], 2);
 	if (ret)
-		hns3_err(hw, "fail to configure SSU COMMON error intr: %d",
-			 ret);
+		hns3_err(hw, "fail to %s SSU COMMON error intr, ret = %d",
+			 en ? "enable" : "disable", ret);
 
 	return ret;
 }
@@ -279,7 +970,7 @@ config_ppu_err_intrs(struct hns3_adapter *hns, uint32_t cmd, bool en)
 
 	/* configure PPU error interrupts */
 	switch (cmd) {
-	case HNS3_PPU_MPF_ECC_INT_CMD:
+	case HNS3_OPC_PPU_MPF_ECC_INT_CMD:
 		hns3_cmd_setup_basic_desc(&desc[0], cmd, false);
 		desc[0].flag |= HNS3_CMD_FLAG_NEXT;
 		hns3_cmd_setup_basic_desc(&desc[1], cmd, false);
@@ -296,14 +987,14 @@ config_ppu_err_intrs(struct hns3_adapter *hns, uint32_t cmd, bool en)
 		desc[1].data[3] |= HNS3_PPU_MPF_ABNORMAL_INT3_EN_MASK;
 		num = 2;
 		break;
-	case HNS3_PPU_MPF_OTHER_INT_CMD:
+	case HNS3_OPC_PPU_MPF_OTHER_INT_CMD:
 		hns3_cmd_setup_basic_desc(&desc[0], cmd, false);
 		if (en)
 			desc[0].data[0] = HNS3_PPU_MPF_ABNORMAL_INT2_EN2;
 
 		desc[0].data[2] = HNS3_PPU_MPF_ABNORMAL_INT2_EN2_MASK;
 		break;
-	case HNS3_PPU_PF_OTHER_INT_CMD:
+	case HNS3_OPC_PPU_PF_OTHER_INT_CMD:
 		hns3_cmd_setup_basic_desc(&desc[0], cmd, false);
 		if (en)
 			desc[0].data[0] = HNS3_PPU_PF_ABNORMAL_INT_EN;
@@ -326,24 +1017,104 @@ enable_ppu_err_intr(struct hns3_adapter *hns, bool en)
 	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
-	ret = config_ppu_err_intrs(hns, HNS3_PPU_MPF_ECC_INT_CMD, en);
+	ret = config_ppu_err_intrs(hns, HNS3_OPC_PPU_MPF_ECC_INT_CMD, en);
 	if (ret) {
-		hns3_err(hw, "fail to configure PPU MPF ECC error intr: %d",
-			 ret);
+		hns3_err(hw, "fail to %s PPU MPF ECC error intr, ret = %d",
+			 en ? "enable" : "disable", ret);
 		return ret;
 	}
 
-	ret = config_ppu_err_intrs(hns, HNS3_PPU_MPF_OTHER_INT_CMD, en);
+	ret = config_ppu_err_intrs(hns, HNS3_OPC_PPU_MPF_OTHER_INT_CMD, en);
 	if (ret) {
-		hns3_err(hw, "fail to configure PPU MPF other intr: %d",
-			 ret);
+		hns3_err(hw, "fail to %s PPU MPF other intr, ret = %d",
+			 en ? "enable" : "disable", ret);
 		return ret;
 	}
 
-	ret = config_ppu_err_intrs(hns, HNS3_PPU_PF_OTHER_INT_CMD, en);
+	ret = config_ppu_err_intrs(hns, HNS3_OPC_PPU_PF_OTHER_INT_CMD, en);
 	if (ret)
-		hns3_err(hw, "fail to configure PPU PF error interrupts: %d",
-			 ret);
+		hns3_err(hw, "fail to %s PPU PF error interrupts, ret = %d",
+			 en ? "enable" : "disable", ret);
+	return ret;
+}
+
+/*
+ * Enable/disable TM (traffic manager) hardware error interrupts:
+ * first the TM scheduler ECC interrupt, then the TM QCN memory error
+ * interrupt.  The QCN configuration is read back and the same
+ * descriptor is reused for the write (read-modify-write).
+ * NOTE(review): when en is false, data[1] is sent back unmodified from
+ * the read, so this path may not actually clear the QCN enable bits --
+ * confirm whether hns3_cmd_reuse_desc() resets the data words.
+ */
+static int
+enable_tm_err_intr(struct hns3_adapter *hns, bool en)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_cmd_desc desc;
+	int ret;
+
+	/* configure TM SCH error interrupts */
+	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_SCH_ECC_INT_EN, false);
+	if (en)
+		desc.data[0] = rte_cpu_to_le_32(HNS3_TM_SCH_ECC_ERR_INT_EN);
+
+	ret = hns3_cmd_send(hw, &desc, 1);
+	if (ret) {
+		hns3_err(hw, "fail to %s TM SCH interrupts, ret = %d",
+			 en ? "enable" : "disable", ret);
+		return ret;
+	}
+
+	/* configure TM QCN hw errors */
+	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_TM_QCN_MEM_INT_CFG, true);
+	ret = hns3_cmd_send(hw, &desc, 1);
+	if (ret) {
+		hns3_err(hw, "fail to read TM QCN CFG status, ret = %d\n", ret);
+		return ret;
+	}
+
+	hns3_cmd_reuse_desc(&desc, false);
+	if (en)
+		desc.data[1] = rte_cpu_to_le_32(HNS3_TM_QCN_MEM_ERR_INT_EN);
+
+	ret = hns3_cmd_send(hw, &desc, 1);
+	if (ret)
+		hns3_err(hw, "fail to %s TM QCN mem errors, ret = %d\n",
+			 en ? "enable" : "disable", ret);
+
+	return ret;
+}
+
+/*
+ * Enable/disable "common" hardware error interrupts (IMP TCM/ITCM4,
+ * NIC command-queue ECC, IMP read-poison, TQP and MSIX SRAM ECC) with
+ * one two-descriptor HNS3_OPC_COMMON_ECC_INT_CFG command: desc[0]
+ * carries the enable bits (written only when enabling), desc[1] always
+ * carries the corresponding write-mask bits.
+ */
+static int
+enable_common_err_intr(struct hns3_adapter *hns, bool en)
+{
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_cmd_desc desc[2];
+	int ret;
+
+	/* configure common error interrupts */
+	hns3_cmd_setup_basic_desc(&desc[0], HNS3_OPC_COMMON_ECC_INT_CFG, false);
+	desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
+	hns3_cmd_setup_basic_desc(&desc[1], HNS3_OPC_COMMON_ECC_INT_CFG, false);
+
+	if (en) {
+		desc[0].data[0] =
+			rte_cpu_to_le_32(HNS3_IMP_TCM_ECC_ERR_INT_EN);
+		desc[0].data[2] =
+			rte_cpu_to_le_32(HNS3_CMDQ_NIC_ECC_ERR_INT_EN);
+		desc[0].data[3] =
+			rte_cpu_to_le_32(HNS3_IMP_RD_POISON_ERR_INT_EN);
+		desc[0].data[4] =
+			rte_cpu_to_le_32(HNS3_TQP_ECC_ERR_INT_EN |
+					 HNS3_MSIX_SRAM_ECC_ERR_INT_EN);
+		desc[0].data[5] =
+			rte_cpu_to_le_32(HNS3_IMP_ITCM4_ECC_ERR_INT_EN);
+	}
+
+	desc[1].data[0] = rte_cpu_to_le_32(HNS3_IMP_TCM_ECC_ERR_INT_EN_MASK);
+	desc[1].data[2] = rte_cpu_to_le_32(HNS3_CMDQ_NIC_ECC_ERR_INT_EN_MASK);
+	desc[1].data[3] = rte_cpu_to_le_32(HNS3_IMP_RD_POISON_ERR_INT_EN_MASK);
+	desc[1].data[4] = rte_cpu_to_le_32(HNS3_TQP_ECC_ERR_INT_EN_MASK |
+				      HNS3_MSIX_SRAM_ECC_ERR_INT_EN_MASK);
+	desc[1].data[5] = rte_cpu_to_le_32(HNS3_IMP_ITCM4_ECC_ERR_INT_EN_MASK);
+
+	ret = hns3_cmd_send(hw, &desc[0], RTE_DIM(desc));
+	if (ret)
+		hns3_err(hw, "fail to %s common err interrupts, ret = %d\n",
+			 en ? "enable" : "disable", ret);
+
 	return ret;
 }
 
@@ -355,7 +1126,7 @@ enable_mac_err_intr(struct hns3_adapter *hns, bool en)
 	int ret;
 
 	/* configure MAC common error interrupts */
-	hns3_cmd_setup_basic_desc(&desc, HNS3_MAC_COMMON_INT_EN, false);
+	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MAC_COMMON_INT_EN, false);
 	if (en)
 		desc.data[0] = rte_cpu_to_le_32(HNS3_MAC_COMMON_ERR_INT_EN);
 
@@ -363,13 +1134,17 @@ enable_mac_err_intr(struct hns3_adapter *hns, bool en)
 
 	ret = hns3_cmd_send(hw, &desc, 1);
 	if (ret)
-		hns3_err(hw, "fail to configure MAC COMMON error intr: %d",
-			 ret);
+		hns3_err(hw, "fail to %s MAC COMMON error intr: %d",
+			 en ? "enable" : "disable", ret);
 
 	return ret;
 }
 
 static const struct hns3_hw_blk hw_blk[] = {
+	{
+		.name = "IGU_EGU",
+		.enable_err_intr = enable_igu_egu_err_intr,
+	},
 	{
 		.name = "PPP",
 		.enable_err_intr = enable_ppp_err_intr,
@@ -382,6 +1157,14 @@ static const struct hns3_hw_blk hw_blk[] = {
 		.name = "PPU",
 		.enable_err_intr = enable_ppu_err_intr,
 	},
+	{
+		.name = "TM",
+		.enable_err_intr = enable_tm_err_intr,
+	},
+	{
+		.name = "COMMON",
+		.enable_err_intr = enable_common_err_intr,
+	},
 	{
 		.name = "MAC",
 		.enable_err_intr = enable_mac_err_intr,
@@ -426,6 +1209,7 @@ hns3_find_highest_level(struct hns3_adapter *hns, const char *reg,
 				reset_level = err->reset_level;
 				need_reset = true;
 			}
+			hns3_error_int_stats_add(hns, reg);
 		}
 		err++;
 	}
@@ -436,224 +1220,248 @@ hns3_find_highest_level(struct hns3_adapter *hns, const char *reg,
 }
 
 static int
-query_num_bds_in_msix(struct hns3_hw *hw, struct hns3_cmd_desc *desc_bd)
+query_num_bds(struct hns3_hw *hw, bool is_ras, uint32_t *mpf_bd_num,
+	      uint32_t *pf_bd_num)
 {
+	uint32_t mpf_min_bd_num, pf_min_bd_num;
+	uint32_t mpf_bd_num_val, pf_bd_num_val;
+	enum hns3_opcode_type opcode;
+	struct hns3_cmd_desc desc;
 	int ret;
 
-	hns3_cmd_setup_basic_desc(desc_bd, HNS3_QUERY_MSIX_INT_STS_BD_NUM,
-				  true);
-	ret = hns3_cmd_send(hw, desc_bd, 1);
-	if (ret)
-		hns3_err(hw, "query num bds in msix failed: %d", ret);
-
-	return ret;
-}
+	if (is_ras) {
+		opcode = HNS3_OPC_QUERY_RAS_INT_STS_BD_NUM;
+		mpf_min_bd_num = HNS3_MPF_RAS_INT_MIN_BD_NUM;
+		pf_min_bd_num = HNS3_PF_RAS_INT_MIN_BD_NUM;
+	} else {
+		opcode = HNS3_OPC_QUERY_MSIX_INT_STS_BD_NUM;
+		mpf_min_bd_num = HNS3_MPF_MSIX_INT_MIN_BD_NUM;
+		pf_min_bd_num = HNS3_PF_MSIX_INT_MIN_BD_NUM;
+	}
 
-static int
-query_all_mpf_msix_err(struct hns3_hw *hw, struct hns3_cmd_desc *desc,
-		       uint32_t mpf_bd_num)
-{
-	int ret;
+	hns3_cmd_setup_basic_desc(&desc, opcode, true);
+	ret = hns3_cmd_send(hw, &desc, 1);
+	if (ret) {
+		hns3_err(hw, "query num bds in msix failed, ret = %d", ret);
+		return ret;
+	}
 
-	hns3_cmd_setup_basic_desc(desc, HNS3_QUERY_CLEAR_ALL_MPF_MSIX_INT,
-				  true);
-	desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
+	mpf_bd_num_val = rte_le_to_cpu_32(desc.data[0]);
+	pf_bd_num_val = rte_le_to_cpu_32(desc.data[1]);
+	if (mpf_bd_num_val < mpf_min_bd_num || pf_bd_num_val < pf_min_bd_num) {
+		hns3_err(hw, "error bd num: mpf(%u), min_mpf(%u), "
+			 "pf(%u), min_pf(%u)\n", mpf_bd_num_val, mpf_min_bd_num,
+			 pf_bd_num_val, pf_min_bd_num);
+		return -EINVAL;
+	}
 
-	ret = hns3_cmd_send(hw, &desc[0], mpf_bd_num);
-	if (ret)
-		hns3_err(hw, "query all mpf msix err failed: %d", ret);
+	*mpf_bd_num = mpf_bd_num_val;
+	*pf_bd_num = pf_bd_num_val;
 
-	return ret;
+	return 0;
 }
 
-static int
-clear_all_mpf_msix_err(struct hns3_hw *hw, struct hns3_cmd_desc *desc,
-		       uint32_t mpf_bd_num)
+void
+hns3_intr_unregister(const struct rte_intr_handle *hdl,
+		     rte_intr_callback_fn cb_fn, void *cb_arg)
 {
+	int retry_cnt = 0;
 	int ret;
 
-	hns3_cmd_reuse_desc(desc, false);
-	desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
-
-	ret = hns3_cmd_send(hw, desc, mpf_bd_num);
-	if (ret)
-		hns3_err(hw, "clear all mpf msix err failed: %d", ret);
-
-	return ret;
+	do {
+		ret = rte_intr_callback_unregister(hdl, cb_fn, cb_arg);
+		if (ret >= 0) {
+			break;
+		} else if (ret != -EAGAIN) {
+			PMD_INIT_LOG(ERR, "Failed to unregister intr: %d", ret);
+			break;
+		}
+		rte_delay_ms(HNS3_INTR_UNREG_FAIL_DELAY_MS);
+	} while (retry_cnt++ < HNS3_INTR_UNREG_FAIL_RETRY_CNT);
 }
 
-static int
-query_all_pf_msix_err(struct hns3_hw *hw, struct hns3_cmd_desc *desc,
-		      uint32_t pf_bd_num)
+static uint32_t
+hns3_get_hw_error_status(struct hns3_cmd_desc *desc, uint8_t desc_offset,
+			 uint8_t data_offset)
 {
-	int ret;
-
-	hns3_cmd_setup_basic_desc(desc, HNS3_QUERY_CLEAR_ALL_PF_MSIX_INT, true);
-	desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
+	uint32_t status;
+	uint32_t *desc_data;
 
-	ret = hns3_cmd_send(hw, desc, pf_bd_num);
-	if (ret)
-		hns3_err(hw, "query all pf msix int cmd failed: %d", ret);
+	if (desc_offset == 0)
+		status = rte_le_to_cpu_32(desc[desc_offset].data[data_offset]);
+	else {
+		desc_data = (uint32_t *)&desc[desc_offset];
+		status = rte_le_to_cpu_32(*(desc_data + data_offset));
+	}
 
-	return ret;
+	return status;
 }
 
 static int
-clear_all_pf_msix_err(struct hns3_hw *hw, struct hns3_cmd_desc *desc,
-		      uint32_t pf_bd_num)
+hns3_handle_hw_error(struct hns3_adapter *hns, struct hns3_cmd_desc *desc,
+		     int num, uint64_t *levels, enum hns3_hw_err_type err_type)
 {
+	const struct hns3_hw_error_desc *err = pf_ras_err_tbl;
+	enum hns3_opcode_type opcode;
+	enum hns3_reset_level req_level;
+	struct hns3_hw *hw = &hns->hw;
+	uint32_t status;
 	int ret;
 
-	hns3_cmd_reuse_desc(desc, false);
-	desc[0].flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_NEXT);
+	switch (err_type) {
+	case MPF_MSIX_ERR:
+		err = mpf_msix_err_tbl;
+		opcode = HNS3_OPC_QUERY_CLEAR_ALL_MPF_MSIX_INT;
+		break;
+	case PF_MSIX_ERR:
+		err = pf_msix_err_tbl;
+		opcode = HNS3_OPC_QUERY_CLEAR_ALL_PF_MSIX_INT;
+		break;
+	case MPF_RAS_ERR:
+		err = mpf_ras_err_tbl;
+		opcode = HNS3_OPC_QUERY_CLEAR_MPF_RAS_INT;
+		break;
+	case PF_RAS_ERR:
+		err = pf_ras_err_tbl;
+		opcode = HNS3_OPC_QUERY_CLEAR_PF_RAS_INT;
+		break;
+	default:
+		hns3_err(hw, "error hardware err_type = %d\n", err_type);
+		return -EINVAL;
+	}
 
-	ret = hns3_cmd_send(hw, desc, pf_bd_num);
-	if (ret)
-		hns3_err(hw, "clear all pf msix err failed: %d", ret);
+	/* query all hardware errors */
+	hns3_cmd_setup_basic_desc(&desc[0], opcode, true);
+	ret = hns3_cmd_send(hw, &desc[0], num);
+	if (ret) {
+		hns3_err(hw, "query hw err int 0x%x cmd failed, ret = %d\n",
+			 opcode, ret);
+		return ret;
+	}
 
-	return ret;
-}
+	/* traverses the error table and process based on the error type */
+	while (err->msg) {
+		status = hns3_get_hw_error_status(desc, err->desc_offset,
+						  err->data_offset);
+		if (status) {
+			/*
+			 * set the reset_level or non_reset flag based on
+			 * the error type and add error statistics. here just
+			 * set the flag, the actual reset action is in
+			 * hns3_msix_process.
+			 */
+			req_level = hns3_find_highest_level(hns, err->msg,
+							    err->hw_err,
+							    status);
+			hns3_atomic_set_bit(req_level, levels);
+		}
+		err++;
+	}
 
-void
-hns3_intr_unregister(const struct rte_intr_handle *hdl,
-		     rte_intr_callback_fn cb_fn, void *cb_arg)
-{
-	int retry_cnt = 0;
-	int ret;
+	/* clear all hardware errors */
+	hns3_cmd_reuse_desc(&desc[0], false);
+	ret = hns3_cmd_send(hw, &desc[0], num);
+	if (ret)
+		hns3_err(hw, "clear all hw err int cmd failed, ret = %d\n",
+			 ret);
 
-	do {
-		ret = rte_intr_callback_unregister(hdl, cb_fn, cb_arg);
-		if (ret >= 0) {
-			break;
-		} else if (ret != -EAGAIN) {
-			PMD_INIT_LOG(ERR, "Failed to unregister intr: %d", ret);
-			break;
-		}
-		rte_delay_ms(HNS3_INTR_UNREG_FAIL_DELAY_MS);
-	} while (retry_cnt++ < HNS3_INTR_UNREG_FAIL_RETRY_CNT);
+	return ret;
 }
 
 void
 hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels)
 {
 	uint32_t mpf_bd_num, pf_bd_num, bd_num;
-	enum hns3_reset_level req_level;
 	struct hns3_hw *hw = &hns->hw;
-	struct hns3_pf *pf = &hns->pf;
-	struct hns3_cmd_desc desc_bd;
 	struct hns3_cmd_desc *desc;
-	uint32_t *desc_data;
-	uint32_t status;
 	int ret;
 
 	/* query the number of bds for the MSIx int status */
-	ret = query_num_bds_in_msix(hw, &desc_bd);
+	ret = query_num_bds(hw, false, &mpf_bd_num, &pf_bd_num);
 	if (ret) {
-		hns3_err(hw, "fail to query msix int status bd num: %d", ret);
-		return;
-	}
-
-	mpf_bd_num = rte_le_to_cpu_32(desc_bd.data[0]);
-	pf_bd_num = rte_le_to_cpu_32(desc_bd.data[1]);
-	bd_num = max_t(uint32_t, mpf_bd_num, pf_bd_num);
-	if (bd_num < RCB_ERROR_OFFSET) {
-		hns3_err(hw, "bd_num is less than RCB_ERROR_OFFSET: %u",
-			 bd_num);
+		hns3_err(hw, "fail to query msix int status bd num: ret = %d",
+			 ret);
 		return;
 	}
 
+	bd_num = RTE_MAX(mpf_bd_num, pf_bd_num);
 	desc = rte_zmalloc(NULL, bd_num * sizeof(struct hns3_cmd_desc), 0);
 	if (desc == NULL) {
-		hns3_err(hw, "fail to zmalloc desc");
+		hns3_err(hw,
+			 "fail to zmalloc desc for handling msix error, size = %lu",
+			 bd_num * sizeof(struct hns3_cmd_desc));
 		return;
 	}
 
-	/* query all main PF MSIx errors */
-	ret = query_all_mpf_msix_err(hw, &desc[0], mpf_bd_num);
+	/* handle all main PF MSIx errors */
+	ret = hns3_handle_hw_error(hns, desc, mpf_bd_num, levels, MPF_MSIX_ERR);
 	if (ret) {
-		hns3_err(hw, "query all mpf msix int cmd failed: %d", ret);
+		hns3_err(hw, "fail to handle all main pf msix errors, ret = %d",
+			 ret);
 		goto out;
 	}
 
-	/* log MAC errors */
-	desc_data = (uint32_t *)&desc[MAC_ERROR_OFFSET];
-	status = rte_le_to_cpu_32(*desc_data);
-	if (status) {
-		req_level = hns3_find_highest_level(hns, "MAC_AFIFO_TNL_INT_R",
-						    mac_afifo_tnl_int,
-						    status);
-		hns3_atomic_set_bit(req_level, levels);
-		pf->abn_int_stats.mac_afifo_tnl_intr_cnt++;
-	}
-
-	/* log PPU(RCB) errors */
-	desc_data = (uint32_t *)&desc[RCB_ERROR_OFFSET];
-	status = rte_le_to_cpu_32(*(desc_data + RCB_ERROR_STATUS_OFFSET)) &
-			HNS3_PPU_MPF_INT_ST2_MSIX_MASK;
-	if (status) {
-		req_level = hns3_find_highest_level(hns,
-						    "PPU_MPF_ABNORMAL_INT_ST2",
-						    ppu_mpf_abnormal_int_st2,
-						    status);
-		hns3_atomic_set_bit(req_level, levels);
-		pf->abn_int_stats.ppu_mpf_abnormal_intr_st2_cnt++;
-	}
+	memset(desc, 0, bd_num * sizeof(struct hns3_cmd_desc));
 
-	/* clear all main PF MSIx errors */
-	ret = clear_all_mpf_msix_err(hw, desc, mpf_bd_num);
+	/* handle all PF MSIx errors */
+	ret = hns3_handle_hw_error(hns, desc, pf_bd_num, levels, PF_MSIX_ERR);
 	if (ret) {
-		hns3_err(hw, "clear all mpf msix int cmd failed: %d", ret);
+		hns3_err(hw, "fail to handle all pf msix errors, ret = %d",
+			 ret);
 		goto out;
 	}
 
-	/* query all PF MSIx errors */
-	memset(desc, 0, bd_num * sizeof(struct hns3_cmd_desc));
-	ret = query_all_pf_msix_err(hw, &desc[0], pf_bd_num);
+out:
+	rte_free(desc);
+}
+
+void
+hns3_handle_ras_error(struct hns3_adapter *hns, uint64_t *levels)
+{
+	uint32_t mpf_bd_num, pf_bd_num, bd_num;
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_cmd_desc *desc;
+	uint32_t status;
+	int ret;
+
+	status = hns3_read_dev(hw, HNS3_RAS_PF_OTHER_INT_STS_REG);
+	if ((status & HNS3_RAS_REG_NFE_MASK) == 0)
+		return;
+
+	/* query the number of bds for the RAS int status */
+	ret = query_num_bds(hw, true, &mpf_bd_num, &pf_bd_num);
 	if (ret) {
-		hns3_err(hw, "query all pf msix int cmd failed (%d)", ret);
-		goto out;
+		hns3_err(hw, "fail to query ras int status bd num: ret = %d",
+			 ret);
+		return;
 	}
 
-	/* log SSU PF errors */
-	status = rte_le_to_cpu_32(desc[0].data[0]) &
-		 HNS3_SSU_PORT_INT_MSIX_MASK;
-	if (status) {
-		req_level = hns3_find_highest_level(hns,
-						    "SSU_PORT_BASED_ERR_INT",
-						    ssu_port_based_pf_int,
-						    status);
-		hns3_atomic_set_bit(req_level, levels);
-		pf->abn_int_stats.ssu_port_based_pf_intr_cnt++;
+	bd_num = RTE_MAX(mpf_bd_num, pf_bd_num);
+	desc = rte_zmalloc(NULL, bd_num * sizeof(struct hns3_cmd_desc), 0);
+	if (desc == NULL) {
+		hns3_err(hw,
+			 "fail to zmalloc desc for handing ras error, size = %lu",
+			 bd_num * sizeof(struct hns3_cmd_desc));
+		return;
 	}
 
-	/* log PPP PF errors */
-	desc_data = (uint32_t *)&desc[PPP_PF_ERROR_OFFSET];
-	status = rte_le_to_cpu_32(*desc_data);
-	if (status) {
-		req_level = hns3_find_highest_level(hns,
-						    "PPP_PF_ABNORMAL_INT_ST0",
-						    ppp_pf_abnormal_int,
-						    status);
-		hns3_atomic_set_bit(req_level, levels);
-		pf->abn_int_stats.ppp_pf_abnormal_intr_cnt++;
+	/* handle all main PF RAS errors */
+	ret = hns3_handle_hw_error(hns, desc, mpf_bd_num, levels, MPF_RAS_ERR);
+	if (ret) {
+		hns3_err(hw, "fail to handle all main pf ras errors, ret = %d",
+			 ret);
+		goto out;
 	}
 
-	/* log PPU(RCB) PF errors */
-	desc_data = (uint32_t *)&desc[PPU_PF_ERROR_OFFSET];
-	status = rte_le_to_cpu_32(*desc_data) & HNS3_PPU_PF_INT_MSIX_MASK;
-	if (status) {
-		req_level = hns3_find_highest_level(hns,
-						    "PPU_PF_ABNORMAL_INT_ST",
-						    ppu_pf_abnormal_int,
-						    status);
-		hns3_atomic_set_bit(req_level, levels);
-		pf->abn_int_stats.ppu_pf_abnormal_intr_cnt++;
+	memset(desc, 0, bd_num * sizeof(struct hns3_cmd_desc));
+
+	/* handle all PF RAS errors */
+	ret = hns3_handle_hw_error(hns, desc, pf_bd_num, levels, PF_RAS_ERR);
+	if (ret) {
+		hns3_err(hw, "fail to handle all pf ras errors, ret = %d", ret);
+		goto out;
 	}
 
-	/* clear all PF MSIx errors */
-	ret = clear_all_pf_msix_err(hw, desc, pf_bd_num);
-	if (ret)
-		hns3_err(hw, "clear all pf msix int cmd failed: %d", ret);
 out:
 	rte_free(desc);
 }
diff --git a/drivers/net/hns3/hns3_intr.h b/drivers/net/hns3/hns3_intr.h
index d0af16c50..2b802bc7c 100644
--- a/drivers/net/hns3/hns3_intr.h
+++ b/drivers/net/hns3/hns3_intr.h
@@ -19,6 +19,22 @@
 #define HNS3_MAC_COMMON_ERR_INT_EN		0x107FF
 #define HNS3_MAC_COMMON_ERR_INT_EN_MASK		0x107FF
 
+#define HNS3_IMP_TCM_ECC_ERR_INT_EN		0xFFFF0000
+#define HNS3_IMP_TCM_ECC_ERR_INT_EN_MASK	0xFFFF0000
+#define HNS3_IMP_ITCM4_ECC_ERR_INT_EN		0x300
+#define HNS3_IMP_ITCM4_ECC_ERR_INT_EN_MASK	0x300
+#define HNS3_IMP_RD_POISON_ERR_INT_EN		0x0100
+#define HNS3_IMP_RD_POISON_ERR_INT_EN_MASK	0x0100
+
+#define HNS3_CMDQ_NIC_ECC_ERR_INT_EN		0xFFFF
+#define HNS3_CMDQ_NIC_ECC_ERR_INT_EN_MASK	0xFFFF
+
+#define HNS3_TQP_ECC_ERR_INT_EN			0x0FFF
+#define HNS3_TQP_ECC_ERR_INT_EN_MASK		0x0FFF
+
+#define HNS3_MSIX_SRAM_ECC_ERR_INT_EN		0x0F000000
+#define HNS3_MSIX_SRAM_ECC_ERR_INT_EN_MASK	0x0F000000
+
 #define HNS3_PPU_MPF_ABNORMAL_INT0_EN		GENMASK(31, 0)
 #define HNS3_PPU_MPF_ABNORMAL_INT0_EN_MASK	GENMASK(31, 0)
 #define HNS3_PPU_MPF_ABNORMAL_INT1_EN		GENMASK(31, 0)
@@ -31,8 +47,6 @@
 #define HNS3_PPU_MPF_ABNORMAL_INT3_EN_MASK	GENMASK(23, 16)
 #define HNS3_PPU_PF_ABNORMAL_INT_EN		GENMASK(5, 0)
 #define HNS3_PPU_PF_ABNORMAL_INT_EN_MASK	GENMASK(5, 0)
-#define HNS3_PPU_PF_INT_MSIX_MASK		0x27
-#define HNS3_PPU_MPF_INT_ST2_MSIX_MASK		GENMASK(29, 28)
 
 #define HNS3_SSU_1BIT_ECC_ERR_INT_EN		GENMASK(31, 0)
 #define HNS3_SSU_1BIT_ECC_ERR_INT_EN_MASK	GENMASK(31, 0)
@@ -46,8 +60,17 @@
 #define HNS3_SSU_PORT_BASED_ERR_INT_EN_MASK	0x0BFF0000
 #define HNS3_SSU_FIFO_OVERFLOW_ERR_INT_EN	GENMASK(23, 0)
 #define HNS3_SSU_FIFO_OVERFLOW_ERR_INT_EN_MASK	GENMASK(23, 0)
-#define HNS3_SSU_COMMON_ERR_INT_MASK		GENMASK(9, 0)
-#define HNS3_SSU_PORT_INT_MSIX_MASK		0x7BFF
+
+#define HNS3_IGU_ERR_INT_ENABLE			0x0000066F
+#define HNS3_IGU_ERR_INT_DISABLE		0x00000660
+#define HNS3_IGU_ERR_INT_EN_MASK		0x000F
+#define HNS3_IGU_TNL_ERR_INT_EN			0x0002AABF
+#define HNS3_IGU_TNL_ERR_INT_EN_MASK		0x003F
+
+#define HNS3_NCSI_ERR_INT_EN			0x3
+
+#define HNS3_TM_SCH_ECC_ERR_INT_EN		0x3
+#define HNS3_TM_QCN_MEM_ERR_INT_EN		0xFFFFFF
 
 #define HNS3_RESET_PROCESS_MS			200
 
@@ -62,8 +85,17 @@ struct hns3_hw_error {
 	enum hns3_reset_level reset_level;
 };
 
+struct hns3_hw_error_desc {
+	uint8_t desc_offset;
+	uint8_t data_offset;
+	const char *msg;
+	const struct hns3_hw_error *hw_err;
+};
+
 int hns3_enable_hw_error_intr(struct hns3_adapter *hns, bool state);
 void hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels);
+void hns3_handle_ras_error(struct hns3_adapter *hns, uint64_t *levels);
+
 void hns3_intr_unregister(const struct rte_intr_handle *hdl,
 			  rte_intr_callback_fn cb_fn, void *cb_arg);
 void hns3_notify_reset_ready(struct hns3_hw *hw, bool enable);
diff --git a/drivers/net/hns3/hns3_regs.h b/drivers/net/hns3/hns3_regs.h
index bf6df6300..5cf924e19 100644
--- a/drivers/net/hns3/hns3_regs.h
+++ b/drivers/net/hns3/hns3_regs.h
@@ -27,6 +27,9 @@
 
 #define HNS3_VECTOR0_OTHER_INT_STS_REG	0x20800
 
+#define HNS3_RAS_PF_OTHER_INT_STS_REG	0x20B00
+#define HNS3_RAS_REG_NFE_MASK		0xFF00
+
 #define HNS3_MISC_VECTOR_REG_BASE	0x20400
 #define HNS3_VECTOR0_OTER_EN_REG	0x20600
 #define HNS3_MISC_RESET_STS_REG		0x20700
diff --git a/drivers/net/hns3/hns3_stats.c b/drivers/net/hns3/hns3_stats.c
index d2467a484..f2918fc6a 100644
--- a/drivers/net/hns3/hns3_stats.c
+++ b/drivers/net/hns3/hns3_stats.c
@@ -189,15 +189,61 @@ static const struct hns3_xstats_name_offset hns3_mac_strings[] = {
 
 static const struct hns3_xstats_name_offset hns3_error_int_stats_strings[] = {
 	{"MAC_AFIFO_TNL_INT_R",
-		HNS3_ERR_INT_STATS_FIELD_OFFSET(mac_afifo_tnl_intr_cnt)},
-	{"PPU_MPF_ABNORMAL_INT_ST2",
-		HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_mpf_abnormal_intr_st2_cnt)},
-	{"SSU_PORT_BASED_ERR_INT",
-		HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_port_based_pf_intr_cnt)},
+		HNS3_ERR_INT_STATS_FIELD_OFFSET(mac_afifo_tnl_int_cnt)},
+	{"PPU_MPF_ABNORMAL_INT_ST2_MSIX",
+		HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_mpf_abn_int_st2_msix_cnt)},
+	{"SSU_PORT_BASED_ERR_INT_MSIX",
+		HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_port_based_pf_int_cnt)},
 	{"PPP_PF_ABNORMAL_INT_ST0",
-		HNS3_ERR_INT_STATS_FIELD_OFFSET(ppp_pf_abnormal_intr_cnt)},
-	{"PPU_PF_ABNORMAL_INT_ST",
-		HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_pf_abnormal_intr_cnt)}
+		HNS3_ERR_INT_STATS_FIELD_OFFSET(ppp_pf_abnormal_int_cnt)},
+	{"PPU_PF_ABNORMAL_INT_ST_MSIX",
+		HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_pf_abnormal_int_msix_cnt)},
+	{"IMP_TCM_ECC_INT_STS",
+		HNS3_ERR_INT_STATS_FIELD_OFFSET(imp_tcm_ecc_int_cnt)},
+	{"CMDQ_MEM_ECC_INT_STS",
+		HNS3_ERR_INT_STATS_FIELD_OFFSET(cmdq_mem_ecc_int_cnt)},
+	{"IMP_RD_POISON_INT_STS",
+		HNS3_ERR_INT_STATS_FIELD_OFFSET(imp_rd_poison_int_cnt)},
+	{"TQP_INT_ECC_INT_STS",
+		HNS3_ERR_INT_STATS_FIELD_OFFSET(tqp_int_ecc_int_cnt)},
+	{"MSIX_ECC_INT_STS",
+		HNS3_ERR_INT_STATS_FIELD_OFFSET(msix_ecc_int_cnt)},
+	{"SSU_ECC_MULTI_BIT_INT_0",
+		HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_ecc_multi_bit_int_0_cnt)},
+	{"SSU_ECC_MULTI_BIT_INT_1",
+		HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_ecc_multi_bit_int_1_cnt)},
+	{"SSU_COMMON_ERR_INT",
+		HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_common_ecc_int_cnt)},
+	{"IGU_INT_STS",
+		HNS3_ERR_INT_STATS_FIELD_OFFSET(igu_int_cnt)},
+	{"PPP_MPF_ABNORMAL_INT_ST1",
+		HNS3_ERR_INT_STATS_FIELD_OFFSET(ppp_mpf_abnormal_int_st1_cnt)},
+	{"PPP_MPF_ABNORMAL_INT_ST3",
+		HNS3_ERR_INT_STATS_FIELD_OFFSET(ppp_mpf_abnormal_int_st3_cnt)},
+	{"PPU_MPF_ABNORMAL_INT_ST1",
+		HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_mpf_abnormal_int_st1_cnt)},
+	{"PPU_MPF_ABNORMAL_INT_ST2_RAS",
+		HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_mpf_abn_int_st2_ras_cnt)},
+	{"PPU_MPF_ABNORMAL_INT_ST3",
+		HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_mpf_abnormal_int_st3_cnt)},
+	{"TM_SCH_RINT",
+		HNS3_ERR_INT_STATS_FIELD_OFFSET(tm_sch_int_cnt)},
+	{"QCN_FIFO_RINT",
+		HNS3_ERR_INT_STATS_FIELD_OFFSET(qcn_fifo_int_cnt)},
+	{"QCN_ECC_RINT",
+		HNS3_ERR_INT_STATS_FIELD_OFFSET(qcn_ecc_int_cnt)},
+	{"NCSI_ECC_INT_RPT",
+		HNS3_ERR_INT_STATS_FIELD_OFFSET(ncsi_ecc_int_cnt)},
+	{"SSU_PORT_BASED_ERR_INT_RAS",
+		HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_port_based_err_int_cnt)},
+	{"SSU_FIFO_OVERFLOW_INT",
+		HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_fifo_overflow_int_cnt)},
+	{"SSU_ETS_TCG_INT",
+		HNS3_ERR_INT_STATS_FIELD_OFFSET(ssu_ets_tcg_int_cnt)},
+	{"IGU_EGU_TNL_INT_STS",
+		HNS3_ERR_INT_STATS_FIELD_OFFSET(igu_egu_tnl_int_cnt)},
+	{"PPU_PF_ABNORMAL_INT_ST_RAS",
+		HNS3_ERR_INT_STATS_FIELD_OFFSET(ppu_pf_abnormal_int_ras_cnt)},
 };
 
 /* The statistic of reset */
@@ -645,6 +691,22 @@ hns3_get_queue_stats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 
 }
 
+void
+hns3_error_int_stats_add(struct hns3_adapter *hns, const char *err)
+{
+	struct hns3_pf *pf = &hns->pf;
+	uint16_t i;
+	char *addr;
+
+	for (i = 0; i < HNS3_NUM_ERROR_INT_XSTATS; i++) {
+		if (strcmp(hns3_error_int_stats_strings[i].name, err) == 0) {
+			addr = (char *)&pf->abn_int_stats +
+				hns3_error_int_stats_strings[i].offset;
+			*(uint64_t *)addr += 1;
+		}
+	}
+}
+
 /*
  * Retrieve extended(tqp | Mac) statistics of an Ethernet device.
  * @param dev
diff --git a/drivers/net/hns3/hns3_stats.h b/drivers/net/hns3/hns3_stats.h
index 0993c5f57..07570cb31 100644
--- a/drivers/net/hns3/hns3_stats.h
+++ b/drivers/net/hns3/hns3_stats.h
@@ -148,4 +148,6 @@ int hns3_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
 				    const uint64_t *ids,
 				    uint32_t size);
 int hns3_stats_reset(struct rte_eth_dev *dev);
+void hns3_error_int_stats_add(struct hns3_adapter *hns, const char *err);
+
 #endif /* _HNS3_STATS_H_ */
-- 
2.27.0


^ permalink raw reply	[flat|nested] 23+ messages in thread

* [dpdk-dev] [PATCH 06/11] net/hns3: support a maximum 256 FDIR counter
  2020-08-25 11:52 [dpdk-dev] [PATCH 00/11] updates for hns3 PMD driver Wei Hu (Xavier)
                   ` (4 preceding siblings ...)
  2020-08-25 11:52 ` [dpdk-dev] [PATCH 05/11] net/hns3: add more hardware error types Wei Hu (Xavier)
@ 2020-08-25 11:53 ` Wei Hu (Xavier)
  2020-08-25 11:53 ` [dpdk-dev] [PATCH 07/11] net/hns3: replace private macro with RTE MAX Wei Hu (Xavier)
                   ` (5 subsequent siblings)
  11 siblings, 0 replies; 23+ messages in thread
From: Wei Hu (Xavier) @ 2020-08-25 11:53 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit, xavier.huwei

From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>

The FDIR counter is used to count the number of FDIR hits. The maximum
number of counters is 128 on kunpeng 920, and 256 on kunpeng 930.

The firmware is responsible for allocating counters to different PF devices,
so the number of counters available to one PF may be bigger than 128.

Currently, there are two places using the counter in hns3 PMD driver:
1. Configure the counter. Driver uses the command whose opcode is
   HNS3_OPC_FD_AD_OP, now we extend one bit to hold the high bit of
   counter-id in the command format.
2. Query the statistic information of the counter. Driver uses the command
   whose opcode is HNS3_OPC_FD_COUNTER_OP, now the command already support
   16-bit counter-id.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
---
 drivers/net/hns3/hns3_fdir.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/net/hns3/hns3_fdir.c b/drivers/net/hns3/hns3_fdir.c
index 6ab439d06..5c3dd05f2 100644
--- a/drivers/net/hns3/hns3_fdir.c
+++ b/drivers/net/hns3/hns3_fdir.c
@@ -41,6 +41,8 @@
 #define HNS3_FD_AD_WR_RULE_ID_B	0
 #define HNS3_FD_AD_RULE_ID_S		1
 #define HNS3_FD_AD_RULE_ID_M		GENMASK(13, 1)
+#define HNS3_FD_AD_COUNTER_HIGH_BIT     7
+#define HNS3_FD_AD_COUNTER_HIGH_BIT_B   26
 
 enum HNS3_PORT_TYPE {
 	HOST_PORT,
@@ -424,6 +426,9 @@ static int hns3_fd_ad_config(struct hns3_hw *hw, int loc,
 		     action->write_rule_id_to_bd);
 	hns3_set_field(ad_data, HNS3_FD_AD_RULE_ID_M, HNS3_FD_AD_RULE_ID_S,
 		       action->rule_id);
+	/* set extend bit if counter_id is in [128 ~ 255] */
+	if (action->counter_id & BIT(HNS3_FD_AD_COUNTER_HIGH_BIT))
+		hns3_set_bit(ad_data, HNS3_FD_AD_COUNTER_HIGH_BIT_B, 1);
 	ad_data <<= HNS3_FD_AD_DATA_S;
 	hns3_set_bit(ad_data, HNS3_FD_AD_DROP_B, action->drop_packet);
 	hns3_set_bit(ad_data, HNS3_FD_AD_DIRECT_QID_B,
-- 
2.27.0


^ permalink raw reply	[flat|nested] 23+ messages in thread

* [dpdk-dev] [PATCH 07/11] net/hns3: replace private macro with RTE MAX
  2020-08-25 11:52 [dpdk-dev] [PATCH 00/11] updates for hns3 PMD driver Wei Hu (Xavier)
                   ` (5 preceding siblings ...)
  2020-08-25 11:53 ` [dpdk-dev] [PATCH 06/11] net/hns3: support a maximum 256 FDIR counter Wei Hu (Xavier)
@ 2020-08-25 11:53 ` Wei Hu (Xavier)
  2020-08-25 11:53 ` [dpdk-dev] [PATCH 08/11] net/hns3: change the log level to INFO Wei Hu (Xavier)
                   ` (4 subsequent siblings)
  11 siblings, 0 replies; 23+ messages in thread
From: Wei Hu (Xavier) @ 2020-08-25 11:53 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit, xavier.huwei

From: Huisong Li <lihuisong@huawei.com>

This patch uses RTE_MAX function in DPDK lib to replace the private
macro named max_t in hns3 PMD driver.

Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
---
 drivers/net/hns3/hns3_ethdev.c | 5 ++---
 drivers/net/hns3/hns3_ethdev.h | 5 -----
 2 files changed, 2 insertions(+), 8 deletions(-)

diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index b9ee11413..fca035d4f 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -3285,7 +3285,7 @@ hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc,
 					+ pf->dv_buf_size;
 
 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
-	shared_std = roundup(max_t(uint32_t, shared_buf_min, shared_buf_tc),
+	shared_std = roundup(RTE_MAX(shared_buf_min, shared_buf_tc),
 			     HNS3_BUF_SIZE_UNIT);
 
 	rx_priv = hns3_get_rx_priv_buff_alloced(buf_alloc);
@@ -3315,8 +3315,7 @@ hns3_is_rx_buf_ok(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc,
 		if (tc_num)
 			hi_thrd = hi_thrd / tc_num;
 
-		hi_thrd = max_t(uint32_t, hi_thrd,
-				HNS3_BUF_MUL_BY * aligned_mps);
+		hi_thrd = RTE_MAX(hi_thrd, HNS3_BUF_MUL_BY * aligned_mps);
 		hi_thrd = rounddown(hi_thrd, HNS3_BUF_SIZE_UNIT);
 		lo_thrd = hi_thrd - aligned_mps / HNS3_BUF_DIV_BY;
 	} else {
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index ca4cade42..a42479d64 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -708,11 +708,6 @@ struct hns3_adapter {
 
 #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
 
-#define max_t(type, x, y) ({                    \
-	type __max1 = (x);                      \
-	type __max2 = (y);                      \
-	__max1 > __max2 ? __max1 : __max2; })
-
 static inline void hns3_write_reg(void *base, uint32_t reg, uint32_t value)
 {
 	rte_write32(value, (volatile void *)((char *)base + reg));
-- 
2.27.0


^ permalink raw reply	[flat|nested] 23+ messages in thread

* [dpdk-dev] [PATCH 08/11] net/hns3: change the log level to INFO
  2020-08-25 11:52 [dpdk-dev] [PATCH 00/11] updates for hns3 PMD driver Wei Hu (Xavier)
                   ` (6 preceding siblings ...)
  2020-08-25 11:53 ` [dpdk-dev] [PATCH 07/11] net/hns3: replace private macro with RTE MAX Wei Hu (Xavier)
@ 2020-08-25 11:53 ` Wei Hu (Xavier)
  2020-09-04 10:34   ` Ferruh Yigit
  2020-08-25 11:53 ` [dpdk-dev] [PATCH 09/11] net/hns3: fix default MAC addr from firmware Wei Hu (Xavier)
                   ` (3 subsequent siblings)
  11 siblings, 1 reply; 23+ messages in thread
From: Wei Hu (Xavier) @ 2020-08-25 11:53 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit, xavier.huwei

From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>

This patch changes the log level from NOTICE to INFO.

Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
---
 drivers/net/hns3/hns3_ethdev.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index fca035d4f..3827d3277 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -5744,5 +5744,5 @@ static struct rte_pci_driver rte_hns3_pmd = {
 RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci");
-RTE_LOG_REGISTER(hns3_logtype_init, pmd.net.hns3.init, NOTICE);
-RTE_LOG_REGISTER(hns3_logtype_driver, pmd.net.hns3.driver, NOTICE);
+RTE_LOG_REGISTER(hns3_logtype_init, pmd.net.hns3.init, INFO);
+RTE_LOG_REGISTER(hns3_logtype_driver, pmd.net.hns3.driver, INFO);
-- 
2.27.0


^ permalink raw reply	[flat|nested] 23+ messages in thread

* [dpdk-dev] [PATCH 09/11] net/hns3: fix default MAC addr from firmware
  2020-08-25 11:52 [dpdk-dev] [PATCH 00/11] updates for hns3 PMD driver Wei Hu (Xavier)
                   ` (7 preceding siblings ...)
  2020-08-25 11:53 ` [dpdk-dev] [PATCH 08/11] net/hns3: change the log level to INFO Wei Hu (Xavier)
@ 2020-08-25 11:53 ` Wei Hu (Xavier)
  2020-08-25 11:53 ` [dpdk-dev] [PATCH 10/11] net/hns3: fix Rx/Tx queue offload capability Wei Hu (Xavier)
                   ` (2 subsequent siblings)
  11 siblings, 0 replies; 23+ messages in thread
From: Wei Hu (Xavier) @ 2020-08-25 11:53 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit, xavier.huwei

From: Huisong Li <lihuisong@huawei.com>

Currently, default MAC address obtained from firmware in hns3 PF PMD driver
is directly used by .mac_addr_set ops implementation function when the
rte_eth_dev_start API function is executed. At this moment, if the default
MAC addr isn't a unicast address, it will fail to set default MAC addr to
hardware.

So this patch adds the validity check of default MAC addr in hns3 PF PMD
driver. We will use a random unicast address if the default MAC address
obtained from firmware is not a valid unicast address.

In addition, this patch also adjusts the location of processing default MAC
addr in hns3 VF PMD driver so as to increase relevance and readability of
the code.

Fixes: eab21776717 ("net/hns3: support setting VF MAC address by PF driver")
Fixes: d51867db65c ("net/hns3: add initialization")
Cc: stable@dpdk.org

Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
---
 drivers/net/hns3/hns3_ethdev.c    | 11 +++++++++++
 drivers/net/hns3/hns3_ethdev_vf.c | 29 ++++++++++++++---------------
 2 files changed, 25 insertions(+), 15 deletions(-)

diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 3827d3277..14e4b9e35 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -5574,6 +5574,8 @@ static int
 hns3_dev_init(struct rte_eth_dev *eth_dev)
 {
 	struct hns3_adapter *hns = eth_dev->data->dev_private;
+	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
+	struct rte_ether_addr *eth_addr;
 	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
@@ -5646,6 +5648,15 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
 		goto err_rte_zmalloc;
 	}
 
+	eth_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
+	if (!rte_is_valid_assigned_ether_addr(eth_addr)) {
+		rte_eth_random_addr(hw->mac.mac_addr);
+		rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
+				(struct rte_ether_addr *)hw->mac.mac_addr);
+		hns3_warn(hw, "default mac_addr from firmware is an invalid "
+			  "unicast address, using random MAC address %s",
+			  mac_str);
+	}
 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
 			    &eth_dev->data->mac_addrs[0]);
 
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 3b2ba69bb..7fd0e6a43 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -1748,21 +1748,6 @@ hns3vf_init_vf(struct rte_eth_dev *eth_dev)
 		goto err_get_config;
 	}
 
-	/*
-	 * The hns3 PF ethdev driver in kernel support setting VF MAC address
-	 * on the host by "ip link set ..." command. To avoid some incorrect
-	 * scenes, for example, hns3 VF PMD driver fails to receive and send
-	 * packets after user configure the MAC address by using the
-	 * "ip link set ..." command, hns3 VF PMD driver keep the same MAC
-	 * address strategy as the hns3 kernel ethdev driver in the
-	 * initialization. If user configure a MAC address by the ip command
-	 * for VF device, then hns3 VF PMD driver will start with it, otherwise
-	 * start with a random MAC address in the initialization.
-	 */
-	ret = rte_is_zero_ether_addr((struct rte_ether_addr *)hw->mac.mac_addr);
-	if (ret)
-		rte_eth_random_addr(hw->mac.mac_addr);
-
 	ret = hns3vf_clear_vport_list(hw);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Failed to clear tbl list: %d", ret);
@@ -2644,8 +2629,22 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev)
 		goto err_rte_zmalloc;
 	}
 
+	/*
+	 * The hns3 PF ethdev driver in kernel support setting VF MAC address
+	 * on the host by "ip link set ..." command. To avoid some incorrect
+	 * scenes, for example, hns3 VF PMD driver fails to receive and send
+	 * packets after user configure the MAC address by using the
+	 * "ip link set ..." command, hns3 VF PMD driver keep the same MAC
+	 * address strategy as the hns3 kernel ethdev driver in the
+	 * initialization. If user configure a MAC address by the ip command
+	 * for VF device, then hns3 VF PMD driver will start with it, otherwise
+	 * start with a random MAC address in the initialization.
+	 */
+	if (rte_is_zero_ether_addr((struct rte_ether_addr *)hw->mac.mac_addr))
+		rte_eth_random_addr(hw->mac.mac_addr);
 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.mac_addr,
 			    &eth_dev->data->mac_addrs[0]);
+
 	hw->adapter_state = HNS3_NIC_INITIALIZED;
 	/*
 	 * Pass the information to the rte_eth_dev_close() that it should also
-- 
2.27.0


^ permalink raw reply	[flat|nested] 23+ messages in thread

* [dpdk-dev] [PATCH 10/11] net/hns3: fix Rx/Tx queue offload capability
  2020-08-25 11:52 [dpdk-dev] [PATCH 00/11] updates for hns3 PMD driver Wei Hu (Xavier)
                   ` (8 preceding siblings ...)
  2020-08-25 11:53 ` [dpdk-dev] [PATCH 09/11] net/hns3: fix default MAC addr from firmware Wei Hu (Xavier)
@ 2020-08-25 11:53 ` Wei Hu (Xavier)
  2020-09-04 10:34   ` Ferruh Yigit
  2020-09-08 12:28   ` [dpdk-dev] [PATCH v2] " Wei Hu (Xavier)
  2020-08-25 11:53 ` [dpdk-dev] [PATCH 11/11] net/hns3: fix some incomplete command structures Wei Hu (Xavier)
  2020-09-03  1:04 ` [dpdk-dev] [PATCH 00/11] updates for hns3 PMD driver Wei Hu (Xavier)
  11 siblings, 2 replies; 23+ messages in thread
From: Wei Hu (Xavier) @ 2020-08-25 11:53 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit, xavier.huwei

From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>

According to rte_eth_rx_queue_setup and rte_eth_tx_queue_setup API
function, rx_queue_offload_capa and rx_offload_capa, tx_queue_offload_capa
and tx_offload_capa must be mutually exclusive in the '.dev_infos_get' ops
implementation function. Otherwise, rte_eth_rx_queue_setup or
rte_eth_tx_queue_setup will fail, if user uses rx_offload_capa and
tx_offload_capa obtained by calling the rte_eth_dev_info_get API function.

Currently, the hns3 PF and VF PMD drivers only support enabling offload
capabilities for all Rx/Tx queues at once; applying an offload capability
to a single Rx/Tx queue is not supported. This patch fixes the reported
Rx/Tx queue offload capabilities accordingly.

Fixes: 1f5ca0b460cd67 ("net/hns3: support some device operations")
Fixes: a5475d61fa34b8 ("net/hns3: support VF")
Cc: stable@dpdk.org

Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
---
 drivers/net/hns3/hns3_ethdev.c    | 5 +++--
 drivers/net/hns3/hns3_ethdev_vf.c | 5 +++--
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 14e4b9e35..281d8b928 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2459,6 +2459,7 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 	info->max_mac_addrs = HNS3_UC_MACADDR_NUM;
 	info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
 	info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
+	info->rx_queue_offload_capa = 0;
 	info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
 				 DEV_RX_OFFLOAD_TCP_CKSUM |
 				 DEV_RX_OFFLOAD_UDP_CKSUM |
@@ -2472,7 +2473,7 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 				 DEV_RX_OFFLOAD_JUMBO_FRAME |
 				 DEV_RX_OFFLOAD_RSS_HASH |
 				 DEV_RX_OFFLOAD_TCP_LRO);
-	info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+	info->tx_queue_offload_capa = 0;
 	info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
 				 DEV_TX_OFFLOAD_IPV4_CKSUM |
 				 DEV_TX_OFFLOAD_TCP_CKSUM |
@@ -2483,7 +2484,7 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 				 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
 				 DEV_TX_OFFLOAD_GRE_TNL_TSO |
 				 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-				 info->tx_queue_offload_capa |
+				 DEV_TX_OFFLOAD_MBUF_FAST_FREE |
 				 hns3_txvlan_cap_get(hw));
 
 	info->rx_desc_lim = (struct rte_eth_desc_lim) {
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 7fd0e6a43..2f7a96826 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -903,6 +903,7 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 	info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
 	info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
 
+	info->rx_queue_offload_capa = 0;
 	info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
 				 DEV_RX_OFFLOAD_UDP_CKSUM |
 				 DEV_RX_OFFLOAD_TCP_CKSUM |
@@ -915,7 +916,7 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 				 DEV_RX_OFFLOAD_JUMBO_FRAME |
 				 DEV_RX_OFFLOAD_RSS_HASH |
 				 DEV_RX_OFFLOAD_TCP_LRO);
-	info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+	info->tx_queue_offload_capa = 0;
 	info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
 				 DEV_TX_OFFLOAD_IPV4_CKSUM |
 				 DEV_TX_OFFLOAD_TCP_CKSUM |
@@ -926,7 +927,7 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 				 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
 				 DEV_TX_OFFLOAD_GRE_TNL_TSO |
 				 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-				 info->tx_queue_offload_capa |
+				 DEV_TX_OFFLOAD_MBUF_FAST_FREE |
 				 hns3_txvlan_cap_get(hw));
 
 	info->rx_desc_lim = (struct rte_eth_desc_lim) {
-- 
2.27.0


^ permalink raw reply	[flat|nested] 23+ messages in thread

* [dpdk-dev] [PATCH 11/11] net/hns3: fix some incomplete command structures
  2020-08-25 11:52 [dpdk-dev] [PATCH 00/11] updates for hns3 PMD driver Wei Hu (Xavier)
                   ` (9 preceding siblings ...)
  2020-08-25 11:53 ` [dpdk-dev] [PATCH 10/11] net/hns3: fix Rx/Tx queue offload capability Wei Hu (Xavier)
@ 2020-08-25 11:53 ` Wei Hu (Xavier)
  2020-09-03  1:04 ` [dpdk-dev] [PATCH 00/11] updates for hns3 PMD driver Wei Hu (Xavier)
  11 siblings, 0 replies; 23+ messages in thread
From: Wei Hu (Xavier) @ 2020-08-25 11:53 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit, xavier.huwei

From: Huisong Li <lihuisong@huawei.com>

The descriptor of the command between firmware and hns3 PMD driver
consists of 8-byte header and 24-byte data field. The contents sent to
firmware are packaged into a command structure as the data field of
command descriptor.

There are some command structures in hns3_dcb.h file that are less than
24 byte. So this patch fixes these incomplete command structures.

Fixes: 62e3ccc2b94c6 ("net/hns3: support flow control")
Cc: stable@dpdk.org

Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
---
 drivers/net/hns3/hns3_dcb.h | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/drivers/net/hns3/hns3_dcb.h b/drivers/net/hns3/hns3_dcb.h
index 1636c5ae8..557d88bc1 100644
--- a/drivers/net/hns3/hns3_dcb.h
+++ b/drivers/net/hns3/hns3_dcb.h
@@ -26,16 +26,19 @@ enum hns3_shap_bucket {
 struct hns3_priority_weight_cmd {
 	uint8_t pri_id;
 	uint8_t dwrr;
+	uint8_t rsvd[22];
 };
 
 struct hns3_qs_weight_cmd {
 	uint16_t qs_id;
 	uint8_t dwrr;
+	uint8_t rsvd[21];
 };
 
 struct hns3_pg_weight_cmd {
 	uint8_t pg_id;
 	uint8_t dwrr;
+	uint8_t rsvd[22];
 };
 
 struct hns3_ets_tc_weight_cmd {
@@ -50,6 +53,7 @@ struct hns3_qs_to_pri_link_cmd {
 	uint8_t priority;
 #define HNS3_DCB_QS_PRI_LINK_VLD_MSK	BIT(0)
 	uint8_t link_vld;
+	uint8_t rsvd1[18];
 };
 
 struct hns3_nq_to_qs_link_cmd {
@@ -57,6 +61,7 @@ struct hns3_nq_to_qs_link_cmd {
 	uint16_t rsvd;
 #define HNS3_DCB_Q_QS_LINK_VLD_MSK	BIT(10)
 	uint16_t qset_id;
+	uint8_t rsvd1[18];
 };
 
 #define HNS3_DCB_SHAP_IR_B_MSK  GENMASK(7, 0)
@@ -74,12 +79,14 @@ struct hns3_pri_shapping_cmd {
 	uint8_t pri_id;
 	uint8_t rsvd[3];
 	uint32_t pri_shapping_para;
+	uint32_t rsvd1[4];
 };
 
 struct hns3_pg_shapping_cmd {
 	uint8_t pg_id;
 	uint8_t rsvd[3];
 	uint32_t pg_shapping_para;
+	uint32_t rsvd1[4];
 };
 
 #define HNS3_BP_GRP_NUM		32
@@ -92,16 +99,18 @@ struct hns3_bp_to_qs_map_cmd {
 	uint8_t rsvd[2];
 	uint8_t qs_group_id;
 	uint32_t qs_bit_map;
-	uint32_t rsvd1;
+	uint32_t rsvd1[4];
 };
 
 struct hns3_pfc_en_cmd {
 	uint8_t tx_rx_en_bitmap;
 	uint8_t pri_en_bitmap;
+	uint8_t rsvd[22];
 };
 
 struct hns3_port_shapping_cmd {
 	uint32_t port_shapping_para;
+	uint32_t rsvd[5];
 };
 
 struct hns3_cfg_pause_param_cmd {
@@ -119,6 +128,7 @@ struct hns3_pg_to_pri_link_cmd {
 	uint8_t pg_id;
 	uint8_t rsvd1[3];
 	uint8_t pri_bit_map;
+	uint8_t rsvd2[19];
 };
 
 enum hns3_shaper_level {
-- 
2.27.0


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [dpdk-dev] [PATCH 00/11] updates for hns3 PMD driver
  2020-08-25 11:52 [dpdk-dev] [PATCH 00/11] updates for hns3 PMD driver Wei Hu (Xavier)
                   ` (10 preceding siblings ...)
  2020-08-25 11:53 ` [dpdk-dev] [PATCH 11/11] net/hns3: fix some incomplete command structures Wei Hu (Xavier)
@ 2020-09-03  1:04 ` Wei Hu (Xavier)
  2020-09-04 10:34   ` Ferruh Yigit
  11 siblings, 1 reply; 23+ messages in thread
From: Wei Hu (Xavier) @ 2020-09-03  1:04 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit, xavier.huwei

Hi, all

    Are there any comments?

Thanks

Xavier

On 2020/8/25 19:52, Wei Hu (Xavier) wrote:
> This series are features and fixes for hns3 PMD driver.
>
> Huisong Li (3):
>    net/hns3: replace private macro with RTE MAX
>    net/hns3: fix default MAC addr from firmware
>    net/hns3: fix some incomplete command structures
>
> Wei Hu (Xavier) (8):
>    net/hns3: get device capability from firmware
>    net/hns3: get dev specifications from firmware
>    net/hns3: compatibility issues about Rx interrupts
>    net/hns3: compatibility issues about Tx padding short frame
>    net/hns3: add more hardware error types
>    net/hns3: support a maximun 256 FDIR counter
>    net/hns3: change the log level to INFO
>    net/hns3: fix Rx/Tx queue offload capability
>
>   drivers/net/hns3/hns3_cmd.c       |   36 +-
>   drivers/net/hns3/hns3_cmd.h       |   94 ++-
>   drivers/net/hns3/hns3_dcb.c       |    1 -
>   drivers/net/hns3/hns3_dcb.h       |   14 +-
>   drivers/net/hns3/hns3_ethdev.c    |  187 ++++-
>   drivers/net/hns3/hns3_ethdev.h    |  138 +++-
>   drivers/net/hns3/hns3_ethdev_vf.c |  124 ++-
>   drivers/net/hns3/hns3_fdir.c      |    5 +
>   drivers/net/hns3/hns3_intr.c      | 1236 ++++++++++++++++++++++++-----
>   drivers/net/hns3/hns3_intr.h      |   40 +-
>   drivers/net/hns3/hns3_regs.h      |    7 +
>   drivers/net/hns3/hns3_rxtx.c      |   31 +-
>   drivers/net/hns3/hns3_rxtx.h      |   11 +-
>   drivers/net/hns3/hns3_stats.c     |   78 +-
>   drivers/net/hns3/hns3_stats.h     |    2 +
>   15 files changed, 1666 insertions(+), 338 deletions(-)
>

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [dpdk-dev] [PATCH 05/11] net/hns3: add more hardware error types
  2020-08-25 11:52 ` [dpdk-dev] [PATCH 05/11] net/hns3: add more hardware error types Wei Hu (Xavier)
@ 2020-09-04 10:34   ` Ferruh Yigit
  0 siblings, 0 replies; 23+ messages in thread
From: Ferruh Yigit @ 2020-09-04 10:34 UTC (permalink / raw)
  To: Wei Hu (Xavier), dev; +Cc: xavier.huwei

On 8/25/2020 12:52 PM, Wei Hu (Xavier) wrote:
> From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>
> 
> The new firmware adds the hardware error types reported by MSI-x mode.
> These errors are defined as RAS errors in hardware and belong to a
> different type from the MSI-x errors processed by the driver.
> 
> When hardware detects an hardware errors, which need to be handled with
> the driver otherwise the device cannot run properly, it reports error
> information through the MSI-x interrupt. After receiving the interrupt
> reported by the hardware, the driver queries the error information and
> identifies the error level, then rectifies the error. All errors will be
> logged. In addition, the hardware may be reset at the function or global
> level based on the error level. After the reset is complete, the hardware
> will recovers to the normal status.
> 
> Signed-off-by: Hongbo Zheng <zhenghongbo3@huawei.com>
> Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>

<...>

> +	bd_num = RTE_MAX(mpf_bd_num, pf_bd_num);
>  	desc = rte_zmalloc(NULL, bd_num * sizeof(struct hns3_cmd_desc), 0);
>  	if (desc == NULL) {
> -		hns3_err(hw, "fail to zmalloc desc");
> +		hns3_err(hw,
> +			 "fail to zmalloc desc for handling msix error, size = %lu",
> +			 bd_num * sizeof(struct hns3_cmd_desc));

The log is causing build error for 32bit, because of "%lu" for sizeof() output
[1], fixing as "%lu" -> "%zu" while merging. Doing same for other instance below.



[1]
In file included from .../drivers/net/hns3/hns3_intr.c:16:
.../drivers/net/hns3/hns3_intr.c: In function ‘hns3_handle_msix_error’:
.../drivers/net/hns3/hns3_logs.h:16:38: error: format ‘%lu’ expects argument of
type ‘long unsigned int’, but argument 6 has type ‘uint32_t’ {aka ‘unsigned
int’} [-Werror=format=]
   16 |  rte_log(level, hns3_logtype_driver, "%s %s(): " fmt, \
      |                                      ^~~~~~~~~~~
.../drivers/net/hns3/hns3_logs.h:20:2: note: in expansion of macro ‘PMD_DRV_LOG_RAW’
   20 |  PMD_DRV_LOG_RAW(hw, RTE_LOG_ERR, fmt "\n", ## args)
      |  ^~~~~~~~~~~~~~~
.../drivers/net/hns3/hns3_intr.c:1390:3: note: in expansion of macro ‘hns3_err’
 1390 |   hns3_err(hw,
      |   ^~~~~~~~
.../drivers/net/hns3/hns3_intr.c:1391:61: note: format string is defined here
 1391 |     "fail to zmalloc desc for handling msix error, size = %lu",
      |                                                           ~~^
      |                                                             |
      |                                                             long
unsigned int
      |                                                           %u

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [dpdk-dev] [PATCH 08/11] net/hns3: change the log level to INFO
  2020-08-25 11:53 ` [dpdk-dev] [PATCH 08/11] net/hns3: change the log level to INFO Wei Hu (Xavier)
@ 2020-09-04 10:34   ` Ferruh Yigit
  2020-09-07 11:34     ` Wei Hu (Xavier)
  0 siblings, 1 reply; 23+ messages in thread
From: Ferruh Yigit @ 2020-09-04 10:34 UTC (permalink / raw)
  To: Wei Hu (Xavier), dev; +Cc: xavier.huwei

On 8/25/2020 12:53 PM, Wei Hu (Xavier) wrote:
> From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>
> 
> This patch changes the log level from NOTICE to INFO.
> 
> Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
> ---
>  drivers/net/hns3/hns3_ethdev.c | 4 ++--
>  1 file changed, 2 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
> index fca035d4f..3827d3277 100644
> --- a/drivers/net/hns3/hns3_ethdev.c
> +++ b/drivers/net/hns3/hns3_ethdev.c
> @@ -5744,5 +5744,5 @@ static struct rte_pci_driver rte_hns3_pmd = {
>  RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd);
>  RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map);
>  RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci");
> -RTE_LOG_REGISTER(hns3_logtype_init, pmd.net.hns3.init, NOTICE);
> -RTE_LOG_REGISTER(hns3_logtype_driver, pmd.net.hns3.driver, NOTICE);
> +RTE_LOG_REGISTER(hns3_logtype_init, pmd.net.hns3.init, INFO);
> +RTE_LOG_REGISTER(hns3_logtype_driver, pmd.net.hns3.driver, INFO);
> 

Why making the PMD more verbose by default? You can update the log level
dynamically when need to be more verbose.

Common approach is to set default PMD log level is to 'NOTICE', I think this is
good to make applications more usable.

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [dpdk-dev] [PATCH 10/11] net/hns3: fix Rx/Tx queue offload capability
  2020-08-25 11:53 ` [dpdk-dev] [PATCH 10/11] net/hns3: fix Rx/Tx queue offload capability Wei Hu (Xavier)
@ 2020-09-04 10:34   ` Ferruh Yigit
  2020-09-08 11:48     ` Wei Hu (Xavier)
  2020-09-08 12:28   ` [dpdk-dev] [PATCH v2] " Wei Hu (Xavier)
  1 sibling, 1 reply; 23+ messages in thread
From: Ferruh Yigit @ 2020-09-04 10:34 UTC (permalink / raw)
  To: Wei Hu (Xavier), dev; +Cc: xavier.huwei

On 8/25/2020 12:53 PM, Wei Hu (Xavier) wrote:
> From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>
> 
> According to rte_eth_rx_queue_setup and rte_eth_tx_queue_setup API
> function, rx_queue_offload_capa and rx_offload_capa, tx_queue_offload_capa
> and tx_offload_capa must be mutually exclusive in the '.dev_infos_get' ops
> implementation function. Otherwise, rte_eth_rx_queue_setup or
> rte_eth_tx_queue_setup will fail, if user uses rx_offload_capa and
> tx_offload_capa obtained by calling the rte_eth_dev_info_get API function.

Can you please clarify what is fixed here?

If the PMD doesn't support 'DEV_TX_OFFLOAD_MBUF_FAST_FREE' to be configured per
queue, it makes sense the update the capability reporting to match it.

But having an offload as queue offload shouldn't cause any error on setting it
on port wise (to all queues). I am asking because if you are getting error
'rte_eth_rx_queue_setup()' / 'rte_eth_tx_queue_setup()' the reason can be
something else.
Also what do you mean by "'tx_queue_offload_capa' and 'tx_offload_capa' must be
mutually exclusive"? All queue offloads should be present in the port offload,
because of an offload can be applied to any specific queue, this means it can be
applied to all queues which means it can be applied port wise.

> 
> Currently, offload capabilities are enabled for all Rx/Tx queues in hns3
> PF and VF PMD driver, and offload capability only applied in a Rx/Tx
> queue is not supported. This patch fixes Rx/Tx queue offload capability.
> 
> Fixes: 1f5ca0b460cd67 ("net/hns3: support some device operations")
> Fixes: a5475d61fa34b8 ("net/hns3: support VF")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Huisong Li <lihuisong@huawei.com>
> Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
> ---
>  drivers/net/hns3/hns3_ethdev.c    | 5 +++--
>  drivers/net/hns3/hns3_ethdev_vf.c | 5 +++--
>  2 files changed, 6 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
> index 14e4b9e35..281d8b928 100644
> --- a/drivers/net/hns3/hns3_ethdev.c
> +++ b/drivers/net/hns3/hns3_ethdev.c
> @@ -2459,6 +2459,7 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
>  	info->max_mac_addrs = HNS3_UC_MACADDR_NUM;
>  	info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
>  	info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
> +	info->rx_queue_offload_capa = 0;

No need to set 'rx_queue_offload_capa' or 'tx_queue_offload_capa' to zero since
zero is their default value.


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [dpdk-dev] [PATCH 00/11] updates for hns3 PMD driver
  2020-09-03  1:04 ` [dpdk-dev] [PATCH 00/11] updates for hns3 PMD driver Wei Hu (Xavier)
@ 2020-09-04 10:34   ` Ferruh Yigit
  0 siblings, 0 replies; 23+ messages in thread
From: Ferruh Yigit @ 2020-09-04 10:34 UTC (permalink / raw)
  To: Wei Hu (Xavier), dev; +Cc: xavier.huwei

On 9/3/2020 2:04 AM, Wei Hu (Xavier) wrote:
> Hi, all
> 
>     Are there any comments?
> 
> Thanks
> 
> Xavier
> 
> On 2020/8/25 19:52, Wei Hu (Xavier) wrote:
>> This series are features and fixes for hns3 PMD driver.
>>
>> Huisong Li (3):
>>    net/hns3: replace private macro with RTE MAX
>>    net/hns3: fix default MAC addr from firmware
>>    net/hns3: fix some incomplete command structures
>>
>> Wei Hu (Xavier) (8):
>>    net/hns3: get device capability from firmware
>>    net/hns3: get dev specifications from firmware
>>    net/hns3: compatibility issues about Rx interrupts
>>    net/hns3: compatibility issues about Tx padding short frame
>>    net/hns3: add more hardware error types
>>    net/hns3: support a maximun 256 FDIR counter
>>    net/hns3: change the log level to INFO
>>    net/hns3: fix Rx/Tx queue offload capability
>>

Since the patches in the set are not dependent to each-other, I partially merged
it. I have already put some comments on the not merged ones (8/11 & 10/11),
after discussion they can be sent as incremental patches.

Except 8/11 & 10/11,
Series applied to dpdk-next-net/main, thanks.

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [dpdk-dev] [PATCH 08/11] net/hns3: change the log level to INFO
  2020-09-04 10:34   ` Ferruh Yigit
@ 2020-09-07 11:34     ` Wei Hu (Xavier)
  2020-09-07 12:10       ` Ferruh Yigit
  0 siblings, 1 reply; 23+ messages in thread
From: Wei Hu (Xavier) @ 2020-09-07 11:34 UTC (permalink / raw)
  To: Ferruh Yigit, Wei Hu (Xavier), dev; +Cc: xavier.huwei

Hi,Ferruh Yigit

On 2020/9/4 18:34, Ferruh Yigit wrote:
> On 8/25/2020 12:53 PM, Wei Hu (Xavier) wrote:
>> From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>
>>
>> This patch changes the log level from NOTICE to INFO.
>>
>> Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
>> ---
>>   drivers/net/hns3/hns3_ethdev.c | 4 ++--
>>   1 file changed, 2 insertions(+), 2 deletions(-)
>>
>> diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
>> index fca035d4f..3827d3277 100644
>> --- a/drivers/net/hns3/hns3_ethdev.c
>> +++ b/drivers/net/hns3/hns3_ethdev.c
>> @@ -5744,5 +5744,5 @@ static struct rte_pci_driver rte_hns3_pmd = {
>>   RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd);
>>   RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map);
>>   RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci");
>> -RTE_LOG_REGISTER(hns3_logtype_init, pmd.net.hns3.init, NOTICE);
>> -RTE_LOG_REGISTER(hns3_logtype_driver, pmd.net.hns3.driver, NOTICE);
>> +RTE_LOG_REGISTER(hns3_logtype_init, pmd.net.hns3.init, INFO);
>> +RTE_LOG_REGISTER(hns3_logtype_driver, pmd.net.hns3.driver, INFO);
>>
> 
> Why making the PMD more verbose by default? You can update the log level
> dynamically when need to be more verbose.
> 
> Common approach is to set default PMD log level is to 'NOTICE', I think this is
> good to make applications more usable.

Currently the hns3 PMD driver invokes the private macro named hns3_info in
several places; we found that this information would never be logged.
     hns3_info -> rte_log(RTE_LOG_INFO, hns3_logtype_dirver,...)

To display the related log information, we need to execute the following 
code in the application, right? Is there any other way?
extern int hns3_logtype_driver;
rte_log_set_level(hns3_logtype_driver, RTE_LOG_INFO);

Thanks, Xavier


> 

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [dpdk-dev] [PATCH 08/11] net/hns3: change the log level to INFO
  2020-09-07 11:34     ` Wei Hu (Xavier)
@ 2020-09-07 12:10       ` Ferruh Yigit
  2020-09-07 12:28         ` Wei Hu (Xavier)
  0 siblings, 1 reply; 23+ messages in thread
From: Ferruh Yigit @ 2020-09-07 12:10 UTC (permalink / raw)
  To: Wei Hu (Xavier), dev; +Cc: xavier.huwei

On 9/7/2020 12:34 PM, Wei Hu (Xavier) wrote:
> Hi,Ferruh Yigit
> 
> On 2020/9/4 18:34, Ferruh Yigit wrote:
>> On 8/25/2020 12:53 PM, Wei Hu (Xavier) wrote:
>>> From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>
>>>
>>> This patch changes the log level from NOTICE to INFO.
>>>
>>> Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
>>> ---
>>>   drivers/net/hns3/hns3_ethdev.c | 4 ++--
>>>   1 file changed, 2 insertions(+), 2 deletions(-)
>>>
>>> diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
>>> index fca035d4f..3827d3277 100644
>>> --- a/drivers/net/hns3/hns3_ethdev.c
>>> +++ b/drivers/net/hns3/hns3_ethdev.c
>>> @@ -5744,5 +5744,5 @@ static struct rte_pci_driver rte_hns3_pmd = {
>>>   RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd);
>>>   RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map);
>>>   RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci");
>>> -RTE_LOG_REGISTER(hns3_logtype_init, pmd.net.hns3.init, NOTICE);
>>> -RTE_LOG_REGISTER(hns3_logtype_driver, pmd.net.hns3.driver, NOTICE);
>>> +RTE_LOG_REGISTER(hns3_logtype_init, pmd.net.hns3.init, INFO);
>>> +RTE_LOG_REGISTER(hns3_logtype_driver, pmd.net.hns3.driver, INFO);
>>>
>>
>> Why making the PMD more verbose by default? You can update the log level
>> dynamically when need to be more verbose.
>>
>> Common approach is to set default PMD log level is to 'NOTICE', I think this is
>> good to make applications more usable.
> 
> Currently hns3 PMD driver invokes the private macro named hns3_info in 
> several places, we found that these information wouldn't never be logged.
>      hns3_info -> rte_log(RTE_LOG_INFO, hns3_logtype_dirver,...)
> 
> To display the related log information, we need to execute the following 
> code in the application, right? Is there any other way?
> extern int hns3_logtype_driver;
> rte_log_set_level(hns3_logtype_driver, RTE_LOG_INFO);
> 

Hi Xavier,

This is mainly it. Setting log level to something ">= 'RTE_LOG_INFO'" using
'rte_log_set_level()' will display INFO level logs.

There are some variant functions too, 'rte_log_set_level_pattern()' &
'rte_log_set_level_regexp()', these let use change log level of multiple
components, like "pmd.net.*"

Another way is provide the log level to application via eal command
"--log-level=<type-match>:<int>", like "--log-level=*:debug". This arg is
wrapper to above APIs.

Or if you are using testpmd, can change the log level via command "set log"
dynamically, like "set log pmd.net.hns3*".
Applications can do something similar to change log levels on demand based on
their need, so components doesn't need to be verbose by default.



^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [dpdk-dev] [PATCH 08/11] net/hns3: change the log level to INFO
  2020-09-07 12:10       ` Ferruh Yigit
@ 2020-09-07 12:28         ` Wei Hu (Xavier)
  0 siblings, 0 replies; 23+ messages in thread
From: Wei Hu (Xavier) @ 2020-09-07 12:28 UTC (permalink / raw)
  To: Ferruh Yigit; +Cc: dev, xavier.huwei

Hi, Ferruh Yigit

On 2020/9/7 20:10, Ferruh Yigit wrote:
> On 9/7/2020 12:34 PM, Wei Hu (Xavier) wrote:
>> Hi,Ferruh Yigit
>>
>> On 2020/9/4 18:34, Ferruh Yigit wrote:
>>> On 8/25/2020 12:53 PM, Wei Hu (Xavier) wrote:
>>>> From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>
>>>>
>>>> This patch changes the log level from NOTICE to INFO.
>>>>
>>>> Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
>>>> ---
>>>>    drivers/net/hns3/hns3_ethdev.c | 4 ++--
>>>>    1 file changed, 2 insertions(+), 2 deletions(-)
>>>>
>>>> diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
>>>> index fca035d4f..3827d3277 100644
>>>> --- a/drivers/net/hns3/hns3_ethdev.c
>>>> +++ b/drivers/net/hns3/hns3_ethdev.c
>>>> @@ -5744,5 +5744,5 @@ static struct rte_pci_driver rte_hns3_pmd = {
>>>>    RTE_PMD_REGISTER_PCI(net_hns3, rte_hns3_pmd);
>>>>    RTE_PMD_REGISTER_PCI_TABLE(net_hns3, pci_id_hns3_map);
>>>>    RTE_PMD_REGISTER_KMOD_DEP(net_hns3, "* igb_uio | vfio-pci");
>>>> -RTE_LOG_REGISTER(hns3_logtype_init, pmd.net.hns3.init, NOTICE);
>>>> -RTE_LOG_REGISTER(hns3_logtype_driver, pmd.net.hns3.driver, NOTICE);
>>>> +RTE_LOG_REGISTER(hns3_logtype_init, pmd.net.hns3.init, INFO);
>>>> +RTE_LOG_REGISTER(hns3_logtype_driver, pmd.net.hns3.driver, INFO);
>>>>
>>> Why making the PMD more verbose by default? You can update the log level
>>> dynamically when need to be more verbose.
>>>
>>> Common approach is to set default PMD log level is to 'NOTICE', I think this is
>>> good to make applications more usable.
>> Currently hns3 PMD driver invokes the private macro named hns3_info in
>> several places, we found that these information wouldn't never be logged.
>>       hns3_info -> rte_log(RTE_LOG_INFO, hns3_logtype_dirver,...)
>>
>> To display the related log information, we need to execute the following
>> code in the application, right? Is there any other way?
>> extern int hns3_logtype_driver;
>> rte_log_set_level(hns3_logtype_driver, RTE_LOG_INFO);
>>
> Hi Xavier,
>
> This is mainly it. Setting log level to something ">= 'RTE_LOG_INFO'" using
> 'rte_log_set_level()' will display INFO level logs.
>
> There are some variant functions too, 'rte_log_set_level_pattern()' &
> 'rte_log_set_level_regexp()', these let use change log level of multiple
> components, like "pmd.net.*"
>
> Another way is provide the log level to application via eal command
> "--log-level=<type-match>:<int>", like "--log-level=*:debug". This arg is
> wrapper to above APIs.
>
> Or if you are using testpmd, can change the log level via command "set log"
> dynamically, like "set log pmd.net.hns3*".
> Applications can do something similar to change log levels on demand based on
> their need, so components doesn't need to be verbose by default.
>
Thanks for your detail description.

Regards

Xavier


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [dpdk-dev] [PATCH 10/11] net/hns3: fix Rx/Tx queue offload capability
  2020-09-04 10:34   ` Ferruh Yigit
@ 2020-09-08 11:48     ` Wei Hu (Xavier)
  0 siblings, 0 replies; 23+ messages in thread
From: Wei Hu (Xavier) @ 2020-09-08 11:48 UTC (permalink / raw)
  To: Ferruh Yigit; +Cc: dev, xavier.huwei, lihuisong

Hi, Ferruh Yigit

On 2020/9/4 18:34, Ferruh Yigit wrote:
> On 8/25/2020 12:53 PM, Wei Hu (Xavier) wrote:
>> From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>
>>
>> According to rte_eth_rx_queue_setup and rte_eth_tx_queue_setup API
>> function, rx_queue_offload_capa and rx_offload_capa, tx_queue_offload_capa
>> and tx_offload_capa must be mutually exclusive in the '.dev_infos_get' ops
>> implementation function. Otherwise, rte_eth_rx_queue_setup or
>> rte_eth_tx_queue_setup will fail, if user uses rx_offload_capa and
>> tx_offload_capa obtained by calling the rte_eth_dev_info_get API function.
> Can you please clarify what is fixed here?
>
> If the PMD doesn't support 'DEV_TX_OFFLOAD_MBUF_FAST_FREE' to be configured per
> queue, it makes sense the update the capability reporting to match it.
>
> But having an offload as queue offload shouldn't cause any error on setting it
> on port wise (to all queues). I am asking because if you are getting error
> 'rte_eth_rx_queue_setup()' / 'rte_eth_tx_queue_setup()' the reason can be
> something else.
> Also what do you mean by "'tx_queue_offload_capa' and 'tx_offload_capa' must be
> mutually exclusive"? All queue offloads should be present in the port offload,
> because of an offload can be applied to any specific queue, this means it can be
> applied to all queues which means it can be applied port wise.

"rx_queue_offload_capa and rx_offload_capa, tx_queue_offload_capa

  and tx_offload_capa must be mutually exclusive" -- It's wrong, we 
misunderstood

the process of rte_eth_rx_queue_setup and rte_eth_tx_queue_setup.

Thanks :-)


We will update the commit log as below:

Currently, offload capabilities are only enabled for all Rx/Tx queues in hns3
PF and VF PMD driver, and offload capability only applied in a Rx/Tx
queue is not supported. So this patch moves 'DEV_TX_OFFLOAD_MBUF_FAST_FREE'
from tx_queue_offload_capa to tx_offload_capa.


>> Currently, offload capabilities are enabled for all Rx/Tx queues in hns3
>> PF and VF PMD driver, and offload capability only applied in a Rx/Tx
>> queue is not supported. This patch fixes Rx/Tx queue offload capability.
>>
>> Fixes: 1f5ca0b460cd67 ("net/hns3: support some device operations")
>> Fixes: a5475d61fa34b8 ("net/hns3: support VF")
>> Cc: stable@dpdk.org
>>
>> Signed-off-by: Huisong Li <lihuisong@huawei.com>
>> Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
>> ---
>>   drivers/net/hns3/hns3_ethdev.c    | 5 +++--
>>   drivers/net/hns3/hns3_ethdev_vf.c | 5 +++--
>>   2 files changed, 6 insertions(+), 4 deletions(-)
>>
>> diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
>> index 14e4b9e35..281d8b928 100644
>> --- a/drivers/net/hns3/hns3_ethdev.c
>> +++ b/drivers/net/hns3/hns3_ethdev.c
>> @@ -2459,6 +2459,7 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
>>   	info->max_mac_addrs = HNS3_UC_MACADDR_NUM;
>>   	info->max_mtu = info->max_rx_pktlen - HNS3_ETH_OVERHEAD;
>>   	info->max_lro_pkt_size = HNS3_MAX_LRO_SIZE;
>> +	info->rx_queue_offload_capa = 0;
> No need to set 'rx_queue_offload_capa' or 'tx_queue_offload_capa' to zero since
> zero is their default value.

Ok, I  will fix it in V2.

Thanks


Regards

Xavier


^ permalink raw reply	[flat|nested] 23+ messages in thread

* [dpdk-dev] [PATCH v2] net/hns3: fix Rx/Tx queue offload capability
  2020-08-25 11:53 ` [dpdk-dev] [PATCH 10/11] net/hns3: fix Rx/Tx queue offload capability Wei Hu (Xavier)
  2020-09-04 10:34   ` Ferruh Yigit
@ 2020-09-08 12:28   ` Wei Hu (Xavier)
  2020-09-15 13:35     ` Ferruh Yigit
  1 sibling, 1 reply; 23+ messages in thread
From: Wei Hu (Xavier) @ 2020-09-08 12:28 UTC (permalink / raw)
  To: dev, ferruh.yigit; +Cc: xavier.huwei, lihuisong

From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>

Currently, the hns3 PF/VF PMD driver only supports enabling offload
capabilities for all Rx/Tx queues at once; applying an offload capability
to a single Rx/Tx queue is not supported. So this patch moves
'DEV_TX_OFFLOAD_MBUF_FAST_FREE' from tx_queue_offload_capa to
tx_offload_capa.

Fixes: 1f5ca0b460cd67 ("net/hns3: support some device operations")
Fixes: a5475d61fa34b8 ("net/hns3: support VF")
Cc: stable@dpdk.org

Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
---
v1 -> v2: move DEV_TX_OFFLOAD_MBUF_FAST_FREE from tx_queue_offload
          to tx_offload_capa in .dev_info_get ops implementation
	  function.
---
 drivers/net/hns3/hns3_ethdev.c    | 3 +--
 drivers/net/hns3/hns3_ethdev_vf.c | 3 +--
 2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 0727c6d..7af504e 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2478,7 +2478,6 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 				 DEV_RX_OFFLOAD_JUMBO_FRAME |
 				 DEV_RX_OFFLOAD_RSS_HASH |
 				 DEV_RX_OFFLOAD_TCP_LRO);
-	info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 	info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
 				 DEV_TX_OFFLOAD_IPV4_CKSUM |
 				 DEV_TX_OFFLOAD_TCP_CKSUM |
@@ -2489,7 +2488,7 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 				 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
 				 DEV_TX_OFFLOAD_GRE_TNL_TSO |
 				 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-				 info->tx_queue_offload_capa |
+				 DEV_TX_OFFLOAD_MBUF_FAST_FREE |
 				 hns3_txvlan_cap_get(hw));
 
 	info->rx_desc_lim = (struct rte_eth_desc_lim) {
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 44e51b5..0be76f6 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -942,7 +942,6 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 				 DEV_RX_OFFLOAD_JUMBO_FRAME |
 				 DEV_RX_OFFLOAD_RSS_HASH |
 				 DEV_RX_OFFLOAD_TCP_LRO);
-	info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 	info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
 				 DEV_TX_OFFLOAD_IPV4_CKSUM |
 				 DEV_TX_OFFLOAD_TCP_CKSUM |
@@ -953,7 +952,7 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
 				 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
 				 DEV_TX_OFFLOAD_GRE_TNL_TSO |
 				 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
-				 info->tx_queue_offload_capa |
+				 DEV_TX_OFFLOAD_MBUF_FAST_FREE |
 				 hns3_txvlan_cap_get(hw));
 
 	info->rx_desc_lim = (struct rte_eth_desc_lim) {
-- 
2.9.5


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [dpdk-dev] [PATCH v2] net/hns3: fix Rx/Tx queue offload capability
  2020-09-08 12:28   ` [dpdk-dev] [PATCH v2] " Wei Hu (Xavier)
@ 2020-09-15 13:35     ` Ferruh Yigit
  0 siblings, 0 replies; 23+ messages in thread
From: Ferruh Yigit @ 2020-09-15 13:35 UTC (permalink / raw)
  To: Wei Hu (Xavier), dev; +Cc: xavier.huwei, lihuisong

On 9/8/2020 1:28 PM, Wei Hu (Xavier) wrote:
> From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>
> 
> Currently, offload capabilities are only enabled for all Rx/Tx queues
> in hns3 PF/VF PMD driver, and offload capability only applied in a Rx/Tx
> queue is not supported. So this patch moves 'DEV_TX_OFFLOAD_MBUF_FAST_FREE'
> from tx_queue_offload_capa to tx_offload_capa.
> 
> Fixes: 1f5ca0b460cd67 ("net/hns3: support some device operations")
> Fixes: a5475d61fa34b8 ("net/hns3: support VF")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Huisong Li <lihuisong@huawei.com>
> Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>

Applied to dpdk-next-net/main, thanks.


^ permalink raw reply	[flat|nested] 23+ messages in thread

end of thread, other threads:[~2020-09-15 13:35 UTC | newest]

Thread overview: 23+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-08-25 11:52 [dpdk-dev] [PATCH 00/11] updates for hns3 PMD driver Wei Hu (Xavier)
2020-08-25 11:52 ` [dpdk-dev] [PATCH 01/11] net/hns3: get device capability from firmware Wei Hu (Xavier)
2020-08-25 11:52 ` [dpdk-dev] [PATCH 02/11] net/hns3: get dev specifications " Wei Hu (Xavier)
2020-08-25 11:52 ` [dpdk-dev] [PATCH 03/11] net/hns3: compatibility issues about Rx interrupts Wei Hu (Xavier)
2020-08-25 11:52 ` [dpdk-dev] [PATCH 04/11] net/hns3: compatibility issues about Tx padding short frame Wei Hu (Xavier)
2020-08-25 11:52 ` [dpdk-dev] [PATCH 05/11] net/hns3: add more hardware error types Wei Hu (Xavier)
2020-09-04 10:34   ` Ferruh Yigit
2020-08-25 11:53 ` [dpdk-dev] [PATCH 06/11] net/hns3: support a maximum 256 FDIR counter Wei Hu (Xavier)
2020-08-25 11:53 ` [dpdk-dev] [PATCH 07/11] net/hns3: replace private macro with RTE MAX Wei Hu (Xavier)
2020-08-25 11:53 ` [dpdk-dev] [PATCH 08/11] net/hns3: change the log level to INFO Wei Hu (Xavier)
2020-09-04 10:34   ` Ferruh Yigit
2020-09-07 11:34     ` Wei Hu (Xavier)
2020-09-07 12:10       ` Ferruh Yigit
2020-09-07 12:28         ` Wei Hu (Xavier)
2020-08-25 11:53 ` [dpdk-dev] [PATCH 09/11] net/hns3: fix default MAC addr from firmware Wei Hu (Xavier)
2020-08-25 11:53 ` [dpdk-dev] [PATCH 10/11] net/hns3: fix Rx/Tx queue offload capability Wei Hu (Xavier)
2020-09-04 10:34   ` Ferruh Yigit
2020-09-08 11:48     ` Wei Hu (Xavier)
2020-09-08 12:28   ` [dpdk-dev] [PATCH v2] " Wei Hu (Xavier)
2020-09-15 13:35     ` Ferruh Yigit
2020-08-25 11:53 ` [dpdk-dev] [PATCH 11/11] net/hns3: fix some incomplete command structures Wei Hu (Xavier)
2020-09-03  1:04 ` [dpdk-dev] [PATCH 00/11] updates for hns3 PMD driver Wei Hu (Xavier)
2020-09-04 10:34   ` Ferruh Yigit

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).