DPDK patches and discussions
From: Ziyang Xuan <xuanziyang2@huawei.com>
To: <dev@dpdk.org>
Cc: <ferruh.yigit@intel.com>, <cloud.wangxiaoyun@huawei.com>,
	<zhouguoyang@huawei.com>, <shahar.belkar@huawei.com>,
	<luoxianjun@huawei.com>, Ziyang Xuan <xuanziyang2@huawei.com>
Subject: [dpdk-dev] [PATCH v5 07/15] net/hinic/base: add code about hardware operation
Date: Wed, 19 Jun 2019 23:58:01 +0800
Message-ID: <1d0d1cb3bdd9f22e07063f36d7545d26e8d8b9df.1560958308.git.xuanziyang2@huawei.com>
In-Reply-To: <cover.1560958308.git.xuanziyang2@huawei.com>

Add code for hardware operation, including initialization and query of
device capabilities, DMA memory management, interrupt configuration and
asynchronous event handling.

Signed-off-by: Ziyang Xuan <xuanziyang2@huawei.com>
---
 drivers/net/hinic/base/hinic_pmd_cfg.c   |  208 ++++
 drivers/net/hinic/base/hinic_pmd_cfg.h   |  145 +++
 drivers/net/hinic/base/hinic_pmd_hwdev.c | 1414 ++++++++++++++++++++++
 drivers/net/hinic/base/hinic_pmd_hwdev.h |  485 ++++++++
 4 files changed, 2252 insertions(+)
 create mode 100644 drivers/net/hinic/base/hinic_pmd_cfg.c
 create mode 100644 drivers/net/hinic/base/hinic_pmd_cfg.h
 create mode 100644 drivers/net/hinic/base/hinic_pmd_hwdev.c
 create mode 100644 drivers/net/hinic/base/hinic_pmd_hwdev.h

diff --git a/drivers/net/hinic/base/hinic_pmd_cfg.c b/drivers/net/hinic/base/hinic_pmd_cfg.c
new file mode 100644
index 000000000..61537b12c
--- /dev/null
+++ b/drivers/net/hinic/base/hinic_pmd_cfg.c
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include "hinic_compat.h"
+#include "hinic_pmd_hwdev.h"
+#include "hinic_pmd_hwif.h"
+#include "hinic_pmd_mgmt.h"
+#include "hinic_pmd_eqs.h"
+#include "hinic_pmd_cfg.h"
+
+bool hinic_support_nic(struct hinic_hwdev *hwdev, struct nic_service_cap *cap)
+{
+	if (!IS_NIC_TYPE(hwdev))
+		return false;
+
+	if (cap)
+		memcpy(cap, &hwdev->cfg_mgmt->svc_cap.nic_cap, sizeof(*cap));
+
+	return true;
+}
+
+static void hinic_parse_shared_res_cap(struct service_cap *cap,
+					struct hinic_dev_cap *dev_cap,
+					__rte_unused enum func_type type)
+{
+	struct host_shared_resource_cap *shared_cap = &cap->shared_res_cap;
+
+	shared_cap->host_pctxs = dev_cap->host_pctx_num;
+
+	if (dev_cap->host_sf_en)
+		cap->sf_en = true;
+	else
+		cap->sf_en = false;
+
+	shared_cap->host_cctxs = dev_cap->host_ccxt_num;
+	shared_cap->host_scqs = dev_cap->host_scq_num;
+	shared_cap->host_srqs = dev_cap->host_srq_num;
+	shared_cap->host_mpts = dev_cap->host_mpt_num;
+
+	PMD_DRV_LOG(INFO, "Get share resource capability:");
+	PMD_DRV_LOG(INFO, "host_pctxs: 0x%x, host_cctxs: 0x%x, host_scqs: 0x%x, host_srqs: 0x%x, host_mpts: 0x%x",
+		    shared_cap->host_pctxs, shared_cap->host_cctxs,
+		    shared_cap->host_scqs, shared_cap->host_srqs,
+		    shared_cap->host_mpts);
+}
+
+static void hinic_parse_l2nic_res_cap(struct service_cap *cap,
+				struct hinic_dev_cap *dev_cap,
+				enum func_type type)
+{
+	struct nic_service_cap *nic_cap = &cap->nic_cap;
+
+	if (type == TYPE_PF || type == TYPE_PPF) {
+		nic_cap->max_sqs = dev_cap->nic_max_sq + 1;
+		nic_cap->max_rqs = dev_cap->nic_max_rq + 1;
+		nic_cap->vf_max_sqs = dev_cap->nic_vf_max_sq + 1;
+		nic_cap->vf_max_rqs = dev_cap->nic_vf_max_rq + 1;
+	} else {
+		nic_cap->max_sqs = dev_cap->nic_max_sq;
+		nic_cap->max_rqs = dev_cap->nic_max_rq;
+		nic_cap->vf_max_sqs = 0;
+		nic_cap->vf_max_rqs = 0;
+	}
+
+	if (dev_cap->nic_lro_en)
+		nic_cap->lro_en = true;
+	else
+		nic_cap->lro_en = false;
+
+	nic_cap->lro_sz = dev_cap->nic_lro_sz;
+	nic_cap->tso_sz = dev_cap->nic_tso_sz;
+
+	PMD_DRV_LOG(INFO, "Get l2nic resource capability:");
+	PMD_DRV_LOG(INFO, "max_sqs: 0x%x, max_rqs: 0x%x, vf_max_sqs: 0x%x, vf_max_rqs: 0x%x",
+		    nic_cap->max_sqs, nic_cap->max_rqs,
+		    nic_cap->vf_max_sqs, nic_cap->vf_max_rqs);
+}
+
+u16 hinic_func_max_qnum(void *hwdev)
+{
+	struct hinic_hwdev *dev = hwdev;
+
+	return dev->cfg_mgmt->svc_cap.max_sqs;
+}
+
+int init_cfg_mgmt(struct hinic_hwdev *hwdev)
+{
+	struct cfg_mgmt_info *cfg_mgmt;
+
+	cfg_mgmt = kzalloc(sizeof(*cfg_mgmt), GFP_KERNEL);
+	if (!cfg_mgmt)
+		return -ENOMEM;
+
+	hwdev->cfg_mgmt = cfg_mgmt;
+	cfg_mgmt->hwdev = hwdev;
+
+	return 0;
+}
+
+void free_cfg_mgmt(struct hinic_hwdev *hwdev)
+{
+	kfree(hwdev->cfg_mgmt);
+	hwdev->cfg_mgmt = NULL;
+}
+
+static void hinic_parse_pub_res_cap(struct service_cap *cap,
+			      struct hinic_dev_cap *dev_cap,
+			      enum func_type type)
+{
+	cap->host_id = dev_cap->host_id;
+	cap->ep_id = dev_cap->ep_id;
+	cap->max_cos_id = dev_cap->max_cos_id;
+	cap->er_id = dev_cap->er_id;
+	cap->port_id = dev_cap->port_id;
+
+	if (type == TYPE_PF || type == TYPE_PPF) {
+		cap->max_vf = dev_cap->max_vf;
+		cap->pf_num = dev_cap->pf_num;
+		cap->pf_id_start = dev_cap->pf_id_start;
+		cap->vf_num = dev_cap->vf_num;
+		cap->vf_id_start = dev_cap->vf_id_start;
+		cap->max_sqs = dev_cap->nic_max_sq + 1;
+		cap->max_rqs = dev_cap->nic_max_rq + 1;
+	}
+
+	cap->chip_svc_type = CFG_SVC_NIC_BIT0;
+	cap->host_total_function = dev_cap->host_total_func;
+	cap->host_oq_id_mask_val = dev_cap->host_oq_id_mask_val;
+
+	PMD_DRV_LOG(INFO, "Get public resource capability:");
+	PMD_DRV_LOG(INFO, "host_id: 0x%x, ep_id: 0x%x, intr_type: 0x%x, max_cos_id: 0x%x, er_id: 0x%x, port_id: 0x%x",
+		    cap->host_id, cap->ep_id, cap->intr_chip_en,
+		    cap->max_cos_id, cap->er_id, cap->port_id);
+	PMD_DRV_LOG(INFO, "host_total_function: 0x%x, host_oq_id_mask_val: 0x%x, max_vf: 0x%x",
+		    cap->host_total_function, cap->host_oq_id_mask_val,
+		    cap->max_vf);
+	PMD_DRV_LOG(INFO, "pf_num: 0x%x, pf_id_start: 0x%x, vf_num: 0x%x, vf_id_start: 0x%x",
+		    cap->pf_num, cap->pf_id_start,
+		    cap->vf_num, cap->vf_id_start);
+}
+
+static void parse_dev_cap(struct hinic_hwdev *dev,
+			  struct hinic_dev_cap *dev_cap,
+			  enum func_type type)
+{
+	struct service_cap *cap = &dev->cfg_mgmt->svc_cap;
+
+	/* Public resource */
+	hinic_parse_pub_res_cap(cap, dev_cap, type);
+
+	/* PPF managed dynamic resource */
+	if (type == TYPE_PPF)
+		hinic_parse_shared_res_cap(cap, dev_cap, type);
+
+	/* L2 NIC resource */
+	if (IS_NIC_TYPE(dev))
+		hinic_parse_l2nic_res_cap(cap, dev_cap, type);
+}
+
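+/*
+ * Query the device capabilities from the management firmware; the same
+ * buffer is used for both the request and the response.
+ */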
+static int get_cap_from_fw(struct hinic_hwdev *dev, enum func_type type)
+{
+	int err;
+	u16 in_len, out_len;
+	struct hinic_dev_cap dev_cap;
+
+	memset(&dev_cap, 0, sizeof(dev_cap));
+	in_len = sizeof(dev_cap);
+	out_len = in_len;
+	dev_cap.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+	err = hinic_msg_to_mgmt_sync(dev, HINIC_MOD_CFGM, HINIC_CFG_NIC_CAP,
+				     &dev_cap, in_len, &dev_cap, &out_len, 0);
+	if (err || dev_cap.mgmt_msg_head.status || !out_len) {
+		PMD_DRV_LOG(ERR, "Get capability from FW failed, err: %d, status: %d, out_len: %d",
+			err, dev_cap.mgmt_msg_head.status, out_len);
+		return -EFAULT;
+	}
+
+	parse_dev_cap(dev, &dev_cap, type);
+	return 0;
+}
+
+static int get_dev_cap(struct hinic_hwdev *dev)
+{
+	int err;
+	enum func_type type = HINIC_FUNC_TYPE(dev);
+
+	switch (type) {
+	case TYPE_PF:
+	case TYPE_PPF:
+		err = get_cap_from_fw(dev, type);
+		if (err) {
+			PMD_DRV_LOG(ERR, "Get PF/PPF capability failed");
+			return err;
+		}
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Unsupported PCI function type");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int hinic_init_capability(struct hinic_hwdev *hwdev)
+{
+	return get_dev_cap(hwdev);
+}
diff --git a/drivers/net/hinic/base/hinic_pmd_cfg.h b/drivers/net/hinic/base/hinic_pmd_cfg.h
new file mode 100644
index 000000000..1741ca44a
--- /dev/null
+++ b/drivers/net/hinic/base/hinic_pmd_cfg.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_CFG_H_
+#define _HINIC_PMD_CFG_H_
+
+#define CFG_MAX_CMD_TIMEOUT     8000 /* ms */
+
+#define IS_NIC_TYPE(dev) \
+	((dev)->cfg_mgmt->svc_cap.chip_svc_type & CFG_SVC_NIC_BIT0)
+
+struct host_shared_resource_cap {
+	u32 host_pctxs; /* Parent Context max 1M, IOE and FCoE max 8K flows */
+	u32 host_cctxs; /* Child Context: max 8K */
+	u32 host_scqs;  /* shared CQ, chip interface module uses 1 SCQ,
+			 * TOE/IOE/FCoE each uses 1 SCQ,
+			 * RoCE/IWARP uses multiple SCQs,
+			 * so at least 6 SCQs are needed
+			 */
+	u32 host_srqs; /* SRQ number: 256K */
+	u32 host_mpts; /* MR number:1M */
+};
+
+struct nic_service_cap {
+	/* PF resources */
+	u16 max_sqs;
+	u16 max_rqs;
+
+	/* VF resources, which the VF obtains from its corresponding PF
+	 * through the mailbox mechanism
+	 */
+	u16 vf_max_sqs;
+	u16 vf_max_rqs;
+
+	bool lro_en;    /* LRO feature enable bit */
+	u8 lro_sz;      /* LRO context space: n*16B */
+	u8 tso_sz;      /* TSO context space: n*16B */
+};
+
+/* service type related definitions */
+enum cfg_svc_type_en {
+	CFG_SVC_NIC_BIT0    = (1 << 0),
+};
+
+/* device capability */
+struct service_cap {
+	enum cfg_svc_type_en chip_svc_type;	/* HW supported service type */
+
+	/* Host global resources */
+	u16 host_total_function;
+	u8 host_oq_id_mask_val;
+	u8 host_id;
+	u8 ep_id;
+	u8 intr_chip_en;
+	u8 max_cos_id;	/* PF/VF's max cos id */
+	u8 er_id;	/* PF/VF's ER */
+	u8 port_id;	/* PF/VF's physical port */
+	u8 max_vf;	/* max VF number that PF supported */
+	bool sf_en;	/* stateful service status */
+	u16 max_sqs;
+	u16 max_rqs;
+
+	u32 pf_num;
+	u32 pf_id_start;
+	u32 vf_num;
+	u32 vf_id_start;
+
+	struct host_shared_resource_cap shared_res_cap; /* shared capability */
+	struct nic_service_cap      nic_cap;            /* NIC capability */
+};
+
+struct cfg_mgmt_info {
+	struct hinic_hwdev *hwdev;
+	struct service_cap  svc_cap;
+};
+
+struct hinic_dev_cap {
+	struct hinic_mgmt_msg_head mgmt_msg_head;
+
+	/* Public resource */
+	u8 sf_svc_attr;
+	u8 host_id;
+	u8 sf_en_pf;
+	u8 sf_en_vf;
+
+	u8 ep_id;
+	u8 intr_type;
+	u8 max_cos_id;
+	u8 er_id;
+	u8 port_id;
+	u8 max_vf;
+	u16 svc_cap_en;
+	u16 host_total_func;
+	u8 host_oq_id_mask_val;
+	u8 max_vf_cos_id;
+
+	u32 max_conn_num;
+	u16 max_stick2cache_num;
+	u16 max_bfilter_start_addr;
+	u16 bfilter_len;
+	u16 hash_bucket_num;
+	u8 cfg_file_ver;
+	u8 net_port_mode;
+	u8 valid_cos_bitmap;	/* every bit indicates a valid cos */
+	u8 rsvd1;
+	u32 pf_num;
+	u32 pf_id_start;
+	u32 vf_num;
+	u32 vf_id_start;
+
+	/* shared resource */
+	u32 host_pctx_num;
+	u8 host_sf_en;
+	u8 rsvd2[3];
+	u32 host_ccxt_num;
+	u32 host_scq_num;
+	u32 host_srq_num;
+	u32 host_mpt_num;
+
+	/* l2nic */
+	u16 nic_max_sq;
+	u16 nic_max_rq;
+	u16 nic_vf_max_sq;
+	u16 nic_vf_max_rq;
+	u8 nic_lro_en;
+	u8 nic_lro_sz;
+	u8 nic_tso_sz;
+	u8 rsvd3;
+
+	u32 rsvd4[50];
+};
+
+/* Obtain service_cap.max_sqs, the max queue number of this function */
+u16 hinic_func_max_qnum(void *hwdev);
+
+int init_cfg_mgmt(struct hinic_hwdev *hwdev);
+
+void free_cfg_mgmt(struct hinic_hwdev *hwdev);
+
+int hinic_init_capability(struct hinic_hwdev *hwdev);
+
+bool hinic_support_nic(struct hinic_hwdev *hwdev, struct nic_service_cap *cap);
+
+#endif /* _HINIC_PMD_CFG_H_ */
diff --git a/drivers/net/hinic/base/hinic_pmd_hwdev.c b/drivers/net/hinic/base/hinic_pmd_hwdev.c
new file mode 100644
index 000000000..4f70bafe5
--- /dev/null
+++ b/drivers/net/hinic/base/hinic_pmd_hwdev.c
@@ -0,0 +1,1414 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <rte_ethdev_driver.h>
+#include <rte_bus_pci.h>
+#include <rte_hash.h>
+#include <rte_jhash.h>
+
+#include "hinic_compat.h"
+#include "hinic_csr.h"
+#include "hinic_pmd_hwdev.h"
+#include "hinic_pmd_hwif.h"
+#include "hinic_pmd_wq.h"
+#include "hinic_pmd_cmdq.h"
+#include "hinic_pmd_mgmt.h"
+#include "hinic_pmd_niccfg.h"
+
+#define HINIC_DEAULT_EQ_MSIX_PENDING_LIMIT		0
+#define HINIC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG		0xFF
+#define HINIC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG		7
+
+#define HINIC_FLR_TIMEOUT				1000
+
+#define FFM_RECORD_NUM_MAX				32
+
+#define HINIC_DMA_ATTR_ENTRY_ST_SHIFT			0
+#define HINIC_DMA_ATTR_ENTRY_AT_SHIFT			8
+#define HINIC_DMA_ATTR_ENTRY_PH_SHIFT			10
+#define HINIC_DMA_ATTR_ENTRY_NO_SNOOPING_SHIFT		12
+#define HINIC_DMA_ATTR_ENTRY_TPH_EN_SHIFT		13
+
+#define HINIC_DMA_ATTR_ENTRY_ST_MASK			0xFF
+#define HINIC_DMA_ATTR_ENTRY_AT_MASK			0x3
+#define HINIC_DMA_ATTR_ENTRY_PH_MASK			0x3
+#define HINIC_DMA_ATTR_ENTRY_NO_SNOOPING_MASK		0x1
+#define HINIC_DMA_ATTR_ENTRY_TPH_EN_MASK		0x1
+
+#define HINIC_DMA_ATTR_ENTRY_SET(val, member)			\
+		(((u32)(val) & HINIC_DMA_ATTR_ENTRY_##member##_MASK) << \
+			HINIC_DMA_ATTR_ENTRY_##member##_SHIFT)
+
+#define HINIC_DMA_ATTR_ENTRY_CLEAR(val, member)		\
+		((val) & (~(HINIC_DMA_ATTR_ENTRY_##member##_MASK	\
+			<< HINIC_DMA_ATTR_ENTRY_##member##_SHIFT)))
+
+#define HINIC_PCIE_ST_DISABLE				0
+#define HINIC_PCIE_AT_DISABLE				0
+#define HINIC_PCIE_PH_DISABLE				0
+#define PCIE_MSIX_ATTR_ENTRY				0
+
+#define HINIC_HASH_FUNC					rte_jhash
+#define HINIC_HASH_KEY_LEN				(sizeof(dma_addr_t))
+#define HINIC_HASH_FUNC_INIT_VAL			0
+
+static const char *__hw_to_char_fec[HILINK_FEC_MAX_TYPE] = {
+	"RS-FEC", "BASE-FEC", "NO-FEC"};
+
+static const char *__hw_to_char_port_type[LINK_PORT_MAX_TYPE] = {
+	"Unknown", "Fibre", "Electric", "Direct Attach Copper", "AOC",
+	"Back plane", "BaseT"
+};
+
+static const char *hinic_module_link_err[LINK_ERR_NUM] = {
+	"Unrecognized module",
+};
+
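+/*
+ * DMA-able memory is backed by an rte_memzone. Each allocation is recorded
+ * in a hash table keyed by its IOVA, so that the free path can recover the
+ * memzone from the bus address alone.
+ */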
+static void *
+hinic_dma_mem_zalloc(struct hinic_hwdev *hwdev, size_t size,
+		dma_addr_t *dma_handle, unsigned int flag, unsigned int align)
+{
+	int rc, alloc_cnt;
+	const struct rte_memzone *mz;
+	char z_name[RTE_MEMZONE_NAMESIZE];
+	hash_sig_t sig;
+	rte_iova_t iova;
+
+	if (dma_handle == NULL || 0 == size)
+		return NULL;
+
+	alloc_cnt = rte_atomic32_add_return(&hwdev->os_dep.dma_alloc_cnt, 1);
+	snprintf(z_name, sizeof(z_name), "%s_%d",
+		 hwdev->pcidev_hdl->name, alloc_cnt);
+
+	mz = rte_memzone_reserve_aligned(z_name, size, SOCKET_ID_ANY,
+					 flag, align);
+	if (!mz) {
+		PMD_DRV_LOG(ERR, "Alloc dma able memory failed, errno: %d, ma_name: %s, size: 0x%zx",
+			    rte_errno, z_name, size);
+		return NULL;
+	}
+
+	iova = mz->iova;
+
+	/* check if the phys_addr already exists */
+	sig = HINIC_HASH_FUNC(&iova, HINIC_HASH_KEY_LEN,
+			      HINIC_HASH_FUNC_INIT_VAL);
+	rc = rte_hash_lookup_with_hash(hwdev->os_dep.dma_addr_hash,
+				       &iova, sig);
+	if (rc >= 0) {
+		PMD_DRV_LOG(ERR, "Dma addr: %p already in hash table, error: %d, mz_name: %s",
+			(void *)iova, rc, z_name);
+		goto phys_addr_hash_err;
+	}
+
+	/* record paddr in hash table */
+	rte_spinlock_lock(&hwdev->os_dep.dma_hash_lock);
+	rc = rte_hash_add_key_with_hash_data(hwdev->os_dep.dma_addr_hash,
+					     &iova, sig,
+					     (void *)(u64)mz);
+	rte_spinlock_unlock(&hwdev->os_dep.dma_hash_lock);
+	if (rc) {
+		PMD_DRV_LOG(ERR, "Insert dma addr: %p hash failed, error: %d, mz_name: %s",
+			(void *)iova, rc, z_name);
+		goto phys_addr_hash_err;
+	}
+	*dma_handle = iova;
+	memset(mz->addr, 0, size);
+
+	return mz->addr;
+
+phys_addr_hash_err:
+	(void)rte_memzone_free(mz);
+
+	return NULL;
+}
+
+static void
+hinic_dma_mem_free(struct hinic_hwdev *hwdev, size_t size,
+		   void *virt, dma_addr_t phys)
+{
+	int rc;
+	struct rte_memzone *mz = NULL;
+	struct rte_hash *hash;
+	hash_sig_t sig;
+
+	if (virt == NULL || phys == 0)
+		return;
+
+	hash = hwdev->os_dep.dma_addr_hash;
+	sig = HINIC_HASH_FUNC(&phys, HINIC_HASH_KEY_LEN,
+			      HINIC_HASH_FUNC_INIT_VAL);
+	rc = rte_hash_lookup_with_hash_data(hash, &phys, sig, (void **)&mz);
+	if (rc < 0) {
+		PMD_DRV_LOG(ERR, "Can not find phys_addr: %p, error: %d",
+			(void *)phys, rc);
+		return;
+	}
+
+	if (virt != mz->addr || size > mz->len) {
+		PMD_DRV_LOG(ERR, "Match mz_info failed: "
+			"mz.name: %s, mz.phys: %p, mz.virt: %p, mz.len: %zu, "
+			"phys: %p, virt: %p, size: %zu",
+			mz->name, (void *)mz->iova, mz->addr, mz->len,
+			(void *)phys, virt, size);
+	}
+
+	rte_spinlock_lock(&hwdev->os_dep.dma_hash_lock);
+	(void)rte_hash_del_key_with_hash(hash, &phys, sig);
+	rte_spinlock_unlock(&hwdev->os_dep.dma_hash_lock);
+
+	(void)rte_memzone_free(mz);
+}
+
+void *dma_zalloc_coherent(void *hwdev, size_t size,
+			  dma_addr_t *dma_handle, gfp_t flag)
+{
+	return hinic_dma_mem_zalloc(hwdev, size, dma_handle, flag,
+				    RTE_CACHE_LINE_SIZE);
+}
+
+void *dma_zalloc_coherent_aligned(void *hwdev, size_t size,
+				  dma_addr_t *dma_handle, gfp_t flag)
+{
+	return hinic_dma_mem_zalloc(hwdev, size, dma_handle, flag,
+				    HINIC_PAGE_SIZE);
+}
+
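+/* 256KB-aligned variant, assuming HINIC_PAGE_SIZE * 64 yields 256KB */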
+void *dma_zalloc_coherent_aligned256k(void *hwdev, size_t size,
+				      dma_addr_t *dma_handle, gfp_t flag)
+{
+	return hinic_dma_mem_zalloc(hwdev, size, dma_handle, flag,
+				    HINIC_PAGE_SIZE * 64);
+}
+
+void dma_free_coherent(void *hwdev, size_t size, void *virt, dma_addr_t phys)
+{
+	hinic_dma_mem_free(hwdev, size, virt, phys);
+}
+
+void dma_free_coherent_volatile(void *hwdev, size_t size,
+				volatile void *virt, dma_addr_t phys)
+{
+	int rc;
+	struct rte_memzone *mz = NULL;
+	struct hinic_hwdev *dev = hwdev;
+	struct rte_hash *hash;
+	hash_sig_t sig;
+
+	if (virt == NULL || phys == 0)
+		return;
+
+	hash = dev->os_dep.dma_addr_hash;
+	sig = HINIC_HASH_FUNC(&phys, HINIC_HASH_KEY_LEN,
+			      HINIC_HASH_FUNC_INIT_VAL);
+	rc = rte_hash_lookup_with_hash_data(hash, &phys, sig, (void **)&mz);
+	if (rc < 0) {
+		PMD_DRV_LOG(ERR, "Can not find phys_addr: %p, error: %d",
+			(void *)phys, rc);
+		return;
+	}
+
+	if (virt != mz->addr || size > mz->len) {
+		PMD_DRV_LOG(ERR, "Match mz_info failed: "
+			"mz.name:%s, mz.phys:%p, mz.virt:%p, mz.len:%zu, "
+			"phys:%p, virt:%p, size:%zu",
+			mz->name, (void *)mz->iova, mz->addr, mz->len,
+			(void *)phys, virt, size);
+	}
+
+	rte_spinlock_lock(&dev->os_dep.dma_hash_lock);
+	(void)rte_hash_del_key_with_hash(hash, &phys, sig);
+	rte_spinlock_unlock(&dev->os_dep.dma_hash_lock);
+
+	(void)rte_memzone_free(mz);
+}
+
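+/*
+ * Minimal dma pool emulation: the pool only records element size, alignment
+ * and an in-use counter; every dma_pool_alloc() still performs a separate
+ * memzone-backed allocation.
+ */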
+struct dma_pool *dma_pool_create(const char *name, void *dev,
+				 size_t size, size_t align, size_t boundary)
+{
+	struct pci_pool *pool;
+
+	pool = rte_zmalloc(NULL, sizeof(*pool), HINIC_MEM_ALLOC_ALIGNE_MIN);
+	if (!pool)
+		return NULL;
+
+	pool->inuse = 0;
+	pool->elem_size = size;
+	pool->align = align;
+	pool->boundary = boundary;
+	pool->hwdev = dev;
+	strncpy(pool->name, name, (sizeof(pool->name) - 1));
+
+	return pool;
+}
+
+void dma_pool_destroy(struct dma_pool *pool)
+{
+	if (!pool)
+		return;
+
+	if (pool->inuse != 0) {
+		PMD_DRV_LOG(ERR, "Leak memory, dma_pool:%s, inuse_count:%u",
+			    pool->name, pool->inuse);
+	}
+
+	rte_free(pool);
+}
+
+void *dma_pool_alloc(struct pci_pool *pool, int flags, dma_addr_t *dma_addr)
+{
+	void *buf;
+
+	buf = hinic_dma_mem_zalloc(pool->hwdev, pool->elem_size,
+				   dma_addr, flags, (u32)pool->align);
+	if (buf)
+		pool->inuse++;
+
+	return buf;
+}
+
+void dma_pool_free(struct pci_pool *pool, void *vaddr, dma_addr_t dma)
+{
+	pool->inuse--;
+	hinic_dma_mem_free(pool->hwdev, pool->elem_size, vaddr, dma);
+}
+
+#define HINIC_MAX_DMA_ENTRIES		8192
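+
+/*
+ * Set up per-device DMA bookkeeping: an allocation counter, a spinlock and
+ * an IOVA-keyed hash table (reused if one with the same name already exists).
+ */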
+int hinic_osdep_init(struct hinic_hwdev *hwdev)
+{
+	struct rte_hash_parameters dh_params = { 0 };
+	struct rte_hash *paddr_hash = NULL;
+
+	rte_atomic32_set(&hwdev->os_dep.dma_alloc_cnt, 0);
+	rte_spinlock_init(&hwdev->os_dep.dma_hash_lock);
+
+	dh_params.name = hwdev->pcidev_hdl->name;
+	dh_params.entries = HINIC_MAX_DMA_ENTRIES;
+	dh_params.key_len = HINIC_HASH_KEY_LEN;
+	dh_params.hash_func = HINIC_HASH_FUNC;
+	dh_params.hash_func_init_val = HINIC_HASH_FUNC_INIT_VAL;
+	dh_params.socket_id = SOCKET_ID_ANY;
+
+	paddr_hash = rte_hash_find_existing(dh_params.name);
+	if (paddr_hash == NULL) {
+		paddr_hash = rte_hash_create(&dh_params);
+		if (paddr_hash == NULL) {
+			PMD_DRV_LOG(ERR, "Create nic_dev phys_addr hash table failed");
+			return -ENOMEM;
+		}
+	} else {
+		PMD_DRV_LOG(INFO, "Using existing dma hash table %s",
+			    dh_params.name);
+	}
+	hwdev->os_dep.dma_addr_hash = paddr_hash;
+
+	return 0;
+}
+
+void hinic_osdep_deinit(struct hinic_hwdev *hwdev)
+{
+	uint32_t iter = 0;
+	dma_addr_t key_pa;
+	struct rte_memzone *data_mz = NULL;
+	struct rte_hash *paddr_hash = hwdev->os_dep.dma_addr_hash;
+
+	if (paddr_hash) {
+		/* iterate through the hash table */
+		while (rte_hash_iterate(paddr_hash, (const void **)&key_pa,
+					(void **)&data_mz, &iter) >= 0) {
+			if (data_mz) {
+				PMD_DRV_LOG(WARNING, "Free leaked dma_addr: %p, mz: %s",
+					(void *)key_pa, data_mz->name);
+				(void)rte_memzone_free(data_mz);
+			}
+		}
+
+		/* free phys_addr hash table */
+		rte_hash_free(paddr_hash);
+	}
+}
+
+
+/**
+ * hinic_set_ci_table - set ci attribute table
+ * @hwdev: the hardware interface of a nic device
+ * @q_id: Queue id of SQ
+ * @attr: Pointer to SQ CI attribute table
+ * @return
+ *   0 on success and ci attribute table is filled,
+ *   negative error value otherwise.
+ **/
+int hinic_set_ci_table(void *hwdev, u16 q_id, struct hinic_sq_attr *attr)
+{
+	struct hinic_cons_idx_attr cons_idx_attr;
+
+	memset(&cons_idx_attr, 0, sizeof(cons_idx_attr));
+	cons_idx_attr.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+	cons_idx_attr.func_idx = hinic_global_func_id(hwdev);
+	cons_idx_attr.dma_attr_off  = attr->dma_attr_off;
+	cons_idx_attr.pending_limit = attr->pending_limit;
+	cons_idx_attr.coalescing_time = attr->coalescing_time;
+	if (attr->intr_en) {
+		cons_idx_attr.intr_en = attr->intr_en;
+		cons_idx_attr.intr_idx = attr->intr_idx;
+	}
+
+	cons_idx_attr.l2nic_sqn = attr->l2nic_sqn;
+	cons_idx_attr.sq_id = q_id;
+	cons_idx_attr.ci_addr = attr->ci_dma_base;
+
+	return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				      HINIC_MGMT_CMD_L2NIC_SQ_CI_ATTR_SET,
+				      &cons_idx_attr, sizeof(cons_idx_attr),
+				      NULL, NULL, 0);
+}
+
+/**
+ * hinic_set_pagesize - set page size to vat table
+ * @hwdev: the hardware interface of a nic device
+ * @page_size: vat page size
+ * @return
+ *   0 on success,
+ *   negative error value otherwise.
+ **/
+int hinic_set_pagesize(void *hwdev, u8 page_size)
+{
+	struct hinic_page_size cmd;
+
+	if (page_size > HINIC_PAGE_SIZE_MAX) {
+		PMD_DRV_LOG(ERR, "Invalid page_size %u, bigger than %u",
+		       page_size, HINIC_PAGE_SIZE_MAX);
+		return -EINVAL;
+	}
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+	cmd.func_idx = hinic_global_func_id(hwdev);
+	cmd.ppf_idx = hinic_ppf_idx(hwdev);
+	cmd.page_size = page_size;
+
+	return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+					HINIC_MGMT_CMD_PAGESIZE_SET,
+					&cmd, sizeof(cmd),
+					NULL, NULL, 0);
+}
+
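+/*
+ * Poll the PF status until the firmware reports FLR completion, or fail
+ * after HINIC_FLR_TIMEOUT ms.
+ */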
+static int wait_for_flr_finish(struct hinic_hwif *hwif)
+{
+	unsigned long end;
+	enum hinic_pf_status status;
+
+	end = jiffies + msecs_to_jiffies(HINIC_FLR_TIMEOUT);
+	do {
+		status = hinic_get_pf_status(hwif);
+		if (status == HINIC_PF_STATUS_FLR_FINISH_FLAG) {
+			hinic_set_pf_status(hwif, HINIC_PF_STATUS_ACTIVE_FLAG);
+			return 0;
+		}
+
+		rte_delay_ms(10);
+	} while (time_before(jiffies, end));
+
+	return -EFAULT;
+}
+
+#define HINIC_WAIT_CMDQ_IDLE_TIMEOUT		1000
+
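+/*
+ * Disable the cmdqs and poll until every cmdq type is idle; on timeout the
+ * cmdqs are re-enabled and -EBUSY is returned.
+ */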
+static int wait_cmdq_stop(struct hinic_hwdev *hwdev)
+{
+	enum hinic_cmdq_type cmdq_type;
+	struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
+	unsigned long end;
+	int err = 0;
+
+	if (!(cmdqs->status & HINIC_CMDQ_ENABLE))
+		return 0;
+
+	cmdqs->status &= ~HINIC_CMDQ_ENABLE;
+
+	end = jiffies + msecs_to_jiffies(HINIC_WAIT_CMDQ_IDLE_TIMEOUT);
+	do {
+		err = 0;
+		cmdq_type = HINIC_CMDQ_SYNC;
+		for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
+			if (!hinic_cmdq_idle(&cmdqs->cmdq[cmdq_type])) {
+				err = -EBUSY;
+				break;
+			}
+		}
+
+		if (!err)
+			return 0;
+
+		rte_delay_ms(1);
+	} while (time_before(jiffies, end));
+
+	cmdqs->status |= HINIC_CMDQ_ENABLE;
+
+	return err;
+}
+
+/**
+ * hinic_pf_rx_tx_flush - clean up hardware resources
+ * @hwdev: the hardware interface of a nic device
+ * @return
+ *   0 on success,
+ *   negative error value otherwise.
+ **/
+static int hinic_pf_rx_tx_flush(struct hinic_hwdev *hwdev)
+{
+	struct hinic_hwif *hwif = hwdev->hwif;
+	struct hinic_clear_doorbell clear_db;
+	struct hinic_clear_resource clr_res;
+	int err;
+
+	rte_delay_ms(100);
+
+	err = wait_cmdq_stop(hwdev);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Cmdq is still working");
+		return err;
+	}
+
+	hinic_disable_doorbell(hwif);
+	memset(&clear_db, 0, sizeof(clear_db));
+	clear_db.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+	clear_db.func_idx = HINIC_HWIF_GLOBAL_IDX(hwif);
+	clear_db.ppf_idx  = HINIC_HWIF_PPF_IDX(hwif);
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_FLUSH_DOORBELL, &clear_db,
+				     sizeof(clear_db), NULL, NULL, 0);
+	if (err)
+		PMD_DRV_LOG(WARNING, "Flush doorbell failed");
+
+	hinic_set_pf_status(hwif, HINIC_PF_STATUS_FLR_START_FLAG);
+	memset(&clr_res, 0, sizeof(clr_res));
+	clr_res.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+	clr_res.func_idx = HINIC_HWIF_GLOBAL_IDX(hwif);
+	clr_res.ppf_idx  = HINIC_HWIF_PPF_IDX(hwif);
+
+	err = hinic_msg_to_mgmt_no_ack(hwdev, HINIC_MOD_COMM,
+				       HINIC_MGMT_CMD_START_FLR, &clr_res,
+				       sizeof(clr_res), NULL, NULL);
+	if (err)
+		PMD_DRV_LOG(WARNING, "Notice flush message failed");
+
+	err = wait_for_flr_finish(hwif);
+	if (err)
+		PMD_DRV_LOG(WARNING, "Wait firmware FLR timeout");
+
+	hinic_enable_doorbell(hwif);
+
+	err = hinic_reinit_cmdq_ctxts(hwdev);
+	if (err)
+		PMD_DRV_LOG(WARNING, "Reinit cmdq failed");
+
+	return 0;
+}
+
+int hinic_func_rx_tx_flush(struct hinic_hwdev *hwdev)
+{
+	return hinic_pf_rx_tx_flush(hwdev);
+}
+
+/**
+ * hinic_get_interrupt_cfg - get interrupt configuration from NIC
+ * @hwdev: the hardware interface of a nic device
+ * @interrupt_info: Information of Interrupt aggregation
+ * Return: 0 on success, negative error value otherwise.
+ **/
+static int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev,
+				struct nic_interrupt_info *interrupt_info)
+{
+	struct hinic_msix_config msix_cfg;
+	u16 out_size = sizeof(msix_cfg);
+	int err;
+
+	memset(&msix_cfg, 0, sizeof(msix_cfg));
+	msix_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+	msix_cfg.func_id = hinic_global_func_id(hwdev);
+	msix_cfg.msix_index = interrupt_info->msix_index;
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_MSI_CTRL_REG_RD_BY_UP,
+				     &msix_cfg, sizeof(msix_cfg),
+				     &msix_cfg, &out_size, 0);
+	if (err || !out_size || msix_cfg.mgmt_msg_head.status) {
+		PMD_DRV_LOG(ERR, "Get interrupt config failed, ret: %d",
+			msix_cfg.mgmt_msg_head.status);
+		return -EINVAL;
+	}
+
+	interrupt_info->lli_credit_limit = msix_cfg.lli_credit_cnt;
+	interrupt_info->lli_timer_cfg = msix_cfg.lli_tmier_cnt;
+	interrupt_info->pending_limt = msix_cfg.pending_cnt;
+	interrupt_info->coalesc_timer_cfg = msix_cfg.coalesct_timer_cnt;
+	interrupt_info->resend_timer_cfg = msix_cfg.resend_timer_cnt;
+	return 0;
+}
+
+/**
+ * hinic_set_interrupt_cfg - set interrupt configuration to NIC
+ * @hwdev: the hardware interface of a nic device
+ * @interrupt_info: Information of Interrupt aggregation
+ * Return: 0 on success, negative error value otherwise.
+ **/
+int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev,
+			    struct nic_interrupt_info interrupt_info)
+{
+	struct hinic_msix_config msix_cfg;
+	struct nic_interrupt_info temp_info;
+	u16 out_size = sizeof(msix_cfg);
+	int err;
+
+	memset(&msix_cfg, 0, sizeof(msix_cfg));
+	msix_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+	msix_cfg.func_id = hinic_global_func_id(hwdev);
+	msix_cfg.msix_index = (u16)interrupt_info.msix_index;
+
+	temp_info.msix_index = interrupt_info.msix_index;
+
+	err = hinic_get_interrupt_cfg(hwdev, &temp_info);
+	if (err)
+		return -EINVAL;
+
+	msix_cfg.lli_credit_cnt = temp_info.lli_credit_limit;
+	msix_cfg.lli_tmier_cnt = temp_info.lli_timer_cfg;
+	msix_cfg.pending_cnt = temp_info.pending_limt;
+	msix_cfg.coalesct_timer_cnt = temp_info.coalesc_timer_cfg;
+	msix_cfg.resend_timer_cnt = temp_info.resend_timer_cfg;
+
+	if (interrupt_info.lli_set) {
+		msix_cfg.lli_credit_cnt = interrupt_info.lli_credit_limit;
+		msix_cfg.lli_tmier_cnt = interrupt_info.lli_timer_cfg;
+	}
+
+	if (interrupt_info.interrupt_coalesc_set) {
+		msix_cfg.pending_cnt = interrupt_info.pending_limt;
+		msix_cfg.coalesct_timer_cnt = interrupt_info.coalesc_timer_cfg;
+		msix_cfg.resend_timer_cnt = interrupt_info.resend_timer_cfg;
+	}
+
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_MSI_CTRL_REG_WR_BY_UP,
+				     &msix_cfg, sizeof(msix_cfg),
+				     &msix_cfg, &out_size, 0);
+	if (err || !out_size || msix_cfg.mgmt_msg_head.status) {
+		PMD_DRV_LOG(ERR, "Set interrupt config failed, ret: %d",
+			msix_cfg.mgmt_msg_head.status);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * init_aeqs_msix_attr - Init interrupt attributes of aeq
+ * @hwdev: the hardware interface of a nic device
+ * @return
+ *   0 on success,
+ *   negative error value otherwise.
+ **/
+int init_aeqs_msix_attr(void *hwdev)
+{
+	struct hinic_hwdev *nic_hwdev = hwdev;
+	struct hinic_aeqs *aeqs = nic_hwdev->aeqs;
+	struct nic_interrupt_info info = {0};
+	struct hinic_eq *eq;
+	u16 q_id;
+	int err;
+
+	info.lli_set = 0;
+	info.interrupt_coalesc_set = 1;
+	info.pending_limt = HINIC_DEAULT_EQ_MSIX_PENDING_LIMIT;
+	info.coalesc_timer_cfg = HINIC_DEAULT_EQ_MSIX_COALESC_TIMER_CFG;
+	info.resend_timer_cfg = HINIC_DEAULT_EQ_MSIX_RESEND_TIMER_CFG;
+
+	for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) {
+		eq = &aeqs->aeq[q_id];
+		info.msix_index = eq->eq_irq.msix_entry_idx;
+		err = hinic_set_interrupt_cfg(hwdev, info);
+		if (err) {
+			PMD_DRV_LOG(ERR, "Set msix attr for aeq %d failed",
+				    q_id);
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * set_pf_dma_attr_entry - set the dma attributes for entry
+ * @hwdev: the pointer to the private hardware device object
+ * @entry_idx: the entry index in the dma table
+ * @st: PCIE TLP steering tag
+ * @at:	PCIE TLP AT field
+ * @ph: PCIE TLP Processing Hint field
+ * @no_snooping: PCIE TLP No snooping
+ * @tph_en: PCIE TLP Processing Hint Enable
+ **/
+static void set_pf_dma_attr_entry(struct hinic_hwdev *hwdev, u32 entry_idx,
+				  u8 st, u8 at, u8 ph,
+				  enum hinic_pcie_nosnoop no_snooping,
+				  enum hinic_pcie_tph tph_en)
+{
+	u32 addr, val, dma_attr_entry;
+
+	/* Read Modify Write */
+	addr = HINIC_CSR_DMA_ATTR_TBL_ADDR(entry_idx);
+
+	val = hinic_hwif_read_reg(hwdev->hwif, addr);
+	val = HINIC_DMA_ATTR_ENTRY_CLEAR(val, ST)	&
+		HINIC_DMA_ATTR_ENTRY_CLEAR(val, AT)	&
+		HINIC_DMA_ATTR_ENTRY_CLEAR(val, PH)	&
+		HINIC_DMA_ATTR_ENTRY_CLEAR(val, NO_SNOOPING)	&
+		HINIC_DMA_ATTR_ENTRY_CLEAR(val, TPH_EN);
+
+	dma_attr_entry = HINIC_DMA_ATTR_ENTRY_SET(st, ST)	|
+			 HINIC_DMA_ATTR_ENTRY_SET(at, AT)	|
+			 HINIC_DMA_ATTR_ENTRY_SET(ph, PH)	|
+			 HINIC_DMA_ATTR_ENTRY_SET(no_snooping, NO_SNOOPING) |
+			 HINIC_DMA_ATTR_ENTRY_SET(tph_en, TPH_EN);
+
+	val |= dma_attr_entry;
+	hinic_hwif_write_reg(hwdev->hwif, addr, val);
+}
+
+/**
+ * dma_attr_table_init - initialize the default dma attributes
+ * @hwdev: the pointer to the private hardware device object
+ **/
+static void dma_attr_table_init(struct hinic_hwdev *hwdev)
+{
+	if (HINIC_IS_VF(hwdev))
+		return;
+
+	set_pf_dma_attr_entry(hwdev, PCIE_MSIX_ATTR_ENTRY,
+			      HINIC_PCIE_ST_DISABLE,
+			      HINIC_PCIE_AT_DISABLE,
+			      HINIC_PCIE_PH_DISABLE,
+			      HINIC_PCIE_SNOOP,
+			      HINIC_PCIE_TPH_DISABLE);
+}
+
+int hinic_init_attr_table(struct hinic_hwdev *hwdev)
+{
+	dma_attr_table_init(hwdev);
+
+	return init_aeqs_msix_attr(hwdev);
+}
+
+#define FAULT_SHOW_STR_LEN 16
+static void fault_report_show(struct hinic_hwdev *hwdev,
+			      struct hinic_fault_event *event)
+{
+	char fault_type[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = {
+		"chip", "ucode", "mem rd timeout", "mem wr timeout",
+		"reg rd timeout", "reg wr timeout"};
+	char fault_level[FAULT_LEVEL_MAX][FAULT_SHOW_STR_LEN + 1] = {
+		"fatal", "reset", "flr", "general", "suggestion"};
+	char type_str[FAULT_SHOW_STR_LEN + 1] = { 0 };
+	char level_str[FAULT_SHOW_STR_LEN + 1] = { 0 };
+	u8 err_level;
+
+	PMD_DRV_LOG(WARNING, "Fault event report received, func_id: %d",
+		 hinic_global_func_id(hwdev));
+
+	if (event->type < FAULT_TYPE_MAX)
+		strncpy(type_str, fault_type[event->type], FAULT_SHOW_STR_LEN);
+	else
+		strncpy(type_str, "unknown", FAULT_SHOW_STR_LEN);
+	PMD_DRV_LOG(WARNING, "fault type:    %d [%s]",
+		 event->type, type_str);
+	PMD_DRV_LOG(WARNING, "fault val[0]:  0x%08x",
+		 event->event.val[0]);
+	PMD_DRV_LOG(WARNING, "fault val[1]:  0x%08x",
+		 event->event.val[1]);
+	PMD_DRV_LOG(WARNING, "fault val[2]:  0x%08x",
+		 event->event.val[2]);
+	PMD_DRV_LOG(WARNING, "fault val[3]:  0x%08x",
+		 event->event.val[3]);
+
+	switch (event->type) {
+	case FAULT_TYPE_CHIP:
+		err_level = event->event.chip.err_level;
+		if (err_level < FAULT_LEVEL_MAX)
+			strncpy(level_str, fault_level[err_level],
+				FAULT_SHOW_STR_LEN);
+		else
+			strncpy(level_str, "unknown",
+				FAULT_SHOW_STR_LEN);
+
+		PMD_DRV_LOG(WARNING, "err_level:     %d [%s]",
+			 err_level, level_str);
+
+		if (err_level == FAULT_LEVEL_SERIOUS_FLR) {
+			PMD_DRV_LOG(WARNING, "flr func_id:   %d",
+				 event->event.chip.func_id);
+		} else {
+			PMD_DRV_LOG(WARNING, "node_id:       %d",
+				 event->event.chip.node_id);
+			PMD_DRV_LOG(WARNING, "err_type:      %d",
+				 event->event.chip.err_type);
+			PMD_DRV_LOG(WARNING, "err_csr_addr:  %d",
+				 event->event.chip.err_csr_addr);
+			PMD_DRV_LOG(WARNING, "err_csr_value: %d",
+				 event->event.chip.err_csr_value);
+		}
+		break;
+	case FAULT_TYPE_UCODE:
+		PMD_DRV_LOG(WARNING, "cause_id:      %d",
+			 event->event.ucode.cause_id);
+		PMD_DRV_LOG(WARNING, "core_id:       %d",
+			 event->event.ucode.core_id);
+		PMD_DRV_LOG(WARNING, "c_id:          %d",
+			 event->event.ucode.c_id);
+		PMD_DRV_LOG(WARNING, "epc:           %d",
+			 event->event.ucode.epc);
+		break;
+	case FAULT_TYPE_MEM_RD_TIMEOUT:
+	case FAULT_TYPE_MEM_WR_TIMEOUT:
+		PMD_DRV_LOG(WARNING, "err_csr_ctrl:  %d",
+			 event->event.mem_timeout.err_csr_ctrl);
+		PMD_DRV_LOG(WARNING, "err_csr_data:  %d",
+			 event->event.mem_timeout.err_csr_data);
+		PMD_DRV_LOG(WARNING, "ctrl_tab:      %d",
+			 event->event.mem_timeout.ctrl_tab);
+		PMD_DRV_LOG(WARNING, "mem_index:     %d",
+			 event->event.mem_timeout.mem_index);
+		break;
+	case FAULT_TYPE_REG_RD_TIMEOUT:
+	case FAULT_TYPE_REG_WR_TIMEOUT:
+		PMD_DRV_LOG(WARNING, "err_csr:       %d",
+			 event->event.reg_timeout.err_csr);
+		break;
+	default:
+		break;
+	}
+}
+
+static int resources_state_set(struct hinic_hwdev *hwdev,
+			       enum hinic_res_state state)
+{
+	struct hinic_hwif *hwif = hwdev->hwif;
+	struct hinic_cmd_set_res_state res_state;
+
+	memset(&res_state, 0, sizeof(res_state));
+	res_state.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+	res_state.func_idx = HINIC_HWIF_GLOBAL_IDX(hwif);
+	res_state.state = state;
+
+	return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				 HINIC_MGMT_CMD_RES_STATE_SET,
+				 &res_state, sizeof(res_state), NULL, NULL, 0);
+}
+
+/**
+ * hinic_activate_hwdev_state - Activate host nic state and notify mgmt
+ * channel that host nic is ready.
+ * @hwdev: the hardware interface of a nic device
+ * @return
+ *   0 on success,
+ *   negative error value otherwise.
+ **/
+int hinic_activate_hwdev_state(struct hinic_hwdev *hwdev)
+{
+	int rc = HINIC_OK;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	if (!HINIC_IS_VF(hwdev))
+		hinic_set_pf_status(hwdev->hwif,
+				    HINIC_PF_STATUS_ACTIVE_FLAG);
+
+	rc = resources_state_set(hwdev, HINIC_RES_ACTIVE);
+	if (rc) {
+		PMD_DRV_LOG(ERR, "Initialize resources state failed");
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * hinic_deactivate_hwdev_state - Deactivate host nic state and notify mgmt
+ * channel that host nic is not ready.
+ * @hwdev: the pointer to the private hardware device object
+ **/
+void hinic_deactivate_hwdev_state(struct hinic_hwdev *hwdev)
+{
+	int rc = HINIC_OK;
+
+	if (!hwdev)
+		return;
+
+	rc = resources_state_set(hwdev, HINIC_RES_CLEAN);
+	if (rc)
+		PMD_DRV_LOG(ERR, "Deinit resources state failed");
+
+	if (!HINIC_IS_VF(hwdev))
+		hinic_set_pf_status(hwdev->hwif, HINIC_PF_STATUS_INIT);
+}
+
+int hinic_get_board_info(void *hwdev, struct hinic_board_info *info)
+{
+	struct hinic_comm_board_info board_info;
+	u16 out_size = sizeof(board_info);
+	int err;
+
+	if (!hwdev || !info)
+		return -EINVAL;
+
+	memset(&board_info, 0, sizeof(board_info));
+	board_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_GET_BOARD_INFO,
+				     &board_info, sizeof(board_info),
+				     &board_info, &out_size, 0);
+	if (err || board_info.mgmt_msg_head.status || !out_size) {
+		PMD_DRV_LOG(ERR, "Failed to get board info, err: %d, status: 0x%x, out size: 0x%x",
+			err, board_info.mgmt_msg_head.status, out_size);
+		return -EFAULT;
+	}
+
+	memcpy(info, &board_info.info, sizeof(*info));
+	return 0;
+}
+
+/**
+ * hinic_l2nic_reset - Restore the initial state of NIC
+ * @hwdev: the hardware interface of a nic device
+ * @return
+ *   0 on success,
+ *   negative error value otherwise.
+ **/
+int hinic_l2nic_reset(struct hinic_hwdev *hwdev)
+{
+	struct hinic_hwif *hwif = hwdev->hwif;
+	struct hinic_l2nic_reset l2nic_reset;
+	int err = 0;
+
+	err = hinic_set_vport_enable(hwdev, false);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Set vport disable failed");
+		return err;
+	}
+
+	rte_delay_ms(100);
+
+	memset(&l2nic_reset, 0, sizeof(l2nic_reset));
+	l2nic_reset.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+	l2nic_reset.func_id = HINIC_HWIF_GLOBAL_IDX(hwif);
+	err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+				     HINIC_MGMT_CMD_L2NIC_RESET,
+				     &l2nic_reset, sizeof(l2nic_reset),
+				     NULL, NULL, 0);
+	if (err || l2nic_reset.mgmt_msg_head.status) {
+		PMD_DRV_LOG(ERR, "Reset L2NIC resources failed");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static void
+hinic_show_sw_watchdog_timeout_info(void *buf_in, u16 in_size,
+				    void *buf_out, u16 *out_size)
+{
+	struct hinic_mgmt_watchdog_info *watchdog_info;
+	u32 *dump_addr, *reg, stack_len, i, j;
+
+	if (in_size != sizeof(*watchdog_info)) {
+		PMD_DRV_LOG(ERR, "Invalid mgmt watchdog report, length: %d, should be %zu",
+			in_size, sizeof(*watchdog_info));
+		return;
+	}
+
+	watchdog_info = (struct hinic_mgmt_watchdog_info *)buf_in;
+
+	PMD_DRV_LOG(ERR, "Mgmt deadloop time: 0x%x 0x%x, task id: 0x%x, sp: 0x%x",
+		watchdog_info->curr_time_h, watchdog_info->curr_time_l,
+		watchdog_info->task_id, watchdog_info->sp);
+	PMD_DRV_LOG(ERR, "Stack current used: 0x%x, peak used: 0x%x, overflow flag: 0x%x, top: 0x%x, bottom: 0x%x",
+		watchdog_info->curr_used, watchdog_info->peak_used,
+		watchdog_info->is_overflow, watchdog_info->stack_top,
+		watchdog_info->stack_bottom);
+
+	PMD_DRV_LOG(ERR, "Mgmt pc: 0x%08x, lr: 0x%08x, cpsr:0x%08x",
+		watchdog_info->pc, watchdog_info->lr, watchdog_info->cpsr);
+
+	PMD_DRV_LOG(ERR, "Mgmt register info");
+
+	for (i = 0; i < 3; i++) {
+		reg = watchdog_info->reg + (u64)(u32)(4 * i);
+		PMD_DRV_LOG(ERR, "0x%08x 0x%08x 0x%08x 0x%08x",
+			*(reg), *(reg + 1), *(reg + 2), *(reg + 3));
+	}
+
+	PMD_DRV_LOG(ERR, "0x%08x", watchdog_info->reg[12]);
+
+	if (watchdog_info->stack_actlen <= 1024) {
+		stack_len = watchdog_info->stack_actlen;
+	} else {
+		PMD_DRV_LOG(ERR, "Oops stack length: 0x%x is wrong",
+			watchdog_info->stack_actlen);
+		stack_len = 1024;
+	}
+
+	PMD_DRV_LOG(ERR, "Mgmt dump stack, 16Bytes per line(start from sp)");
+	for (i = 0; i < (stack_len / 16); i++) {
+		dump_addr = (u32 *)(watchdog_info->data + ((u64)(u32)(i * 16)));
+		PMD_DRV_LOG(ERR, "0x%08x 0x%08x 0x%08x 0x%08x",
+			*dump_addr, *(dump_addr + 1), *(dump_addr + 2),
+			*(dump_addr + 3));
+	}
+
+	for (j = 0; j < ((stack_len % 16) / 4); j++) {
+		dump_addr = (u32 *)(watchdog_info->data +
+			    ((u64)(u32)(i * 16 + j * 4)));
+		PMD_DRV_LOG(ERR, "0x%08x", *dump_addr);
+	}
+
+	*out_size = sizeof(*watchdog_info);
+	watchdog_info = (struct hinic_mgmt_watchdog_info *)buf_out;
+	watchdog_info->mgmt_msg_head.status = 0;
+}
+
+static void hinic_show_pcie_dfx_info(struct hinic_hwdev *hwdev,
+				     void *buf_in, u16 in_size,
+				     void *buf_out, u16 *out_size)
+{
+	struct hinic_pcie_dfx_ntc *notice_info =
+		(struct hinic_pcie_dfx_ntc *)buf_in;
+	struct hinic_pcie_dfx_info dfx_info;
+	u16 size = 0;
+	u16 cnt = 0;
+	u32 num = 0;
+	u32 i, j;
+	int err;
+	u32 *reg;
+
+	if (in_size != sizeof(*notice_info)) {
+		PMD_DRV_LOG(ERR, "Invalid pcie dfx notice info, length: %d, should be %zu.",
+			in_size, sizeof(*notice_info));
+		return;
+	}
+
+	((struct hinic_pcie_dfx_ntc *)buf_out)->mgmt_msg_head.status = 0;
+	*out_size = sizeof(*notice_info);
+	memset(&dfx_info, 0, sizeof(dfx_info));
+	num = (u32)(notice_info->len / 1024);
+	PMD_DRV_LOG(INFO, "INFO LEN: %d", notice_info->len);
+	PMD_DRV_LOG(INFO, "PCIE DFX:");
+	dfx_info.host_id = 0;
+	dfx_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+	for (i = 0; i < num; i++) {
+		dfx_info.offset = i * MAX_PCIE_DFX_BUF_SIZE;
+		if (i == (num - 1))
+			dfx_info.last = 1;
+		size = sizeof(dfx_info);
+		err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+					     HINIC_MGMT_CMD_PCIE_DFX_GET,
+					     &dfx_info, sizeof(dfx_info),
+					     &dfx_info, &size, 0);
+		if (err || dfx_info.mgmt_msg_head.status || !size) {
+			PMD_DRV_LOG(ERR, "Failed to get pcie dfx info, err: %d, status: 0x%x, out size: 0x%x",
+				err, dfx_info.mgmt_msg_head.status, size);
+			return;
+		}
+
+		reg = (u32 *)dfx_info.data;
+		for (j = 0; j < 256; j = j + 8) {
+			PMD_DRV_LOG(ERR, "0x%04x: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x",
+				cnt, reg[j], reg[(u32)(j + 1)],
+				reg[(u32)(j + 2)], reg[(u32)(j + 3)],
+				reg[(u32)(j + 4)], reg[(u32)(j + 5)],
+				reg[(u32)(j + 6)], reg[(u32)(j + 7)]);
+			cnt = cnt + 32;
+		}
+		memset(dfx_info.data, 0, MAX_PCIE_DFX_BUF_SIZE);
+	}
+}
+
+static void
+hinic_show_ffm_info(struct hinic_hwdev *hwdev, void *buf_in, u16 in_size)
+{
+	struct ffm_intr_info *intr;
+
+	if (in_size != sizeof(struct ffm_intr_info)) {
+		PMD_DRV_LOG(ERR, "Invalid input buffer len, length: %d, should be %zu.",
+			in_size, sizeof(struct ffm_intr_info));
+		return;
+	}
+
+	if (hwdev->ffm_num < FFM_RECORD_NUM_MAX) {
+		hwdev->ffm_num++;
+		intr = (struct ffm_intr_info *)buf_in;
+		PMD_DRV_LOG(WARNING, "node_id(%d),err_csr_addr(0x%x),err_csr_val(0x%x),err_level(0x%x),err_type(0x%x)",
+			    intr->node_id,
+			    intr->err_csr_addr,
+			    intr->err_csr_value,
+			    intr->err_level,
+			    intr->err_type);
+	}
+}
+
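+/*
+ * Dispatch management-channel async events (fault report, watchdog, PCIe DFX,
+ * FFM) to the corresponding dump helpers.
+ */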
+void hinic_comm_async_event_handle(struct hinic_hwdev *hwdev, u8 cmd,
+				   void *buf_in, u16 in_size,
+				   void *buf_out, u16 *out_size)
+{
+	struct hinic_cmd_fault_event *fault_event, *ret_fault_event;
+
+	if (!hwdev)
+		return;
+
+	*out_size = 0;
+
+	switch (cmd) {
+	case HINIC_MGMT_CMD_FAULT_REPORT:
+		if (in_size != sizeof(*fault_event)) {
+			PMD_DRV_LOG(ERR, "Invalid fault event report, length: %d, should be %zu",
+				in_size, sizeof(*fault_event));
+			return;
+		}
+
+		fault_event = (struct hinic_cmd_fault_event *)buf_in;
+		fault_report_show(hwdev, &fault_event->event);
+
+		if (hinic_func_type(hwdev) != TYPE_VF) {
+			ret_fault_event =
+				(struct hinic_cmd_fault_event *)buf_out;
+			ret_fault_event->mgmt_msg_head.status = 0;
+			*out_size = sizeof(*ret_fault_event);
+		}
+		break;
+
+	case HINIC_MGMT_CMD_WATCHDOG_INFO:
+		hinic_show_sw_watchdog_timeout_info(buf_in, in_size,
+						    buf_out, out_size);
+		break;
+
+	case HINIC_MGMT_CMD_PCIE_DFX_NTC:
+		hinic_show_pcie_dfx_info(hwdev, buf_in, in_size,
+					 buf_out, out_size);
+		break;
+
+	case HINIC_MGMT_CMD_FFM_SET:
+		hinic_show_ffm_info(hwdev, buf_in, in_size);
+		break;
+
+	default:
+		break;
+	}
+}
+
+static void
+hinic_cable_status_event(u8 cmd, void *buf_in, __rte_unused u16 in_size,
+			 void *buf_out, u16 *out_size)
+{
+	struct hinic_cable_plug_event *plug_event;
+	struct hinic_link_err_event *link_err;
+
+	if (cmd == HINIC_PORT_CMD_CABLE_PLUG_EVENT) {
+		plug_event = (struct hinic_cable_plug_event *)buf_in;
+		PMD_DRV_LOG(INFO, "Port module event: Cable %s",
+			 plug_event->plugged ? "plugged" : "unplugged");
+
+		*out_size = sizeof(*plug_event);
+		plug_event = (struct hinic_cable_plug_event *)buf_out;
+		plug_event->mgmt_msg_head.status = 0;
+	} else if (cmd == HINIC_PORT_CMD_LINK_ERR_EVENT) {
+		link_err = (struct hinic_link_err_event *)buf_in;
+		if (link_err->err_type >= LINK_ERR_NUM) {
+			PMD_DRV_LOG(ERR, "Link failed, Unknown type: 0x%x",
+				link_err->err_type);
+		} else {
+			PMD_DRV_LOG(INFO, "Link failed, type: 0x%x: %s",
+				 link_err->err_type,
+				 hinic_module_link_err[link_err->err_type]);
+		}
+
+		*out_size = sizeof(*link_err);
+		link_err = (struct hinic_link_err_event *)buf_out;
+		link_err->mgmt_msg_head.status = 0;
+	}
+}
+
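+/*
+ * Translate a firmware link status report into an rte_eth_link and publish it
+ * via rte_eth_linkstatus_set(); port_info.speed (modulo LINK_SPEED_MAX)
+ * indexes the port_speed table.
+ */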
+static int hinic_link_event_process(struct hinic_hwdev *hwdev,
+				    struct rte_eth_dev *eth_dev, u8 status)
+{
+	uint32_t port_speed[LINK_SPEED_MAX] = {ETH_SPEED_NUM_10M,
+					ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G,
+					ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G,
+					ETH_SPEED_NUM_40G, ETH_SPEED_NUM_100G};
+	struct nic_port_info port_info;
+	struct rte_eth_link link;
+	int rc = HINIC_OK;
+
+	if (!status) {
+		link.link_status = ETH_LINK_DOWN;
+		link.link_speed = 0;
+		link.link_duplex = ETH_LINK_HALF_DUPLEX;
+		link.link_autoneg = ETH_LINK_FIXED;
+	} else {
+		link.link_status = ETH_LINK_UP;
+
+		memset(&port_info, 0, sizeof(port_info));
+		rc = hinic_get_port_info(hwdev, &port_info);
+		if (rc) {
+			link.link_speed = ETH_SPEED_NUM_NONE;
+			link.link_duplex = ETH_LINK_FULL_DUPLEX;
+			link.link_autoneg = ETH_LINK_FIXED;
+		} else {
+			link.link_speed = port_speed[port_info.speed %
+						LINK_SPEED_MAX];
+			link.link_duplex = port_info.duplex;
+			link.link_autoneg = port_info.autoneg_state;
+		}
+	}
+	(void)rte_eth_linkstatus_set(eth_dev, &link);
+
+	return rc;
+}
+
+static void hinic_lsc_process(struct hinic_hwdev *hwdev,
+			      struct rte_eth_dev *rte_dev, u8 status)
+{
+	int ret;
+
+	ret = hinic_link_event_process(hwdev, rte_dev, status);
+	/* check if link has changed, notify callback */
+	if (ret == 0)
+		_rte_eth_dev_callback_process(rte_dev,
+					      RTE_ETH_EVENT_INTR_LSC,
+					      NULL);
+}
+
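+/*
+ * Dispatch L2NIC async events: link status changes are propagated to the
+ * ethdev layer; cable plug, link error and mgmt reset events are logged.
+ */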
+void hinic_l2nic_async_event_handle(struct hinic_hwdev *hwdev,
+				    void *param, u8 cmd,
+				    void *buf_in, u16 in_size,
+				    void *buf_out, u16 *out_size)
+{
+	struct hinic_port_link_status *in_link;
+	struct rte_eth_dev *eth_dev;
+
+	if (!hwdev)
+		return;
+
+	*out_size = 0;
+
+	switch (cmd) {
+	case HINIC_PORT_CMD_LINK_STATUS_REPORT:
+		eth_dev = param;
+		in_link = (struct hinic_port_link_status *)buf_in;
+		PMD_DRV_LOG(INFO, "Link status event report, dev_name: %s, port_id: %d, link_status: %s",
+			 eth_dev->data->name, eth_dev->data->port_id,
+			 in_link->link ? "UP" : "DOWN");
+
+		hinic_lsc_process(hwdev, eth_dev, in_link->link);
+		break;
+
+	case HINIC_PORT_CMD_CABLE_PLUG_EVENT:
+	case HINIC_PORT_CMD_LINK_ERR_EVENT:
+		hinic_cable_status_event(cmd, buf_in, in_size,
+					 buf_out, out_size);
+		break;
+
+	case HINIC_PORT_CMD_MGMT_RESET:
+		PMD_DRV_LOG(WARNING, "Mgmt is reset");
+		break;
+
+	default:
+		PMD_DRV_LOG(ERR, "Unsupported event %d to process",
+			cmd);
+		break;
+	}
+}
+
+static void print_cable_info(struct hinic_link_info *info)
+{
+	char tmp_str[512] = {0};
+	char tmp_vendor[17] = {0};
+	const char *port_type = "Unknown port type";
+	int i;
+
+	if (info->cable_absent) {
+		PMD_DRV_LOG(INFO, "Cable unpresent");
+		return;
+	}
+
+	if (info->port_type < LINK_PORT_MAX_TYPE)
+		port_type = __hw_to_char_port_type[info->port_type];
+	else
+		PMD_DRV_LOG(INFO, "Unknown port type: %u",
+			 info->port_type);
+	if (info->port_type == LINK_PORT_FIBRE) {
+		if (info->port_sub_type == FIBRE_SUBTYPE_SR)
+			port_type = "Fibre-SR";
+		else if (info->port_sub_type == FIBRE_SUBTYPE_LR)
+			port_type = "Fibre-LR";
+	}
+
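+	/* strip trailing blanks from the vendor name reported by the module */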
+	for (i = sizeof(info->vendor_name) - 1; i >= 0; i--) {
+		if (info->vendor_name[i] == ' ')
+			info->vendor_name[i] = '\0';
+		else
+			break;
+	}
+
+	memcpy(tmp_vendor, info->vendor_name, sizeof(info->vendor_name));
+	snprintf(tmp_str, (sizeof(tmp_str) - 1),
+		 "Vendor: %s, %s, %s, length: %um, max_speed: %uGbps",
+		 tmp_vendor, info->sfp_type ? "SFP" : "QSFP", port_type,
+		 info->cable_length, info->cable_max_speed);
+	if (info->port_type != LINK_PORT_COPPER)
+		snprintf(tmp_str + strlen(tmp_str),
+			 sizeof(tmp_str) - strlen(tmp_str) - 1,
+			 ", Temperature: %u", info->cable_temp);
+
+	PMD_DRV_LOG(INFO, "Cable information: %s", tmp_str);
+}
+
+static void print_hi30_status(struct hinic_link_info *info)
+{
+	struct hi30_ffe_data *ffe_data;
+	struct hi30_ctle_data *ctle_data;
+
+	ffe_data = (struct hi30_ffe_data *)info->hi30_ffe;
+	ctle_data = (struct hi30_ctle_data *)info->hi30_ctle;
+
+	PMD_DRV_LOG(INFO, "TX_FFE: PRE2=%s%d; PRE1=%s%d; MAIN=%d; POST1=%s%d; POST1X=%s%d",
+		 (ffe_data->PRE1 & 0x10) ? "-" : "",
+		 (int)(ffe_data->PRE1 & 0xf),
+		 (ffe_data->PRE2 & 0x10) ? "-" : "",
+		 (int)(ffe_data->PRE2 & 0xf),
+		 (int)ffe_data->MAIN,
+		 (ffe_data->POST1 & 0x10) ? "-" : "",
+		 (int)(ffe_data->POST1 & 0xf),
+		 (ffe_data->POST2 & 0x10) ? "-" : "",
+		 (int)(ffe_data->POST2 & 0xf));
+	PMD_DRV_LOG(INFO, "RX_CTLE: Gain1~3=%u %u %u; Boost1~3=%u %u %u; Zero1~3=%u %u %u; Squelch1~3=%u %u %u",
+		 ctle_data->ctlebst[0], ctle_data->ctlebst[1],
+		 ctle_data->ctlebst[2], ctle_data->ctlecmband[0],
+		 ctle_data->ctlecmband[1], ctle_data->ctlecmband[2],
+		 ctle_data->ctlermband[0], ctle_data->ctlermband[1],
+		 ctle_data->ctlermband[2], ctle_data->ctleza[0],
+		 ctle_data->ctleza[1], ctle_data->ctleza[2]);
+}
+
+static void print_link_info(struct hinic_link_info *info,
+			    enum hilink_info_print_event type)
+{
+	const char *fec = "None";
+
+	if (info->fec < HILINK_FEC_MAX_TYPE)
+		fec = __hw_to_char_fec[info->fec];
+	else
+		PMD_DRV_LOG(INFO, "Unknown fec type: %u",
+			 info->fec);
+
+	if (type == HILINK_EVENT_LINK_UP || !info->an_state) {
+		PMD_DRV_LOG(INFO, "Link information: speed %dGbps, %s, autoneg %s",
+			 info->speed, fec, info->an_state ? "on" : "off");
+	} else {
+		PMD_DRV_LOG(INFO, "Link information: antoneg: %s",
+			 info->an_state ? "on" : "off");
+	}
+}
+
+static const char *hilink_info_report_type[HILINK_EVENT_MAX_TYPE] = {
+	"", "link up", "link down", "cable plugged"
+};
+
+static void hinic_print_hilink_info(void *buf_in, u16 in_size,
+				    void *buf_out, u16 *out_size)
+{
+	struct hinic_hilink_link_info *hilink_info =
+		(struct hinic_hilink_link_info *)buf_in;
+	struct hinic_link_info *info;
+	enum hilink_info_print_event type;
+
+	if (in_size != sizeof(*hilink_info)) {
+		PMD_DRV_LOG(ERR, "Invalid hilink info message size %d, should be %zu",
+			in_size, sizeof(*hilink_info));
+		return;
+	}
+
+	((struct hinic_hilink_link_info *)buf_out)->mgmt_msg_head.status = 0;
+	*out_size = sizeof(*hilink_info);
+
+	info = &hilink_info->info;
+	type = hilink_info->info_type;
+
+	if (type < HILINK_EVENT_LINK_UP || type >= HILINK_EVENT_MAX_TYPE) {
+		PMD_DRV_LOG(INFO, "Invalid hilink info report, type: %d",
+			 type);
+		return;
+	}
+
+	PMD_DRV_LOG(INFO, "Hilink info report after %s",
+		 hilink_info_report_type[type]);
+
+	print_cable_info(info);
+
+	print_link_info(info, type);
+
+	print_hi30_status(info);
+
+	if (type == HILINK_EVENT_LINK_UP)
+		return;
+
+	if (type == HILINK_EVENT_CABLE_PLUGGED) {
+		PMD_DRV_LOG(INFO, "alos: %u, rx_los: %u",
+			 info->alos, info->rx_los);
+		return;
+	}
+
+	PMD_DRV_LOG(INFO, "PMA ctrl: %s, MAC tx %s, MAC rx %s, PMA debug inforeg: 0x%x, PMA signal ok reg: 0x%x, RF/LF status reg: 0x%x",
+		 info->pma_status ? "on" : "off",
+		 info->mac_tx_en ? "enable" : "disable",
+		 info->mac_rx_en ? "enable" : "disable", info->pma_dbg_info_reg,
+		 info->pma_signal_ok_reg, info->rf_lf_status_reg);
+	PMD_DRV_LOG(INFO, "alos: %u, rx_los: %u, PCS block counter reg: 0x%x,PCS link: 0x%x, MAC link: 0x%x PCS_err_cnt: 0x%x",
+		 info->alos, info->rx_los, info->pcs_err_blk_cnt_reg,
+		 info->pcs_link_reg, info->mac_link_reg, info->pcs_err_cnt);
+}
+
+void hinic_hilink_async_event_handle(struct hinic_hwdev *hwdev, u8 cmd,
+				     void *buf_in, u16 in_size,
+				     void *buf_out, u16 *out_size)
+{
+	if (!hwdev)
+		return;
+
+	*out_size = 0;
+
+	switch (cmd) {
+	case HINIC_HILINK_CMD_GET_LINK_INFO:
+		hinic_print_hilink_info(buf_in, in_size, buf_out,
+					out_size);
+		break;
+
+	default:
+		PMD_DRV_LOG(ERR, "Unsupported event %d to process",
+			cmd);
+		break;
+	}
+}
diff --git a/drivers/net/hinic/base/hinic_pmd_hwdev.h b/drivers/net/hinic/base/hinic_pmd_hwdev.h
new file mode 100644
index 000000000..6c21c475f
--- /dev/null
+++ b/drivers/net/hinic/base/hinic_pmd_hwdev.h
@@ -0,0 +1,485 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_HWDEV_H_
+#define _HINIC_PMD_HWDEV_H_
+
+#include "hinic_pmd_cmd.h"
+
+#define HINIC_PAGE_SIZE_MAX	20
+
+#define HINIC_MGMT_CMD_UNSUPPORTED	0xFF
+#define HINIC_PF_SET_VF_ALREADY		0x4
+
+#define MAX_PCIE_DFX_BUF_SIZE		1024
+
+/* dma pool */
+struct dma_pool {
+	u32 inuse;
+	size_t elem_size;
+	size_t align;
+	size_t boundary;
+	void *hwdev;
+
+	char name[32];
+};
+
+enum hinic_res_state {
+	HINIC_RES_CLEAN = 0,
+	HINIC_RES_ACTIVE = 1,
+};
+
+enum hilink_info_print_event {
+	HILINK_EVENT_LINK_UP = 1,
+	HILINK_EVENT_LINK_DOWN,
+	HILINK_EVENT_CABLE_PLUGGED,
+	HILINK_EVENT_MAX_TYPE,
+};
+
+struct hinic_port_link_status {
+	struct hinic_mgmt_msg_head mgmt_msg_head;
+
+	u16	func_id;
+	u8	link;
+	u8	port_id;
+};
+
+enum link_err_status {
+	LINK_ERR_MODULE_UNRECOGENIZED,
+	LINK_ERR_NUM,
+};
+
+struct hinic_cable_plug_event {
+	struct hinic_mgmt_msg_head mgmt_msg_head;
+
+	u16	func_id;
+	u8	plugged;	/* 0: unplugged, 1: plugged */
+	u8	port_id;
+};
+
+struct hinic_link_err_event {
+	struct hinic_mgmt_msg_head mgmt_msg_head;
+
+	u16	func_id;
+	u8	err_type;
+	u8	port_id;
+};
+
+struct hinic_cons_idx_attr {
+	struct hinic_mgmt_msg_head mgmt_msg_head;
+
+	u16	func_idx;
+	u8	dma_attr_off;
+	u8	pending_limit;
+	u8	coalescing_time;
+	u8	intr_en;
+	u16	intr_idx;
+	u32	l2nic_sqn;
+	u32	sq_id;
+	u64	ci_addr;
+};
+
+struct hinic_clear_doorbell {
+	struct hinic_mgmt_msg_head mgmt_msg_head;
+
+	u16	func_idx;
+	u8	ppf_idx;
+	u8	rsvd1;
+};
+
+struct hinic_clear_resource {
+	struct hinic_mgmt_msg_head mgmt_msg_head;
+
+	u16	func_idx;
+	u8	ppf_idx;
+	u8	rsvd1;
+};
+
+struct hinic_cmd_set_res_state {
+	struct hinic_mgmt_msg_head mgmt_msg_head;
+
+	u16	func_idx;
+	u8	state;
+	u8	rsvd1;
+	u32	rsvd2;
+};
+
+struct hinic_l2nic_reset {
+	struct hinic_mgmt_msg_head mgmt_msg_head;
+
+	u16 func_id;
+	u16 rsvd1;
+};
+
+struct hinic_page_size {
+	struct hinic_mgmt_msg_head mgmt_msg_head;
+
+	u16	func_idx;
+	u8	ppf_idx;
+	u8	page_size;
+	u32	rsvd;
+};
+
+struct hinic_msix_config {
+	struct hinic_mgmt_msg_head mgmt_msg_head;
+
+	u16	func_id;
+	u16	msix_index;
+	u8	pending_cnt;
+	u8	coalesct_timer_cnt;
+	u8	lli_tmier_cnt;
+	u8	lli_credit_cnt;
+	u8	resend_timer_cnt;
+	u8	rsvd1[3];
+};
+
+/* defined by chip */
+enum hinic_fault_type {
+	FAULT_TYPE_CHIP,
+	FAULT_TYPE_UCODE,
+	FAULT_TYPE_MEM_RD_TIMEOUT,
+	FAULT_TYPE_MEM_WR_TIMEOUT,
+	FAULT_TYPE_REG_RD_TIMEOUT,
+	FAULT_TYPE_REG_WR_TIMEOUT,
+	FAULT_TYPE_MAX,
+};
+
+/* defined by chip */
+enum hinic_fault_err_level {
+	/* default err_level=FAULT_LEVEL_FATAL if
+	 * type==FAULT_TYPE_MEM_RD_TIMEOUT || FAULT_TYPE_MEM_WR_TIMEOUT ||
+	 *	 FAULT_TYPE_REG_RD_TIMEOUT || FAULT_TYPE_REG_WR_TIMEOUT ||
+	 *	 FAULT_TYPE_UCODE
+	 * other: err_level in event.chip.err_level if type==FAULT_TYPE_CHIP
+	 */
+	FAULT_LEVEL_FATAL,
+	FAULT_LEVEL_SERIOUS_RESET,
+	FAULT_LEVEL_SERIOUS_FLR,
+	FAULT_LEVEL_GENERAL,
+	FAULT_LEVEL_SUGGESTION,
+	FAULT_LEVEL_MAX
+};
+
+/* defined by chip */
+struct hinic_fault_event {
+	/* enum hinic_fault_type */
+	u8 type;
+	u8 rsvd0[3];
+	union {
+		u32 val[4];
+		/* valid only type==FAULT_TYPE_CHIP */
+		struct {
+			u8 node_id;
+			/* enum hinic_fault_err_level */
+			u8 err_level;
+			u8 err_type;
+			u8 rsvd1;
+			u32 err_csr_addr;
+			u32 err_csr_value;
+		/* func_id valid only err_level==FAULT_LEVEL_SERIOUS_FLR */
+			u16 func_id;
+			u16 rsvd2;
+		} chip;
+
+		/* valid only type==FAULT_TYPE_UCODE */
+		struct {
+			u8 cause_id;
+			u8 core_id;
+			u8 c_id;
+			u8 rsvd3;
+			u32 epc;
+			u32 rsvd4;
+			u32 rsvd5;
+		} ucode;
+
+		/* valid only type==FAULT_TYPE_MEM_RD_TIMEOUT ||
+		 *		FAULT_TYPE_MEM_WR_TIMEOUT
+		 */
+		struct {
+			u32 err_csr_ctrl;
+			u32 err_csr_data;
+			u32 ctrl_tab;
+			u32 mem_index;
+		} mem_timeout;
+
+		/* valid only type==FAULT_TYPE_REG_RD_TIMEOUT ||
+		 *		    FAULT_TYPE_REG_WR_TIMEOUT
+		 */
+		struct {
+			u32 err_csr;
+			u32 rsvd6;
+			u32 rsvd7;
+			u32 rsvd8;
+		} reg_timeout;
+	} event;
+};
+
+struct hinic_cmd_fault_event {
+	struct hinic_mgmt_msg_head mgmt_msg_head;
+
+	struct hinic_fault_event event;
+};
+
+struct hinic_mgmt_watchdog_info {
+	struct hinic_mgmt_msg_head mgmt_msg_head;
+
+	u32 curr_time_h;
+	u32 curr_time_l;
+	u32 task_id;
+	u32 rsv;
+
+	u32 reg[13];
+	u32 pc;
+	u32 lr;
+	u32 cpsr;
+
+	u32 stack_top;
+	u32 stack_bottom;
+	u32 sp;
+	u32 curr_used;
+	u32 peak_used;
+	u32 is_overflow;
+
+	u32 stack_actlen;
+	u8 data[1024];
+};
+
+struct hinic_pcie_dfx_ntc {
+	struct hinic_mgmt_msg_head mgmt_msg_head;
+
+	int len;
+	u32 rsvd;
+};
+
+struct hinic_pcie_dfx_info {
+	struct hinic_mgmt_msg_head mgmt_msg_head;
+
+	u8 host_id;
+	u8 last;
+	u8 rsvd[2];
+	u32 offset;
+
+	u8 data[MAX_PCIE_DFX_BUF_SIZE];
+};
+
+struct ffm_intr_info {
+	u8 node_id;
+	/* error level of the interrupt source */
+	u8 err_level;
+	/* Classification by interrupt source properties */
+	u16 err_type;
+	u32 err_csr_addr;
+	u32 err_csr_value;
+};
+
+struct hinic_board_info {
+	u32	board_type;
+	u32	port_num;
+	u32	port_speed;
+	u32	pcie_width;
+	u32	host_num;
+	u32	pf_num;
+	u32	vf_total_num;
+	u32	tile_num;
+	u32	qcm_num;
+	u32	core_num;
+	u32	work_mode;
+	u32	service_mode;
+	u32	pcie_mode;
+	u32	cfg_addr;
+	u32	boot_sel;
+};
+
+struct hinic_comm_board_info {
+	struct hinic_mgmt_msg_head mgmt_msg_head;
+
+	struct hinic_board_info info;
+
+	u32	rsvd1[5];
+};
+
+struct hi30_ctle_data {
+	u8 ctlebst[3];
+	u8 ctlecmband[3];
+	u8 ctlermband[3];
+	u8 ctleza[3];
+	u8 ctlesqh[3];
+	u8 ctleactgn[3];
+	u8 ctlepassgn;
+};
+
+struct hi30_ffe_data {
+	u8 PRE2;
+	u8 PRE1;
+	u8 POST1;
+	u8 POST2;
+	u8 MAIN;
+};
+
+enum hilink_fec_type {
+	HILINK_FEC_RSFEC,
+	HILINK_FEC_BASEFEC,
+	HILINK_FEC_NOFEC,
+	HILINK_FEC_MAX_TYPE,
+};
+
+enum hinic_link_port_type {
+	LINK_PORT_FIBRE	= 1,
+	LINK_PORT_ELECTRIC,
+	LINK_PORT_COPPER,
+	LINK_PORT_AOC,
+	LINK_PORT_BACKPLANE,
+	LINK_PORT_BASET,
+	LINK_PORT_MAX_TYPE,
+};
+
+enum hilink_fibre_subtype {
+	FIBRE_SUBTYPE_SR = 1,
+	FIBRE_SUBTYPE_LR,
+	FIBRE_SUBTYPE_MAX,
+};
+
+struct hinic_link_info {
+	u8	vendor_name[16];
+	/* port type:
+	 * 1 - fiber; 2 - electric; 3 - copper; 4 - AOC; 5 - backplane;
+	 * 6 - baseT; 0xffff - unknown
+	 *
+	 * port subtype:
+	 * Only when port_type is fiber:
+	 * 1 - SR; 2 - LR
+	 */
+	u32	port_type;
+	u32	port_sub_type;
+	u32	cable_length;
+	u8	cable_temp;
+	u8	cable_max_speed;	/* 1(G)/10(G)/25(G)... */
+	u8	sfp_type;	/* 0 - qsfp; 1 - sfp */
+	u8	rsvd0;
+	u32	power[4];	/* uW; for sfp, only power[2] is valid */
+
+	u8	an_state;	/* 0 - off; 1 - on */
+	u8	fec;		/* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */
+	u16	speed;		/* 1(G)/10(G)/25(G)... */
+
+	u8	cable_absent;	/* 0 - cable present; 1 - cable absent */
+	u8	alos;		/* 0 - yes; 1 - no */
+	u8	rx_los;		/* 0 - yes; 1 - no */
+	u8	pma_status;
+	u32	pma_dbg_info_reg;	/* pma debug info: */
+	u32	pma_signal_ok_reg;	/* signal ok: */
+
+	u32	pcs_err_blk_cnt_reg;	/* error block counter: */
+	u32	rf_lf_status_reg;	/* RF/LF status: */
+	u8	pcs_link_reg;		/* pcs link: */
+	u8	mac_link_reg;		/* mac link: */
+	u8	mac_tx_en;
+	u8	mac_rx_en;
+	u32	pcs_err_cnt;
+
+	u8	lane_used;
+	u8	hi30_ffe[5];
+	u8	hi30_ctle[19];
+	u8	hi30_dfe[14];
+	u8	rsvd4;
+};
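[Editor's note, not part of the patch: as a reading aid for the port_type encoding documented in the comment above, a hypothetical helper mapping the reported value to the enum hinic_link_port_type names might look like this; the helper name is an assumption.]

/* Illustrative only: printable name for struct hinic_link_info::port_type. */
static const char *port_type_name(u32 port_type)
{
	switch (port_type) {
	case LINK_PORT_FIBRE:     return "fibre";
	case LINK_PORT_ELECTRIC:  return "electric";
	case LINK_PORT_COPPER:    return "copper";
	case LINK_PORT_AOC:       return "AOC";
	case LINK_PORT_BACKPLANE: return "backplane";
	case LINK_PORT_BASET:     return "baseT";
	default:                  return "unknown";
	}
}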
+
+struct hinic_hilink_link_info {
+	struct hinic_mgmt_msg_head mgmt_msg_head;
+
+	u16	port_id;
+	u8	info_type;	/* 1: link up  2: link down  3: cable plugged */
+	u8	rsvd1;
+
+	struct hinic_link_info info;
+
+	u8	rsvd2[780];
+};
+
+/* dma os dependency implementation */
+struct hinic_os_dep {
+	/* kernel dma alloc api */
+	rte_atomic32_t dma_alloc_cnt;
+	rte_spinlock_t  dma_hash_lock;
+	struct rte_hash *dma_addr_hash;
+};
+
+struct nic_interrupt_info {
+	u32 lli_set;
+	u32 interrupt_coalesc_set;
+	u16 msix_index;
+	u8 lli_credit_limit;
+	u8 lli_timer_cfg;
+	u8 pending_limt;
+	u8 coalesc_timer_cfg;
+	u8 resend_timer_cfg;
+};
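[Editor's note, not part of the patch: to show how this structure is meant to be consumed, here is a hedged sketch of programming interrupt coalescing through hinic_set_interrupt_cfg(), which is declared further below in this header. The helper name and the numeric values are placeholders, not tuned recommendations.]

/* Illustrative only: configure coalescing for one MSI-X vector. */
static int example_set_coalesce(struct hinic_hwdev *hwdev, u16 msix_index)
{
	struct nic_interrupt_info info = { 0 };

	info.msix_index = msix_index;
	info.pending_limt = 2;		/* pending descriptor limit (placeholder) */
	info.coalesc_timer_cfg = 5;	/* coalescing timer, device units (placeholder) */
	info.resend_timer_cfg = 7;	/* interrupt resend timer (placeholder) */

	return hinic_set_interrupt_cfg(hwdev, info);
}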
+
+struct hinic_sq_attr {
+	u8 dma_attr_off;
+	u8 pending_limit;
+	u8 coalescing_time;
+	u8 intr_en;
+	u16 intr_idx;
+	u32 l2nic_sqn;
+	/* bits [63:2]: high 62 bits of the address; bit [0]: valid flag */
+	u64 ci_dma_base;
+};
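[Editor's note, not part of the patch: given the bit layout described in the ci_dma_base comment, one plausible way to pack a 4-byte-aligned completion-index address and the valid flag is sketched below; the macro and function names are assumptions, not taken from the driver.]

#define EXAMPLE_CI_DMA_BASE_VALID	0x1ULL

/* Illustrative only: bits [63:2] keep the address's upper 62 bits,
 * bit [0] marks the entry valid.
 */
static u64 example_pack_ci_dma_base(u64 ci_dma_addr)
{
	return (ci_dma_addr & ~0x3ULL) | EXAMPLE_CI_DMA_BASE_VALID;
}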
+
+struct hinic_hwdev {
+	struct rte_pci_device *pcidev_hdl;
+	u32 ffm_num;
+
+	/* dma memory allocator */
+	struct hinic_os_dep os_dep;
+	struct hinic_hwif *hwif;
+	struct cfg_mgmt_info *cfg_mgmt;
+	struct hinic_aeqs *aeqs;
+	struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
+	struct hinic_cmdqs *cmdqs;
+	struct hinic_nic_io *nic_io;
+};
+
+int hinic_osdep_init(struct hinic_hwdev *hwdev);
+
+void hinic_osdep_deinit(struct hinic_hwdev *hwdev);
+
+void dma_free_coherent_volatile(void *hwdev, size_t size,
+				volatile void *virt, dma_addr_t phys);
+
+int hinic_get_board_info(void *hwdev, struct hinic_board_info *info);
+
+int hinic_set_ci_table(void *hwdev, u16 q_id, struct hinic_sq_attr *attr);
+
+int hinic_func_rx_tx_flush(struct hinic_hwdev *hwdev);
+
+int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev,
+			    struct nic_interrupt_info interrupt_info);
+
+int init_aeqs_msix_attr(void *hwdev);
+
+void hinic_comm_async_event_handle(struct hinic_hwdev *hwdev, u8 cmd,
+				   void *buf_in, u16 in_size,
+				   void *buf_out, u16 *out_size);
+
+void hinic_l2nic_async_event_handle(struct hinic_hwdev *hwdev, void *param,
+				    u8 cmd, void *buf_in, u16 in_size,
+				    void *buf_out, u16 *out_size);
+
+void hinic_hilink_async_event_handle(struct hinic_hwdev *hwdev, u8 cmd,
+				     void *buf_in, u16 in_size, void *buf_out,
+				     u16 *out_size);
+
+int hinic_init_attr_table(struct hinic_hwdev *hwdev);
+
+int hinic_activate_hwdev_state(struct hinic_hwdev *hwdev);
+
+void hinic_deactivate_hwdev_state(struct hinic_hwdev *hwdev);
+
+int hinic_l2nic_reset(struct hinic_hwdev *hwdev);
+
+int hinic_set_pagesize(void *hwdev, u8 page_size);
+
+#endif /* _HINIC_PMD_HWDEV_H_ */
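[Editor's note, not part of the patch: for context on how the functions exported by this header fit together, below is a hedged sketch of one plausible bring-up order. The exact sequence, the error handling and the helper name are assumptions; the real driver flow may differ.]

/* Illustrative only: a possible ordering of the hwdev bring-up calls. */
static int example_hwdev_bringup(struct hinic_hwdev *hwdev)
{
	int err;

	err = hinic_osdep_init(hwdev);		/* DMA allocator and hash table */
	if (err)
		return err;

	err = hinic_init_attr_table(hwdev);	/* function attribute table */
	if (err)
		goto deinit_osdep;

	err = hinic_activate_hwdev_state(hwdev);	/* mark resources active */
	if (err)
		goto deinit_osdep;

	err = hinic_l2nic_reset(hwdev);		/* clean residual L2 NIC state */
	if (err)
		goto deactivate;

	return 0;

deactivate:
	hinic_deactivate_hwdev_state(hwdev);
deinit_osdep:
	hinic_osdep_deinit(hwdev);
	return err;
}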
-- 
2.18.0



Thread overview: 29+ messages
2019-06-19 15:45 [dpdk-dev] [PATCH v5 00/15] A new net PMD - hinic Ziyang Xuan
2019-06-19 15:46 ` [dpdk-dev] [PATCH v5 01/15] net/hinic/base: add HW registers definition Ziyang Xuan
2019-06-19 15:50 ` [dpdk-dev] [PATCH v5 02/15] net/hinic/base: add HW interfaces of bar operation Ziyang Xuan
2019-06-19 15:47   ` Ziyang Xuan
2019-06-19 15:54 ` [dpdk-dev] [PATCH v5 03/15] net/hinic/base: add api command channel code Ziyang Xuan
2019-06-19 15:55 ` [dpdk-dev] [PATCH v5 04/15] net/hinic/base: add support for cmdq mechanism Ziyang Xuan
2019-06-19 15:56 ` [dpdk-dev] [PATCH v5 05/15] net/hinic/base: add eq mechanism function code Ziyang Xuan
2019-06-19 15:57 ` [dpdk-dev] [PATCH v5 06/15] net/hinic/base: add mgmt module " Ziyang Xuan
2019-06-19 16:02 ` Ziyang Xuan [this message]
2019-06-19 15:58   ` [dpdk-dev] [PATCH v5 07/15] net/hinic/base: add code about hardware operation Ziyang Xuan
2019-06-19 16:04 ` [dpdk-dev] [PATCH v5 08/15] net/hinic/base: add nic business configurations Ziyang Xuan
2019-06-19 16:05 ` [dpdk-dev] [PATCH v5 09/15] net/hinic/base: add context and work queue support Ziyang Xuan
2019-06-19 16:08 ` [dpdk-dev] [PATCH v5 10/15] net/hinic: add various headers Ziyang Xuan
2019-06-26 11:54   ` Ferruh Yigit
2019-06-19 16:09 ` [dpdk-dev] [PATCH v5 11/15] net/hinic: add hinic PMD build and doc files Ziyang Xuan
2019-06-19 16:13   ` Ziyang Xuan
2019-06-26 11:55   ` Ferruh Yigit
2019-06-19 16:16 ` [dpdk-dev] [PATCH v5 12/15] net/hinic: add device initailization Ziyang Xuan
2019-06-19 16:14   ` Ziyang Xuan
2019-06-19 16:30   ` Ziyang Xuan
2019-06-19 16:18 ` [dpdk-dev] [PATCH v5 13/15] net/hinic: add start stop close queue ops Ziyang Xuan
2019-06-19 16:32   ` Ziyang Xuan
2019-06-19 16:20 ` [dpdk-dev] [PATCH v5 14/15] net/hinic: add tx/rx package burst Ziyang Xuan
2019-06-19 16:25   ` Ziyang Xuan
2019-06-26 11:54   ` Ferruh Yigit
2019-06-26 15:58     ` [dpdk-dev] Re: " Xuanziyang (William, Chip Application Design Logic and Hardware Development Dept IT_Products & Solutions)
2019-06-26 16:05       ` Ferruh Yigit
2019-06-19 16:23 ` [dpdk-dev] [PATCH v5 15/15] net/hinic: add rss stats promisc ops Ziyang Xuan
2019-06-26 11:56 ` [dpdk-dev] [PATCH v5 00/15] A new net PMD - hinic Ferruh Yigit
