(np) network processor: initialize NP resources on the host side and set up
a DTB channel used for table insert/get/delete operations.
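
For reference, a rough outline of the call order this patch adds (illustrative
only, derived from the diff below; error handling and the secondary-process
path are omitted):

    /* zxdh_eth_dev_init() */
    zxdh_init_once();                    /* primary/secondary shared data (memzone) */
    ...
    zxdh_np_init(eth_dev);               /* PF only */
        zxdh_np_dtb_res_init(dev);       /* reserve DTB conf/dump memzones */
            zxdh_get_bar_offset(&param, &res);   /* query NP BAR offset/length */
            zxdh_np_host_init(0, dpp_ctrl);      /* dev/SDT/DTB manager soft init */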

Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
 drivers/net/zxdh/meson.build   |   1 +
 drivers/net/zxdh/zxdh_ethdev.c | 234 +++++++++++++++++++++--
 drivers/net/zxdh/zxdh_ethdev.h |  30 +++
 drivers/net/zxdh/zxdh_msg.c    |  44 +++++
 drivers/net/zxdh/zxdh_msg.h    |  37 ++++
 drivers/net/zxdh/zxdh_np.c     | 340 +++++++++++++++++++++++++++++++++
 drivers/net/zxdh/zxdh_np.h     | 198 +++++++++++++++++++
 drivers/net/zxdh/zxdh_pci.c    |   2 +-
 drivers/net/zxdh/zxdh_pci.h    |   6 +-
 drivers/net/zxdh/zxdh_queue.c  |   2 +-
 drivers/net/zxdh/zxdh_queue.h  |  14 +-
 11 files changed, 875 insertions(+), 33 deletions(-)
 create mode 100644 drivers/net/zxdh/zxdh_np.c
 create mode 100644 drivers/net/zxdh/zxdh_np.h

diff --git a/drivers/net/zxdh/meson.build b/drivers/net/zxdh/meson.build
index c9960f4c73..ab24a3145c 100644
--- a/drivers/net/zxdh/meson.build
+++ b/drivers/net/zxdh/meson.build
@@ -19,4 +19,5 @@ sources = files(
         'zxdh_msg.c',
         'zxdh_pci.c',
         'zxdh_queue.c',
+        'zxdh_np.c',
 )
diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index c786198535..b8f4415e00 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -5,6 +5,7 @@
 #include <ethdev_pci.h>
 #include <bus_pci_driver.h>
 #include <rte_ethdev.h>
+#include <rte_malloc.h>
 
 #include "zxdh_ethdev.h"
 #include "zxdh_logs.h"
@@ -12,8 +13,15 @@
 #include "zxdh_msg.h"
 #include "zxdh_common.h"
 #include "zxdh_queue.h"
+#include "zxdh_np.h"
 
 struct zxdh_hw_internal zxdh_hw_internal[RTE_MAX_ETHPORTS];
+struct zxdh_shared_data *zxdh_shared_data;
+const char *ZXDH_PMD_SHARED_DATA_MZ = "zxdh_pmd_shared_data";
+rte_spinlock_t zxdh_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
+struct zxdh_dtb_shared_data g_dtb_data;
+
+#define ZXDH_INVALID_DTBQUE  0xFFFF
 
 uint16_t
 zxdh_vport_to_vfid(union zxdh_virport_num v)
@@ -406,14 +414,14 @@ zxdh_features_update(struct zxdh_hw *hw,
     ZXDH_VTPCI_OPS(hw)->set_features(hw, req_features);
 
     if ((rx_offloads & (RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) &&
-         !vtpci_with_feature(hw, ZXDH_NET_F_GUEST_CSUM)) {
+         !zxdh_pci_with_feature(hw, ZXDH_NET_F_GUEST_CSUM)) {
         PMD_DRV_LOG(ERR, "rx checksum not available on this host");
         return -ENOTSUP;
     }
 
     if ((rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) &&
-        (!vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO4) ||
-         !vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO6))) {
+        (!zxdh_pci_with_feature(hw, ZXDH_NET_F_GUEST_TSO4) ||
+         !zxdh_pci_with_feature(hw, ZXDH_NET_F_GUEST_TSO6))) {
         PMD_DRV_LOG(ERR, "Large Receive Offload not available on this host");
         return -ENOTSUP;
     }
@@ -421,20 +429,20 @@ zxdh_features_update(struct zxdh_hw *hw,
 }
 
 static bool
-rx_offload_enabled(struct zxdh_hw *hw)
+zxdh_rx_offload_enabled(struct zxdh_hw *hw)
 {
-    return vtpci_with_feature(hw, ZXDH_NET_F_GUEST_CSUM) ||
-           vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO4) ||
-           vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO6);
+    return zxdh_pci_with_feature(hw, ZXDH_NET_F_GUEST_CSUM) ||
+           zxdh_pci_with_feature(hw, ZXDH_NET_F_GUEST_TSO4) ||
+           zxdh_pci_with_feature(hw, ZXDH_NET_F_GUEST_TSO6);
 }
 
 static bool
-tx_offload_enabled(struct zxdh_hw *hw)
+zxdh_tx_offload_enabled(struct zxdh_hw *hw)
 {
-    return vtpci_with_feature(hw, ZXDH_NET_F_CSUM) ||
-           vtpci_with_feature(hw, ZXDH_NET_F_HOST_TSO4) ||
-           vtpci_with_feature(hw, ZXDH_NET_F_HOST_TSO6) ||
-           vtpci_with_feature(hw, ZXDH_NET_F_HOST_UFO);
+    return zxdh_pci_with_feature(hw, ZXDH_NET_F_CSUM) ||
+           zxdh_pci_with_feature(hw, ZXDH_NET_F_HOST_TSO4) ||
+           zxdh_pci_with_feature(hw, ZXDH_NET_F_HOST_TSO6) ||
+           zxdh_pci_with_feature(hw, ZXDH_NET_F_HOST_UFO);
 }
 
 static void
@@ -466,7 +474,7 @@ zxdh_dev_free_mbufs(struct rte_eth_dev *dev)
             continue;
         PMD_DRV_LOG(DEBUG, "Before freeing %s[%d] used and unused buf", type, i);
 
-        while ((buf = zxdh_virtqueue_detach_unused(vq)) != NULL)
+        while ((buf = zxdh_queue_detach_unused(vq)) != NULL)
             rte_pktmbuf_free(buf);
     }
 }
@@ -550,9 +558,9 @@ zxdh_init_vring(struct zxdh_virtqueue *vq)
     vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
     vq->vq_free_cnt = vq->vq_nentries;
     memset(vq->vq_descx, 0, sizeof(struct zxdh_vq_desc_extra) * vq->vq_nentries);
-    vring_init_packed(&vq->vq_packed.ring, ring_mem, ZXDH_PCI_VRING_ALIGN, size);
-    vring_desc_init_packed(vq, size);
-    virtqueue_disable_intr(vq);
+    zxdh_vring_init_packed(&vq->vq_packed.ring, ring_mem, ZXDH_PCI_VRING_ALIGN, size);
+    zxdh_vring_desc_init_packed(vq, size);
+    zxdh_queue_disable_intr(vq);
 }
 
 static int32_t
@@ -621,7 +629,7 @@ zxdh_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_logic_qidx)
     /*
      * Reserve a memzone for vring elements
      */
-    size = vring_size(hw, vq_size, ZXDH_PCI_VRING_ALIGN);
+    size = zxdh_vring_size(hw, vq_size, ZXDH_PCI_VRING_ALIGN);
     vq->vq_ring_size = RTE_ALIGN_CEIL(size, ZXDH_PCI_VRING_ALIGN);
     PMD_DRV_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d", size, vq->vq_ring_size);
 
@@ -694,7 +702,8 @@ zxdh_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_logic_qidx)
             /* first indirect descriptor is always the tx header */
             struct zxdh_vring_packed_desc *start_dp = txr[i].tx_packed_indir;
 
-            vring_desc_init_indirect_packed(start_dp, RTE_DIM(txr[i].tx_packed_indir));
+            zxdh_vring_desc_init_indirect_packed(start_dp,
+                    RTE_DIM(txr[i].tx_packed_indir));
             start_dp->addr = txvq->zxdh_net_hdr_mem + i * sizeof(*txr) +
                     offsetof(struct zxdh_tx_region, tx_hdr);
             /* length will be updated to actual pi hdr size when xmit pkt */
@@ -792,8 +801,8 @@ zxdh_dev_configure(struct rte_eth_dev *dev)
         }
     }
 
-    hw->has_tx_offload = tx_offload_enabled(hw);
-    hw->has_rx_offload = rx_offload_enabled(hw);
+    hw->has_tx_offload = zxdh_tx_offload_enabled(hw);
+    hw->has_rx_offload = zxdh_rx_offload_enabled(hw);
 
     nr_vq = dev->data->nb_rx_queues + dev->data->nb_tx_queues;
     if (nr_vq == hw->queue_num)
@@ -881,7 +890,7 @@ zxdh_init_device(struct rte_eth_dev *eth_dev)
     rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr, &eth_dev->data->mac_addrs[0]);
 
     /* If host does not support both status and MSI-X then disable LSC */
-    if (vtpci_with_feature(hw, ZXDH_NET_F_STATUS) && hw->use_msix != ZXDH_MSIX_NONE)
+    if (zxdh_pci_with_feature(hw, ZXDH_NET_F_STATUS) && hw->use_msix != ZXDH_MSIX_NONE)
         eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
     else
         eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
@@ -913,6 +922,181 @@ zxdh_agent_comm(struct rte_eth_dev *eth_dev, struct zxdh_hw *hw)
     return 0;
 }
 
+static int
+zxdh_np_dtb_res_init(struct rte_eth_dev *dev)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    struct zxdh_bar_offset_params param = {0};
+    struct zxdh_bar_offset_res res = {0};
+    int ret = 0;
+
+    if (g_dtb_data.init_done) {
+        PMD_DRV_LOG(DEBUG, "DTB res already init done, dev %s no need init",
+            dev->device->name);
+        return 0;
+    }
+    g_dtb_data.queueid = ZXDH_INVALID_DTBQUE;
+    g_dtb_data.bind_device = dev;
+    g_dtb_data.dev_refcnt++;
+    g_dtb_data.init_done = 1;
+
+    ZXDH_DEV_INIT_CTRL_T *dpp_ctrl = rte_zmalloc(NULL, sizeof(*dpp_ctrl) +
+            sizeof(ZXDH_DTB_ADDR_INFO_T) * 256, 0);
+    if (dpp_ctrl == NULL) {
+        PMD_DRV_LOG(ERR, "dev %s annot allocate memory for dpp_ctrl", dev->device->name);
+        ret = -ENOMEM;
+        goto free_res;
+    }
+    dpp_ctrl->queue_id = 0xff;
+    dpp_ctrl->vport = hw->vport.vport;
+    dpp_ctrl->vector = ZXDH_MSIX_INTR_DTB_VEC;
+    strlcpy(dpp_ctrl->port_name, dev->device->name, sizeof(dpp_ctrl->port_name));
+    dpp_ctrl->pcie_vir_addr = (uint32_t)hw->bar_addr[0];
+
+    param.pcie_id = hw->pcie_id;
+    param.virt_addr = hw->bar_addr[0] + ZXDH_CTRLCH_OFFSET;
+    param.type = ZXDH_URI_NP;
+
+    ret = zxdh_get_bar_offset(&param, &res);
+    if (ret) {
+        PMD_DRV_LOG(ERR, "dev %s get npbar offset failed", dev->device->name);
+        goto free_res;
+    }
+    dpp_ctrl->np_bar_len = res.bar_length;
+    dpp_ctrl->np_bar_offset = res.bar_offset;
+
+    if (!g_dtb_data.dtb_table_conf_mz) {
+        const struct rte_memzone *conf_mz = rte_memzone_reserve_aligned("zxdh_dtb_table_conf_mz",
+                ZXDH_DTB_TABLE_CONF_SIZE, SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
+
+        if (conf_mz == NULL) {
+            PMD_DRV_LOG(ERR,
+                "dev %s annot allocate memory for dtb table conf",
+                dev->device->name);
+            ret = -ENOMEM;
+            goto free_res;
+        }
+        dpp_ctrl->down_vir_addr = conf_mz->addr_64;
+        dpp_ctrl->down_phy_addr = conf_mz->iova;
+        g_dtb_data.dtb_table_conf_mz = conf_mz;
+    }
+
+    if (!g_dtb_data.dtb_table_dump_mz) {
+        const struct rte_memzone *dump_mz = rte_memzone_reserve_aligned("zxdh_dtb_table_dump_mz",
+                ZXDH_DTB_TABLE_DUMP_SIZE, SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
+
+        if (dump_mz == NULL) {
+            PMD_DRV_LOG(ERR,
+                "dev %s Cannot allocate memory for dtb table dump",
+                dev->device->name);
+            ret = -ENOMEM;
+            goto free_res;
+        }
+        dpp_ctrl->dump_vir_addr = dump_mz->addr_64;
+        dpp_ctrl->dump_phy_addr = dump_mz->iova;
+        g_dtb_data.dtb_table_dump_mz = dump_mz;
+    }
+
+    ret = zxdh_np_host_init(0, dpp_ctrl);
+    if (ret) {
+        PMD_DRV_LOG(ERR, "dev %s dpp host np init failed .ret %d", dev->device->name, ret);
+        goto free_res;
+    }
+
+    PMD_DRV_LOG(DEBUG, "dev %s dpp host np init ok.dtb queue %d",
+        dev->device->name, dpp_ctrl->queue_id);
+    g_dtb_data.queueid = dpp_ctrl->queue_id;
+    rte_free(dpp_ctrl);
+    return 0;
+
+free_res:
+    rte_free(dpp_ctrl);
+    return ret;
+}
+
+static int
+zxdh_init_shared_data(void)
+{
+    const struct rte_memzone *mz;
+    int ret = 0;
+
+    rte_spinlock_lock(&zxdh_shared_data_lock);
+    if (zxdh_shared_data == NULL) {
+        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+            /* Allocate shared memory. */
+            mz = rte_memzone_reserve(ZXDH_PMD_SHARED_DATA_MZ,
+                    sizeof(*zxdh_shared_data), SOCKET_ID_ANY, 0);
+            if (mz == NULL) {
+                PMD_DRV_LOG(ERR, "Cannot allocate zxdh shared data");
+                ret = -rte_errno;
+                goto error;
+            }
+            zxdh_shared_data = mz->addr;
+            memset(zxdh_shared_data, 0, sizeof(*zxdh_shared_data));
+            rte_spinlock_init(&zxdh_shared_data->lock);
+        } else { /* Lookup allocated shared memory. */
+            mz = rte_memzone_lookup(ZXDH_PMD_SHARED_DATA_MZ);
+            if (mz == NULL) {
+                PMD_DRV_LOG(ERR, "Cannot attach zxdh shared data");
+                ret = -rte_errno;
+                goto error;
+            }
+            zxdh_shared_data = mz->addr;
+        }
+    }
+
+error:
+    rte_spinlock_unlock(&zxdh_shared_data_lock);
+    return ret;
+}
+
+static int
+zxdh_init_once(void)
+{
+    int ret = 0;
+
+    if (zxdh_init_shared_data())
+        return -1;
+
+    struct zxdh_shared_data *sd = zxdh_shared_data;
+    rte_spinlock_lock(&sd->lock);
+    if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+        if (!sd->init_done) {
+            ++sd->secondary_cnt;
+            sd->init_done = true;
+        }
+        goto out;
+    }
+    /* RTE_PROC_PRIMARY */
+    if (!sd->init_done)
+        sd->init_done = true;
+    sd->dev_refcnt++;
+
+out:
+    rte_spinlock_unlock(&sd->lock);
+    return ret;
+}
+
+static int
+zxdh_np_init(struct rte_eth_dev *eth_dev)
+{
+    struct zxdh_hw *hw = eth_dev->data->dev_private;
+    int ret = 0;
+
+    if (hw->is_pf) {
+        ret = zxdh_np_dtb_res_init(eth_dev);
+        if (ret) {
+            PMD_DRV_LOG(ERR, "np dtb init failed, ret:%d ", ret);
+            return ret;
+        }
+    }
+    if (zxdh_shared_data != NULL)
+        zxdh_shared_data->np_init_done = 1;
+
+    PMD_DRV_LOG(DEBUG, "np init ok ");
+    return 0;
+}
+
 static int
 zxdh_eth_dev_init(struct rte_eth_dev *eth_dev)
 {
@@ -950,6 +1134,10 @@ zxdh_eth_dev_init(struct rte_eth_dev *eth_dev)
         hw->is_pf = 1;
     }
 
+    ret = zxdh_init_once();
+    if (ret != 0)
+        goto err_zxdh_init;
+
     ret = zxdh_init_device(eth_dev);
     if (ret < 0)
         goto err_zxdh_init;
@@ -977,6 +1165,10 @@ zxdh_eth_dev_init(struct rte_eth_dev *eth_dev)
     if (ret != 0)
         goto err_zxdh_init;
 
+    ret = zxdh_np_init(eth_dev);
+    if (ret)
+        goto err_zxdh_init;
+
     ret = zxdh_configure_intr(eth_dev);
     if (ret != 0)
         goto err_zxdh_init;
diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h
index 7658cbb461..b1f398b28e 100644
--- a/drivers/net/zxdh/zxdh_ethdev.h
+++ b/drivers/net/zxdh/zxdh_ethdev.h
@@ -35,6 +35,12 @@
 
 #define ZXDH_MBUF_BURST_SZ        64
 
+#define ZXDH_MAX_BASE_DTB_TABLE_COUNT   30
+#define ZXDH_DTB_TABLE_DUMP_SIZE        (32 * (16 + 16 * 1024))
+#define ZXDH_DTB_TABLE_CONF_SIZE        (32 * (16 + 16 * 1024))
+
+#define ZXDH_MAX_NAME_LEN               32
+
 union zxdh_virport_num {
     uint16_t vport;
     struct {
@@ -89,6 +95,30 @@ struct zxdh_hw {
     uint8_t has_rx_offload;
 };
 
+struct zxdh_dtb_shared_data {
+    uint8_t init_done;
+    char name[ZXDH_MAX_NAME_LEN];
+    uint16_t queueid;
+    uint16_t vport;
+    uint32_t vector;
+    const struct rte_memzone *dtb_table_conf_mz;
+    const struct rte_memzone *dtb_table_dump_mz;
+    const struct rte_memzone *dtb_table_bulk_dump_mz[ZXDH_MAX_BASE_DTB_TABLE_COUNT];
+    struct rte_eth_dev *bind_device;
+    uint32_t dev_refcnt;
+};
+
+/* Shared data between primary and secondary processes. */
+struct zxdh_shared_data {
+    rte_spinlock_t lock; /* Global spinlock for primary and secondary processes. */
+    int32_t init_done;       /* Whether primary has done initialization. */
+    unsigned int secondary_cnt; /* Number of secondary processes init'd. */
+
+    int32_t np_init_done;
+    uint32_t dev_refcnt;
+    struct zxdh_dtb_shared_data *dtb_data;
+};
+
 uint16_t zxdh_vport_to_vfid(union zxdh_virport_num v);
 
 #endif /* ZXDH_ETHDEV_H */
diff --git a/drivers/net/zxdh/zxdh_msg.c b/drivers/net/zxdh/zxdh_msg.c
index 53cf972f86..dd7a518a51 100644
--- a/drivers/net/zxdh/zxdh_msg.c
+++ b/drivers/net/zxdh/zxdh_msg.c
@@ -1035,3 +1035,47 @@ zxdh_bar_irq_recv(uint8_t src, uint8_t dst, uint64_t virt_addr, void *dev)
     rte_free(recved_msg);
     return ZXDH_BAR_MSG_OK;
 }
+
+int zxdh_get_bar_offset(struct zxdh_bar_offset_params *paras,
+        struct zxdh_bar_offset_res *res)
+{
+    uint16_t check_token;
+    uint16_t sum_res;
+    int ret;
+
+    if (!paras)
+        return ZXDH_BAR_MSG_ERR_NULL;
+
+    struct zxdh_offset_get_msg send_msg = {
+        .pcie_id = paras->pcie_id,
+        .type = paras->type,
+    };
+    struct zxdh_pci_bar_msg in = {
+        .payload_addr = &send_msg,
+        .payload_len = sizeof(send_msg),
+        .virt_addr = paras->virt_addr,
+        .src = ZXDH_MSG_CHAN_END_PF,
+        .dst = ZXDH_MSG_CHAN_END_RISC,
+        .module_id = ZXDH_BAR_MODULE_OFFSET_GET,
+        .src_pcieid = paras->pcie_id,
+    };
+    struct zxdh_bar_recv_msg recv_msg = {0};
+    struct zxdh_msg_recviver_mem result = {
+        .recv_buffer = &recv_msg,
+        .buffer_len = sizeof(recv_msg),
+    };
+    ret = zxdh_bar_chan_sync_msg_send(&in, &result);
+    if (ret != ZXDH_BAR_MSG_OK)
+        return -ret;
+
+    check_token = recv_msg.offset_reps.check;
+    sum_res = zxdh_bar_get_sum((uint8_t *)&send_msg, sizeof(send_msg));
+
+    if (check_token != sum_res) {
+        PMD_MSG_LOG(ERR, "expect token: 0x%x, get token: 0x%x", sum_res, check_token);
+        return ZXDH_BAR_MSG_ERR_REPLY;
+    }
+    res->bar_offset = recv_msg.offset_reps.offset;
+    res->bar_length = recv_msg.offset_reps.length;
+    return ZXDH_BAR_MSG_OK;
+}
diff --git a/drivers/net/zxdh/zxdh_msg.h b/drivers/net/zxdh/zxdh_msg.h
index 530ee406b1..fbc79e8f9d 100644
--- a/drivers/net/zxdh/zxdh_msg.h
+++ b/drivers/net/zxdh/zxdh_msg.h
@@ -131,6 +131,26 @@ enum ZXDH_TBL_MSG_TYPE {
     ZXDH_TBL_TYPE_NON,
 };
 
+enum pciebar_layout_type {
+    ZXDH_URI_VQM      = 0,
+    ZXDH_URI_SPINLOCK = 1,
+    ZXDH_URI_FWCAP    = 2,
+    ZXDH_URI_FWSHR    = 3,
+    ZXDH_URI_DRS_SEC  = 4,
+    ZXDH_URI_RSV      = 5,
+    ZXDH_URI_CTRLCH   = 6,
+    ZXDH_URI_1588     = 7,
+    ZXDH_URI_QBV      = 8,
+    ZXDH_URI_MACPCS   = 9,
+    ZXDH_URI_RDMA     = 10,
+    ZXDH_URI_MNP      = 11,
+    ZXDH_URI_MSPM     = 12,
+    ZXDH_URI_MVQM     = 13,
+    ZXDH_URI_MDPI     = 14,
+    ZXDH_URI_NP       = 15,
+    ZXDH_URI_MAX,
+};
+
 struct zxdh_msix_para {
     uint16_t pcie_id;
     uint16_t vector_risc;
@@ -174,6 +194,17 @@ struct zxdh_bar_offset_reps {
     uint32_t length;
 } __rte_packed;
 
+struct zxdh_bar_offset_params {
+    uint64_t virt_addr;  /* Bar space control space virtual address */
+    uint16_t pcie_id;
+    uint16_t type;  /* Module types corresponding to PCIBAR planning */
+};
+
+struct zxdh_bar_offset_res {
+    uint32_t bar_offset;
+    uint32_t bar_length;
+};
+
 struct zxdh_bar_recv_msg {
     uint8_t reps_ok;
     uint16_t reps_len;
@@ -204,9 +235,15 @@ struct zxdh_bar_msg_header {
     uint16_t dst_pcieid; /* used in PF-->VF */
 };
 
+struct zxdh_offset_get_msg {
+    uint16_t pcie_id;
+    uint16_t type;
+};
+
 typedef int (*zxdh_bar_chan_msg_recv_callback)(void *pay_load, uint16_t len,
         void *reps_buffer, uint16_t *reps_len, void *dev);
 
+int zxdh_get_bar_offset(struct zxdh_bar_offset_params *paras, struct zxdh_bar_offset_res *res);
 int zxdh_msg_chan_init(void);
 int zxdh_bar_msg_chan_exit(void);
 int zxdh_msg_chan_hwlock_init(struct rte_eth_dev *dev);
diff --git a/drivers/net/zxdh/zxdh_np.c b/drivers/net/zxdh/zxdh_np.c
new file mode 100644
index 0000000000..e44d7ff501
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_np.c
@@ -0,0 +1,340 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_malloc.h>
+
+#include "zxdh_np.h"
+#include "zxdh_logs.h"
+
+static uint64_t g_np_bar_offset;
+static ZXDH_DEV_MGR_T g_dev_mgr;
+static ZXDH_SDT_MGR_T g_sdt_mgr;
+ZXDH_PPU_CLS_BITMAP_T g_ppu_cls_bit_map[ZXDH_DEV_CHANNEL_MAX];
+ZXDH_DTB_MGR_T *p_dpp_dtb_mgr[ZXDH_DEV_CHANNEL_MAX];
+
+#define ZXDH_SDT_MGR_PTR_GET()    (&g_sdt_mgr)
+#define ZXDH_SDT_SOFT_TBL_GET(id) (g_sdt_mgr.sdt_tbl_array[id])
+
+#define ZXDH_COMM_CHECK_DEV_POINT(dev_id, point)\
+do {\
+    if (NULL == (point)) {\
+        PMD_DRV_LOG(ERR, "dev: %d ZXIC %s:%d[Error:POINT NULL] !"\
+            "FUNCTION : %s!", (dev_id), __FILE__, __LINE__, __func__);\
+        RTE_ASSERT(0);\
+    } \
+} while (0)
+
+#define ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, becall)\
+do {\
+    if ((rc) != 0) {\
+        PMD_DRV_LOG(ERR, "dev: %d ZXIC  %s:%d !"\
+        "-- %s Call %s Fail!", (dev_id), __FILE__, __LINE__, __func__, becall);\
+        RTE_ASSERT(0);\
+    } \
+} while (0)
+
+#define ZXDH_COMM_CHECK_POINT_NO_ASSERT(point)\
+do {\
+    if ((point) == NULL) {\
+        PMD_DRV_LOG(ERR, "ZXIC %s:%d[Error:POINT NULL] ! FUNCTION : %s!",\
+        __FILE__, __LINE__, __func__);\
+    } \
+} while (0)
+
+#define ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, becall)\
+do {\
+    if ((rc) != 0) {\
+        PMD_DRV_LOG(ERR, "ZXIC  %s:%d !-- %s Call %s"\
+        " Fail!", __FILE__, __LINE__, __func__, becall);\
+    } \
+} while (0)
+
+#define ZXDH_COMM_CHECK_RC(rc, becall)\
+do {\
+    if ((rc) != 0) {\
+        PMD_DRV_LOG(ERR, "ZXIC  %s:%d!-- %s Call %s "\
+        "Fail!", __FILE__, __LINE__, __func__, becall);\
+        RTE_ASSERT(0);\
+    } \
+} while (0)
+
+static uint32_t
+zxdh_np_dev_init(void)
+{
+    if (g_dev_mgr.is_init) {
+        PMD_DRV_LOG(ERR, "Dev is already initialized.");
+        return 0;
+    }
+
+    g_dev_mgr.device_num = 0;
+    g_dev_mgr.is_init    = 1;
+
+    return 0;
+}
+
+static uint32_t
+zxdh_np_dev_add(uint32_t  dev_id, ZXDH_DEV_TYPE_E dev_type,
+        ZXDH_DEV_ACCESS_TYPE_E  access_type, uint64_t  pcie_addr,
+        uint64_t  riscv_addr, uint64_t  dma_vir_addr,
+        uint64_t  dma_phy_addr)
+{
+    ZXDH_DEV_CFG_T *p_dev_info = NULL;
+    ZXDH_DEV_MGR_T *p_dev_mgr  = NULL;
+
+    p_dev_mgr = &g_dev_mgr;
+    if (!p_dev_mgr->is_init) {
+        PMD_DRV_LOG(ERR, "ErrorCode[ 0x%x]: Device Manager is not init!!!",
+                                 ZXDH_RC_DEV_MGR_NOT_INIT);
+        return ZXDH_RC_DEV_MGR_NOT_INIT;
+    }
+
+    if (p_dev_mgr->p_dev_array[dev_id] != NULL) {
+        /* device already exists. */
+        PMD_DRV_LOG(ERR, "Device is added again!!!");
+        p_dev_info = p_dev_mgr->p_dev_array[dev_id];
+    } else {
+        /* device is new. */
+        p_dev_info = rte_malloc(NULL, sizeof(ZXDH_DEV_CFG_T), 0);
+        ZXDH_COMM_CHECK_DEV_POINT(dev_id, p_dev_info);
+        p_dev_mgr->p_dev_array[dev_id] = p_dev_info;
+        p_dev_mgr->device_num++;
+    }
+
+    p_dev_info->device_id   = dev_id;
+    p_dev_info->dev_type    = dev_type;
+    p_dev_info->access_type = access_type;
+    p_dev_info->pcie_addr   = pcie_addr;
+    p_dev_info->riscv_addr   = riscv_addr;
+    p_dev_info->dma_vir_addr = dma_vir_addr;
+    p_dev_info->dma_phy_addr = dma_phy_addr;
+
+    return 0;
+}
+
+static uint32_t
+zxdh_np_dev_agent_status_set(uint32_t dev_id, uint32_t agent_flag)
+{
+    ZXDH_DEV_MGR_T *p_dev_mgr = NULL;
+    ZXDH_DEV_CFG_T *p_dev_info = NULL;
+
+    p_dev_mgr = &g_dev_mgr;
+    p_dev_info = p_dev_mgr->p_dev_array[dev_id];
+
+    if (p_dev_info == NULL)
+        return ZXDH_DEV_TYPE_INVALID;
+    p_dev_info->agent_flag = agent_flag;
+
+    return 0;
+}
+
+static void
+zxdh_np_sdt_mgr_init(void)
+{
+    if (!g_sdt_mgr.is_init) {
+        g_sdt_mgr.channel_num = 0;
+        g_sdt_mgr.is_init = 1;
+        memset(g_sdt_mgr.sdt_tbl_array, 0, ZXDH_DEV_CHANNEL_MAX *
+            sizeof(ZXDH_SDT_SOFT_TABLE_T *));
+    }
+}
+
+static uint32_t
+zxdh_np_sdt_mgr_create(uint32_t dev_id)
+{
+    ZXDH_SDT_SOFT_TABLE_T *p_sdt_tbl_temp = NULL;
+    ZXDH_SDT_MGR_T *p_sdt_mgr = NULL;
+
+    p_sdt_mgr = ZXDH_SDT_MGR_PTR_GET();
+
+    if (ZXDH_SDT_SOFT_TBL_GET(dev_id) == NULL) {
+        p_sdt_tbl_temp = rte_malloc(NULL, sizeof(ZXDH_SDT_SOFT_TABLE_T), 0);
+
+        p_sdt_tbl_temp->device_id = dev_id;
+        memset(p_sdt_tbl_temp->sdt_array, 0, ZXDH_DEV_SDT_ID_MAX * sizeof(ZXDH_SDT_ITEM_T));
+
+        ZXDH_SDT_SOFT_TBL_GET(dev_id) = p_sdt_tbl_temp;
+
+        p_sdt_mgr->channel_num++;
+    } else {
+        PMD_DRV_LOG(ERR, "Error: %s for dev[%d]"
+            "is called repeatedly!", __func__, dev_id);
+        return 1;
+    }
+
+    return 0;
+}
+
+static uint32_t
+zxdh_np_sdt_init(uint32_t dev_num, uint32_t *dev_id_array)
+{
+    uint32_t rc;
+    uint32_t i;
+
+    zxdh_np_sdt_mgr_init();
+
+    for (i = 0; i < dev_num; i++) {
+        rc = zxdh_np_sdt_mgr_create(dev_id_array[i]);
+        ZXDH_COMM_CHECK_RC(rc, "zxdh_sdt_mgr_create");
+    }
+
+    return rc;
+}
+
+static void
+zxdh_np_ppu_parse_cls_bitmap(uint32_t dev_id,
+                                uint32_t bitmap)
+{
+    uint32_t cls_id;
+    uint32_t mem_id;
+    uint32_t cls_use;
+    uint32_t instr_mem;
+
+    for (cls_id = 0; cls_id < ZXDH_PPU_CLUSTER_NUM; cls_id++) {
+        cls_use = (bitmap >> cls_id) & 0x1;
+        g_ppu_cls_bit_map[dev_id].cls_use[cls_id] = cls_use;
+    }
+
+    for (mem_id = 0; mem_id < ZXDH_PPU_INSTR_MEM_NUM; mem_id++) {
+        instr_mem = (bitmap >> (mem_id * 2)) & 0x3;
+        g_ppu_cls_bit_map[dev_id].instr_mem[mem_id] = ((instr_mem > 0) ? 1 : 0);
+    }
+}
+
+static ZXDH_DTB_MGR_T *
+zxdh_np_dtb_mgr_get(uint32_t dev_id)
+{
+    if (dev_id >= ZXDH_DEV_CHANNEL_MAX)
+        return NULL;
+    else
+        return p_dpp_dtb_mgr[dev_id];
+}
+
+static uint32_t
+zxdh_np_dtb_soft_init(uint32_t dev_id)
+{
+    ZXDH_DTB_MGR_T *p_dtb_mgr = NULL;
+
+    if (dev_id >= ZXDH_DEV_CHANNEL_MAX)
+        return 1;
+
+    p_dtb_mgr = zxdh_np_dtb_mgr_get(dev_id);
+    if (p_dtb_mgr == NULL) {
+        p_dpp_dtb_mgr[dev_id] = rte_zmalloc(NULL, sizeof(ZXDH_DTB_MGR_T), 0);
+        p_dtb_mgr = zxdh_np_dtb_mgr_get(dev_id);
+        if (p_dtb_mgr == NULL)
+            return 1;
+    }
+
+    return 0;
+}
+
+static uint32_t
+zxdh_np_base_soft_init(uint32_t dev_id, ZXDH_SYS_INIT_CTRL_T *p_init_ctrl)
+{
+    uint32_t dev_id_array[ZXDH_DEV_CHANNEL_MAX] = {0};
+    uint32_t rt;
+    uint32_t access_type;
+    uint32_t agent_flag;
+
+    rt = zxdh_np_dev_init();
+    ZXDH_COMM_CHECK_DEV_RC(dev_id, rt, "zxdh_dev_init");
+
+    if (p_init_ctrl->flags & ZXDH_INIT_FLAG_ACCESS_TYPE)
+        access_type = ZXDH_DEV_ACCESS_TYPE_RISCV;
+    else
+        access_type = ZXDH_DEV_ACCESS_TYPE_PCIE;
+
+    if (p_init_ctrl->flags & ZXDH_INIT_FLAG_AGENT_FLAG)
+        agent_flag = ZXDH_DEV_AGENT_ENABLE;
+    else
+        agent_flag = ZXDH_DEV_AGENT_DISABLE;
+
+    rt = zxdh_np_dev_add(dev_id,
+                     p_init_ctrl->device_type,
+                     access_type,
+                     p_init_ctrl->pcie_vir_baddr,
+                     p_init_ctrl->riscv_vir_baddr,
+                     p_init_ctrl->dma_vir_baddr,
+                     p_init_ctrl->dma_phy_baddr);
+    ZXDH_COMM_CHECK_DEV_RC(dev_id, rt, "zxdh_dev_add");
+
+    rt = zxdh_np_dev_agent_status_set(dev_id, agent_flag);
+    ZXDH_COMM_CHECK_DEV_RC(dev_id, rt, "zxdh_dev_agent_status_set");
+
+    dev_id_array[0] = dev_id;
+    rt = zxdh_np_sdt_init(1, dev_id_array);
+    ZXDH_COMM_CHECK_DEV_RC(dev_id, rt, "zxdh_sdt_init");
+
+    zxdh_np_ppu_parse_cls_bitmap(dev_id, ZXDH_PPU_CLS_ALL_START);
+
+    rt = zxdh_np_dtb_soft_init(dev_id);
+    ZXDH_COMM_CHECK_DEV_RC(dev_id, rt, "zxdh_dtb_soft_init");
+
+    return rt;
+}
+
+static void
+zxdh_np_dev_vport_set(uint32_t dev_id, uint32_t vport)
+{
+    ZXDH_DEV_MGR_T *p_dev_mgr = NULL;
+    ZXDH_DEV_CFG_T *p_dev_info = NULL;
+
+    p_dev_mgr =  &g_dev_mgr;
+    p_dev_info = p_dev_mgr->p_dev_array[dev_id];
+    p_dev_info->vport = vport;
+}
+
+static void
+zxdh_np_dev_agent_addr_set(uint32_t dev_id, uint64_t agent_addr)
+{
+    ZXDH_DEV_MGR_T *p_dev_mgr = NULL;
+    ZXDH_DEV_CFG_T *p_dev_info = NULL;
+
+    p_dev_mgr =  &g_dev_mgr;
+    p_dev_info = p_dev_mgr->p_dev_array[dev_id];
+    p_dev_info->agent_addr = agent_addr;
+}
+
+static uint64_t
+zxdh_np_addr_calc(uint64_t pcie_vir_baddr, uint32_t bar_offset)
+{
+    uint64_t np_addr;
+
+    np_addr = ((pcie_vir_baddr + bar_offset) > ZXDH_PCIE_NP_MEM_SIZE)
+                ? (pcie_vir_baddr + bar_offset - ZXDH_PCIE_NP_MEM_SIZE) : 0;
+    g_np_bar_offset = bar_offset;
+
+    return np_addr;
+}
+
+int
+zxdh_np_host_init(uint32_t dev_id,
+        ZXDH_DEV_INIT_CTRL_T *p_dev_init_ctrl)
+{
+    ZXDH_SYS_INIT_CTRL_T sys_init_ctrl = {0};
+    uint32_t rc;
+    uint64_t agent_addr;
+
+    ZXDH_COMM_CHECK_POINT_NO_ASSERT(p_dev_init_ctrl);
+
+    sys_init_ctrl.flags = (ZXDH_DEV_ACCESS_TYPE_PCIE << 0) | (ZXDH_DEV_AGENT_ENABLE << 10);
+    sys_init_ctrl.pcie_vir_baddr = zxdh_np_addr_calc(p_dev_init_ctrl->pcie_vir_addr,
+        p_dev_init_ctrl->np_bar_offset);
+    sys_init_ctrl.device_type = ZXDH_DEV_TYPE_CHIP;
+    rc = zxdh_np_base_soft_init(dev_id, &sys_init_ctrl);
+    ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxdh_base_soft_init");
+
+    zxdh_np_dev_vport_set(dev_id, p_dev_init_ctrl->vport);
+
+    agent_addr = ZXDH_PCIE_AGENT_ADDR_OFFSET + p_dev_init_ctrl->pcie_vir_addr;
+    zxdh_np_dev_agent_addr_set(dev_id, agent_addr);
+
+    return 0;
+}
diff --git a/drivers/net/zxdh/zxdh_np.h b/drivers/net/zxdh/zxdh_np.h
new file mode 100644
index 0000000000..573eafe796
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_np.h
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 ZTE Corporation
+ */
+
+#ifndef ZXDH_NP_H
+#define ZXDH_NP_H
+
+#include <stdint.h>
+
+#define ZXDH_PORT_NAME_MAX                    (32)
+#define ZXDH_DEV_CHANNEL_MAX                  (2)
+#define ZXDH_DEV_SDT_ID_MAX                   (256U)
+/*DTB*/
+#define ZXDH_DTB_QUEUE_ITEM_NUM_MAX           (32)
+#define ZXDH_DTB_QUEUE_NUM_MAX                (128)
+
+#define ZXDH_PPU_CLS_ALL_START                (0x3F)
+#define ZXDH_PPU_CLUSTER_NUM                  (6)
+#define ZXDH_PPU_INSTR_MEM_NUM                (3)
+#define ZXDH_SDT_CFG_LEN                      (2)
+
+#define ZXDH_RC_DEV_BASE                      (0x600)
+#define ZXDH_RC_DEV_PARA_INVALID              (ZXDH_RC_DEV_BASE | 0x0)
+#define ZXDH_RC_DEV_RANGE_INVALID             (ZXDH_RC_DEV_BASE | 0x1)
+#define ZXDH_RC_DEV_CALL_FUNC_FAIL            (ZXDH_RC_DEV_BASE | 0x2)
+#define ZXDH_RC_DEV_TYPE_INVALID              (ZXDH_RC_DEV_BASE | 0x3)
+#define ZXDH_RC_DEV_CONNECT_FAIL              (ZXDH_RC_DEV_BASE | 0x4)
+#define ZXDH_RC_DEV_MSG_INVALID               (ZXDH_RC_DEV_BASE | 0x5)
+#define ZXDH_RC_DEV_NOT_EXIST                 (ZXDH_RC_DEV_BASE | 0x6)
+#define ZXDH_RC_DEV_MGR_NOT_INIT              (ZXDH_RC_DEV_BASE | 0x7)
+#define ZXDH_RC_DEV_CFG_NOT_INIT              (ZXDH_RC_DEV_BASE | 0x8)
+
+#define ZXDH_SYS_VF_NP_BASE_OFFSET      0
+#define ZXDH_PCIE_DTB4K_ADDR_OFFSET     (0x6000)
+#define ZXDH_PCIE_NP_MEM_SIZE           (0x2000000)
+#define ZXDH_PCIE_AGENT_ADDR_OFFSET     (0x2000)
+
+#define ZXDH_INIT_FLAG_ACCESS_TYPE      (1 << 0)
+#define ZXDH_INIT_FLAG_SERDES_DOWN_TP   (1 << 1)
+#define ZXDH_INIT_FLAG_DDR_BACKDOOR     (1 << 2)
+#define ZXDH_INIT_FLAG_SA_MODE          (1 << 3)
+#define ZXDH_INIT_FLAG_SA_MESH          (1 << 4)
+#define ZXDH_INIT_FLAG_SA_SERDES_MODE   (1 << 5)
+#define ZXDH_INIT_FLAG_INT_DEST_MODE    (1 << 6)
+#define ZXDH_INIT_FLAG_LIF0_MODE        (1 << 7)
+#define ZXDH_INIT_FLAG_DMA_ENABLE       (1 << 8)
+#define ZXDH_INIT_FLAG_TM_IMEM_FLAG     (1 << 9)
+#define ZXDH_INIT_FLAG_AGENT_FLAG       (1 << 10)
+
+typedef enum zxdh_module_init_e {
+    ZXDH_MODULE_INIT_NPPU = 0,
+    ZXDH_MODULE_INIT_PPU,
+    ZXDH_MODULE_INIT_SE,
+    ZXDH_MODULE_INIT_ETM,
+    ZXDH_MODULE_INIT_DLB,
+    ZXDH_MODULE_INIT_TRPG,
+    ZXDH_MODULE_INIT_TSN,
+    ZXDH_MODULE_INIT_MAX
+} ZXDH_MODULE_INIT_E;
+
+typedef enum zxdh_dev_type_e {
+    ZXDH_DEV_TYPE_SIM  = 0,
+    ZXDH_DEV_TYPE_VCS  = 1,
+    ZXDH_DEV_TYPE_CHIP = 2,
+    ZXDH_DEV_TYPE_FPGA = 3,
+    ZXDH_DEV_TYPE_PCIE_ACC = 4,
+    ZXDH_DEV_TYPE_INVALID,
+} ZXDH_DEV_TYPE_E;
+
+typedef enum zxdh_dev_access_type_e {
+    ZXDH_DEV_ACCESS_TYPE_PCIE = 0,
+    ZXDH_DEV_ACCESS_TYPE_RISCV = 1,
+    ZXDH_DEV_ACCESS_TYPE_INVALID,
+} ZXDH_DEV_ACCESS_TYPE_E;
+
+typedef enum zxdh_dev_agent_flag_e {
+    ZXDH_DEV_AGENT_DISABLE = 0,
+    ZXDH_DEV_AGENT_ENABLE = 1,
+    ZXDH_DEV_AGENT_INVALID,
+} ZXDH_DEV_AGENT_FLAG_E;
+
+typedef struct zxdh_dtb_tab_up_user_addr_t {
+    uint32_t user_flag;
+    uint64_t phy_addr;
+    uint64_t vir_addr;
+} ZXDH_DTB_TAB_UP_USER_ADDR_T;
+
+typedef struct zxdh_dtb_tab_up_info_t {
+    uint64_t start_phy_addr;
+    uint64_t start_vir_addr;
+    uint32_t item_size;
+    uint32_t wr_index;
+    uint32_t rd_index;
+    uint32_t data_len[ZXDH_DTB_QUEUE_ITEM_NUM_MAX];
+    ZXDH_DTB_TAB_UP_USER_ADDR_T user_addr[ZXDH_DTB_QUEUE_ITEM_NUM_MAX];
+} ZXDH_DTB_TAB_UP_INFO_T;
+
+typedef struct zxdh_dtb_tab_down_info_t {
+    uint64_t start_phy_addr;
+    uint64_t start_vir_addr;
+    uint32_t item_size;
+    uint32_t wr_index;
+    uint32_t rd_index;
+} ZXDH_DTB_TAB_DOWN_INFO_T;
+
+typedef struct zxdh_dtb_queue_info_t {
+    uint32_t init_flag;
+    uint32_t vport;
+    uint32_t vector;
+    ZXDH_DTB_TAB_UP_INFO_T tab_up;
+    ZXDH_DTB_TAB_DOWN_INFO_T tab_down;
+} ZXDH_DTB_QUEUE_INFO_T;
+
+typedef struct zxdh_dtb_mgr_t {
+    ZXDH_DTB_QUEUE_INFO_T queue_info[ZXDH_DTB_QUEUE_NUM_MAX];
+} ZXDH_DTB_MGR_T;
+
+typedef struct zxdh_ppu_cls_bitmap_t {
+    uint32_t cls_use[ZXDH_PPU_CLUSTER_NUM];
+    uint32_t instr_mem[ZXDH_PPU_INSTR_MEM_NUM];
+} ZXDH_PPU_CLS_BITMAP_T;
+
+typedef struct dpp_sdt_item_t {
+    uint32_t     valid;
+    uint32_t     table_cfg[ZXDH_SDT_CFG_LEN];
+} ZXDH_SDT_ITEM_T;
+
+typedef struct dpp_sdt_soft_table_t {
+    uint32_t          device_id;
+    ZXDH_SDT_ITEM_T  sdt_array[ZXDH_DEV_SDT_ID_MAX];
+} ZXDH_SDT_SOFT_TABLE_T;
+
+typedef struct zxdh_sys_init_ctrl_t {
+    ZXDH_DEV_TYPE_E device_type;
+    uint32_t flags;
+    uint32_t sa_id;
+    uint32_t case_num;
+    uint32_t lif0_port_type;
+    uint32_t lif1_port_type;
+    uint64_t pcie_vir_baddr;
+    uint64_t riscv_vir_baddr;
+    uint64_t dma_vir_baddr;
+    uint64_t dma_phy_baddr;
+} ZXDH_SYS_INIT_CTRL_T;
+
+typedef struct dpp_dev_cfg_t {
+    uint32_t device_id;
+    ZXDH_DEV_TYPE_E dev_type;
+    uint32_t chip_ver;
+    uint32_t access_type;
+    uint32_t agent_flag;
+    uint32_t vport;
+    uint64_t pcie_addr;
+    uint64_t riscv_addr;
+    uint64_t dma_vir_addr;
+    uint64_t dma_phy_addr;
+    uint64_t agent_addr;
+    uint32_t init_flags[ZXDH_MODULE_INIT_MAX];
+} ZXDH_DEV_CFG_T;
+
+typedef struct zxdh_dev_mngr_t {
+    uint32_t         device_num;
+    uint32_t         is_init;
+    ZXDH_DEV_CFG_T       *p_dev_array[ZXDH_DEV_CHANNEL_MAX];
+} ZXDH_DEV_MGR_T;
+
+typedef struct zxdh_dtb_addr_info_t {
+    uint32_t sdt_no;
+    uint32_t size;
+    uint32_t phy_addr;
+    uint32_t vir_addr;
+} ZXDH_DTB_ADDR_INFO_T;
+
+typedef struct zxdh_dev_init_ctrl_t {
+    uint32_t vport;
+    char  port_name[ZXDH_PORT_NAME_MAX];
+    uint32_t vector;
+    uint32_t queue_id;
+    uint32_t np_bar_offset;
+    uint32_t np_bar_len;
+    uint32_t pcie_vir_addr;
+    uint32_t down_phy_addr;
+    uint32_t down_vir_addr;
+    uint32_t dump_phy_addr;
+    uint32_t dump_vir_addr;
+    uint32_t dump_sdt_num;
+    ZXDH_DTB_ADDR_INFO_T dump_addr_info[];
+} ZXDH_DEV_INIT_CTRL_T;
+
+typedef struct zxdh_sdt_mgr_t {
+    uint32_t          channel_num;
+    uint32_t          is_init;
+    ZXDH_SDT_SOFT_TABLE_T *sdt_tbl_array[ZXDH_DEV_CHANNEL_MAX];
+} ZXDH_SDT_MGR_T;
+
+int zxdh_np_host_init(uint32_t dev_id, ZXDH_DEV_INIT_CTRL_T *p_dev_init_ctrl);
+
+#endif /* ZXDH_NP_H */
diff --git a/drivers/net/zxdh/zxdh_pci.c b/drivers/net/zxdh/zxdh_pci.c
index 06d3f92b20..250e67d560 100644
--- a/drivers/net/zxdh/zxdh_pci.c
+++ b/drivers/net/zxdh/zxdh_pci.c
@@ -159,7 +159,7 @@ zxdh_setup_queue(struct zxdh_hw *hw, struct zxdh_virtqueue *vq)
 
     desc_addr = vq->vq_ring_mem;
     avail_addr = desc_addr + vq->vq_nentries * sizeof(struct zxdh_vring_desc);
-    if (vtpci_packed_queue(vq->hw)) {
+    if (zxdh_pci_packed_queue(vq->hw)) {
         used_addr = RTE_ALIGN_CEIL((avail_addr +
                 sizeof(struct zxdh_vring_packed_desc_event)),
                 ZXDH_PCI_VRING_ALIGN);
diff --git a/drivers/net/zxdh/zxdh_pci.h b/drivers/net/zxdh/zxdh_pci.h
index ed6fd89742..d6487a574f 100644
--- a/drivers/net/zxdh/zxdh_pci.h
+++ b/drivers/net/zxdh/zxdh_pci.h
@@ -114,15 +114,15 @@ struct zxdh_pci_common_cfg {
 };
 
 static inline int32_t
-vtpci_with_feature(struct zxdh_hw *hw, uint64_t bit)
+zxdh_pci_with_feature(struct zxdh_hw *hw, uint64_t bit)
 {
     return (hw->guest_features & (1ULL << bit)) != 0;
 }
 
 static inline int32_t
-vtpci_packed_queue(struct zxdh_hw *hw)
+zxdh_pci_packed_queue(struct zxdh_hw *hw)
 {
-    return vtpci_with_feature(hw, ZXDH_F_RING_PACKED);
+    return zxdh_pci_with_feature(hw, ZXDH_F_RING_PACKED);
 }
 
 struct zxdh_pci_ops {
diff --git a/drivers/net/zxdh/zxdh_queue.c b/drivers/net/zxdh/zxdh_queue.c
index 462a88b23c..b4ef90ea36 100644
--- a/drivers/net/zxdh/zxdh_queue.c
+++ b/drivers/net/zxdh/zxdh_queue.c
@@ -13,7 +13,7 @@
 #include "zxdh_msg.h"
 
 struct rte_mbuf *
-zxdh_virtqueue_detach_unused(struct zxdh_virtqueue *vq)
+zxdh_queue_detach_unused(struct zxdh_virtqueue *vq)
 {
     struct rte_mbuf *cookie = NULL;
     int32_t          idx    = 0;
diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h
index 1088bf08fc..1304d5e4ea 100644
--- a/drivers/net/zxdh/zxdh_queue.h
+++ b/drivers/net/zxdh/zxdh_queue.h
@@ -206,11 +206,11 @@ struct zxdh_tx_region {
 };
 
 static inline size_t
-vring_size(struct zxdh_hw *hw, uint32_t num, unsigned long align)
+zxdh_vring_size(struct zxdh_hw *hw, uint32_t num, unsigned long align)
 {
     size_t size;
 
-    if (vtpci_packed_queue(hw)) {
+    if (zxdh_pci_packed_queue(hw)) {
         size = num * sizeof(struct zxdh_vring_packed_desc);
         size += sizeof(struct zxdh_vring_packed_desc_event);
         size = RTE_ALIGN_CEIL(size, align);
@@ -226,7 +226,7 @@ vring_size(struct zxdh_hw *hw, uint32_t num, unsigned long align)
 }
 
 static inline void
-vring_init_packed(struct zxdh_vring_packed *vr, uint8_t *p,
+zxdh_vring_init_packed(struct zxdh_vring_packed *vr, uint8_t *p,
         unsigned long align, uint32_t num)
 {
     vr->num    = num;
@@ -238,7 +238,7 @@ vring_init_packed(struct zxdh_vring_packed *vr, uint8_t *p,
 }
 
 static inline void
-vring_desc_init_packed(struct zxdh_virtqueue *vq, int32_t n)
+zxdh_vring_desc_init_packed(struct zxdh_virtqueue *vq, int32_t n)
 {
     int32_t i = 0;
 
@@ -251,7 +251,7 @@ vring_desc_init_packed(struct zxdh_virtqueue *vq, int32_t n)
 }
 
 static inline void
-vring_desc_init_indirect_packed(struct zxdh_vring_packed_desc *dp, int32_t n)
+zxdh_vring_desc_init_indirect_packed(struct zxdh_vring_packed_desc *dp, int32_t n)
 {
     int32_t i = 0;
 
@@ -262,7 +262,7 @@ vring_desc_init_indirect_packed(struct zxdh_vring_packed_desc *dp, int32_t n)
 }
 
 static inline void
-virtqueue_disable_intr(struct zxdh_virtqueue *vq)
+zxdh_queue_disable_intr(struct zxdh_virtqueue *vq)
 {
     if (vq->vq_packed.event_flags_shadow != ZXDH_RING_EVENT_FLAGS_DISABLE) {
         vq->vq_packed.event_flags_shadow = ZXDH_RING_EVENT_FLAGS_DISABLE;
@@ -270,7 +270,7 @@ virtqueue_disable_intr(struct zxdh_virtqueue *vq)
     }
 }
 
-struct rte_mbuf *zxdh_virtqueue_detach_unused(struct zxdh_virtqueue *vq);
+struct rte_mbuf *zxdh_queue_detach_unused(struct zxdh_virtqueue *vq);
 int32_t zxdh_free_queues(struct rte_eth_dev *dev);
 int32_t zxdh_get_queue_type(uint16_t vtpci_queue_idx);
 
-- 
2.27.0