(np) release network processor resources on the host

When a port is closed or its initialization fails, release the DTB
queue bound to the device, free the memzones allocated for DTB table
configuration, dump and bulk dump, and destroy the DTB, TLB and SDT
managers once the last reference to the NP context is dropped.

Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
drivers/net/zxdh/zxdh_ethdev.c | 48 ++++
drivers/net/zxdh/zxdh_np.c | 470 +++++++++++++++++++++++++++++++++
drivers/net/zxdh/zxdh_np.h | 107 ++++++++
3 files changed, 625 insertions(+)
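
The bit-field helpers added in zxdh_np.c (zxdh_np_comm_read_bits() and
zxdh_np_comm_write_bits()) number bits MSB-first: bit 0 is the most
significant bit of byte 0, and the least significant bit of the data
lands at end_bit. The standalone sketch below is illustrative only and
not part of the patch; it re-implements that packing convention in a
simplified form, and the demo_* names and field position are made up
for the example.

#include <stdint.h>
#include <stdio.h>

/* Set bits [start_bit, end_bit] of buf to val, MSB-first numbering:
 * bit 0 is the MSB of byte 0, and the LSB of val lands at end_bit.
 */
static void demo_write_bits(uint8_t *buf, uint32_t start_bit,
			    uint32_t end_bit, uint32_t val)
{
	uint32_t len = end_bit - start_bit + 1;
	uint32_t i;

	for (i = 0; i < len; i++) {
		uint32_t bit = end_bit - i;           /* walk the field from its LSB */
		uint8_t mask = 1U << (7 - (bit & 7)); /* MSB-first inside a byte */

		if (val & (1U << i))
			buf[bit >> 3] |= mask;
		else
			buf[bit >> 3] &= ~mask;
	}
}

int main(void)
{
	uint8_t reg_image[4] = {0};

	/* write the 5-bit value 0x15 into bits 3..7 of the register image */
	demo_write_bits(reg_image, 3, 7, 0x15);
	printf("%02x %02x %02x %02x\n",
	       reg_image[0], reg_image[1], reg_image[2], reg_image[3]);
	/* prints "15 00 00 00": the field occupies the low 5 bits of byte 0 */
	return 0;
}
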
diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index b8f4415e00..4e114d95da 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -841,6 +841,51 @@ zxdh_dev_configure(struct rte_eth_dev *dev)
return ret;
}
+static void
+zxdh_np_dtb_data_res_free(struct zxdh_hw *hw)
+{
+ struct rte_eth_dev *dev = hw->eth_dev;
+ int ret;
+ int i;
+
+ if (g_dtb_data.init_done && g_dtb_data.bind_device == dev) {
+ ret = zxdh_np_online_uninit(0, dev->data->name, g_dtb_data.queueid);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s dpp_np_online_uninstall failed", dev->data->name);
+
+ if (g_dtb_data.dtb_table_conf_mz)
+ rte_memzone_free(g_dtb_data.dtb_table_conf_mz);
+
+ if (g_dtb_data.dtb_table_dump_mz) {
+ rte_memzone_free(g_dtb_data.dtb_table_dump_mz);
+ g_dtb_data.dtb_table_dump_mz = NULL;
+ }
+
+ for (i = 0; i < ZXDH_MAX_BASE_DTB_TABLE_COUNT; i++) {
+ if (g_dtb_data.dtb_table_bulk_dump_mz[i]) {
+ rte_memzone_free(g_dtb_data.dtb_table_bulk_dump_mz[i]);
+ g_dtb_data.dtb_table_bulk_dump_mz[i] = NULL;
+ }
+ }
+ g_dtb_data.init_done = 0;
+ g_dtb_data.bind_device = NULL;
+ }
+ if (zxdh_shared_data != NULL)
+ zxdh_shared_data->np_init_done = 0;
+}
+
+static void
+zxdh_np_uninit(struct rte_eth_dev *dev)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+
+ if (!g_dtb_data.init_done && !g_dtb_data.dev_refcnt)
+ return;
+
+ if (--g_dtb_data.dev_refcnt == 0)
+ zxdh_np_dtb_data_res_free(hw);
+}
+
static int
zxdh_dev_close(struct rte_eth_dev *dev)
{
@@ -848,6 +893,7 @@ zxdh_dev_close(struct rte_eth_dev *dev)
int ret = 0;
zxdh_intr_release(dev);
+ zxdh_np_uninit(dev);
zxdh_pci_reset(hw);
zxdh_dev_free_mbufs(dev);
@@ -1010,6 +1056,7 @@ zxdh_np_dtb_res_init(struct rte_eth_dev *dev)
return 0;
free_res:
+ zxdh_np_dtb_data_res_free(hw);
rte_free(dpp_ctrl);
return ret;
}
@@ -1177,6 +1224,7 @@ zxdh_eth_dev_init(struct rte_eth_dev *eth_dev)
err_zxdh_init:
zxdh_intr_release(eth_dev);
+ zxdh_np_uninit(eth_dev);
zxdh_bar_msg_chan_exit();
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
diff --git a/drivers/net/zxdh/zxdh_np.c b/drivers/net/zxdh/zxdh_np.c
index e44d7ff501..28728b0c68 100644
--- a/drivers/net/zxdh/zxdh_np.c
+++ b/drivers/net/zxdh/zxdh_np.c
@@ -18,10 +18,21 @@ static ZXDH_DEV_MGR_T g_dev_mgr;
static ZXDH_SDT_MGR_T g_sdt_mgr;
ZXDH_PPU_CLS_BITMAP_T g_ppu_cls_bit_map[ZXDH_DEV_CHANNEL_MAX];
ZXDH_DTB_MGR_T *p_dpp_dtb_mgr[ZXDH_DEV_CHANNEL_MAX];
+ZXDH_RISCV_DTB_MGR *p_riscv_dtb_queue_mgr[ZXDH_DEV_CHANNEL_MAX];
+ZXDH_TLB_MGR_T *g_p_dpp_tlb_mgr[ZXDH_DEV_CHANNEL_MAX];
+ZXDH_REG_T g_dpp_reg_info[4];
#define ZXDH_SDT_MGR_PTR_GET() (&g_sdt_mgr)
#define ZXDH_SDT_SOFT_TBL_GET(id) (g_sdt_mgr.sdt_tbl_array[id])
+#define ZXDH_COMM_MASK_BIT(_bitnum_)\
+ (0x1U << (_bitnum_))
+
+#define ZXDH_COMM_GET_BIT_MASK(_inttype_, _bitqnt_)\
+ ((_inttype_)(((_bitqnt_) < 32)))
+
+#define ZXDH_REG_DATA_MAX (128)
+
#define ZXDH_COMM_CHECK_DEV_POINT(dev_id, point)\
do {\
if (NULL == (point)) {\
@@ -338,3 +349,462 @@ zxdh_np_host_init(uint32_t dev_id,
return 0;
}
+
+static ZXDH_RISCV_DTB_MGR *
+zxdh_np_riscv_dtb_queue_mgr_get(uint32_t dev_id)
+{
+ if (dev_id >= ZXDH_DEV_CHANNEL_MAX)
+ return NULL;
+ else
+ return p_riscv_dtb_queue_mgr[dev_id];
+}
+
+static uint32_t
+zxdh_np_riscv_dtb_mgr_queue_info_delete(uint32_t dev_id, uint32_t queue_id)
+{
+ ZXDH_RISCV_DTB_MGR *p_riscv_dtb_mgr = NULL;
+
+ p_riscv_dtb_mgr = zxdh_np_riscv_dtb_queue_mgr_get(dev_id);
+ if (p_riscv_dtb_mgr == NULL)
+ return 1;
+
+ p_riscv_dtb_mgr->queue_alloc_count--;
+ p_riscv_dtb_mgr->queue_user_info[queue_id].alloc_flag = 0;
+ p_riscv_dtb_mgr->queue_user_info[queue_id].queue_id = 0xFF;
+ p_riscv_dtb_mgr->queue_user_info[queue_id].vport = 0;
+ memset(p_riscv_dtb_mgr->queue_user_info[queue_id].user_name, 0, ZXDH_PORT_NAME_MAX);
+
+ return 0;
+}
+
+static uint32_t
+zxdh_np_dev_get_dev_type(uint32_t dev_id)
+{
+ ZXDH_DEV_MGR_T *p_dev_mgr = NULL;
+ ZXDH_DEV_CFG_T *p_dev_info = NULL;
+
+ p_dev_mgr = &g_dev_mgr;
+ p_dev_info = p_dev_mgr->p_dev_array[dev_id];
+
+ if (p_dev_info == NULL)
+ return 0xffff;
+
+ return p_dev_info->dev_type;
+}
+
+static uint32_t
+zxdh_np_comm_read_bits(uint8_t *p_base, uint32_t base_size_bit,
+ uint32_t *p_data, uint32_t start_bit, uint32_t end_bit)
+{
+ uint32_t start_byte_index;
+ uint32_t end_byte_index;
+ uint32_t byte_num;
+ uint32_t buffer_size;
+ uint32_t len;
+
+ if (0 != (base_size_bit % 8))
+ return 1;
+
+ if (start_bit > end_bit)
+ return 1;
+
+ if (base_size_bit < end_bit)
+ return 1;
+
+ len = end_bit - start_bit + 1;
+ buffer_size = base_size_bit / 8;
+ while (0 != (buffer_size & (buffer_size - 1)))
+ buffer_size += 1;
+
+ *p_data = 0;
+ end_byte_index = (end_bit >> 3);
+ start_byte_index = (start_bit >> 3);
+
+ if (start_byte_index == end_byte_index) {
+ *p_data = (uint32_t)(((p_base[start_byte_index] >> (7U - (end_bit & 7)))
+ & (0xff >> (8U - len))) & 0xff);
+ return 0;
+ }
+
+ if (start_bit & 7) {
+ *p_data = (p_base[start_byte_index] & (0xff >> (start_bit & 7))) & UINT8_MAX;
+ start_byte_index++;
+ }
+
+ for (byte_num = start_byte_index; byte_num < end_byte_index; byte_num++) {
+ *p_data <<= 8;
+ *p_data += p_base[byte_num];
+ }
+
+ *p_data <<= 1 + (end_bit & 7);
+ *p_data += ((p_base[byte_num & (buffer_size - 1)] & (0xff << (7 - (end_bit & 7)))) >>
+ (7 - (end_bit & 7))) & 0xff;
+
+ return 0;
+}
+
+static uint32_t
+zxdh_np_comm_read_bits_ex(uint8_t *p_base, uint32_t base_size_bit,
+ uint32_t *p_data, uint32_t msb_start_pos, uint32_t len)
+{
+ uint32_t rtn;
+
+ rtn = zxdh_np_comm_read_bits(p_base,
+ base_size_bit,
+ p_data,
+ (base_size_bit - 1 - msb_start_pos),
+ (base_size_bit - 1 - msb_start_pos + len - 1));
+ return rtn;
+}
+
+static uint32_t
+zxdh_np_reg_read(uint32_t dev_id, uint32_t reg_no,
+ uint32_t m_offset, uint32_t n_offset, void *p_data)
+{
+ uint32_t p_buff[ZXDH_REG_DATA_MAX] = {0};
+ ZXDH_REG_T *p_reg_info = NULL;
+ ZXDH_FIELD_T *p_field_info = NULL;
+ uint32_t rc = 0;
+ uint32_t i;
+
+ if (reg_no < 4) {
+ p_reg_info = &g_dpp_reg_info[reg_no];
+ p_field_info = p_reg_info->p_fields;
+ for (i = 0; i < p_reg_info->field_num; i++) {
+ rc = zxdh_np_comm_read_bits_ex((uint8_t *)p_buff,
+ p_reg_info->width * 8,
+ (uint32_t *)p_data + i,
+ p_field_info[i].msb_pos,
+ p_field_info[i].len);
+ ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxic_comm_read_bits_ex");
+ PMD_DRV_LOG(ERR, "dev_id %d(%d)(%d)is ok!", dev_id, m_offset, n_offset);
+ }
+ }
+ return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_queue_vm_info_get(uint32_t dev_id,
+ uint32_t queue_id,
+ ZXDH_DTB_QUEUE_VM_INFO_T *p_vm_info)
+{
+ ZXDH_DTB4K_DTB_ENQ_CFG_EPID_V_FUNC_NUM_0_127_T vm_info = {0};
+ uint32_t rc;
+
+ rc = zxdh_np_reg_read(dev_id, ZXDH_DTB_CFG_EPID_V_FUNC_NUM,
+ 0, queue_id, &vm_info);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_reg_read");
+
+ p_vm_info->dbi_en = vm_info.dbi_en;
+ p_vm_info->queue_en = vm_info.queue_en;
+ p_vm_info->epid = vm_info.cfg_epid;
+ p_vm_info->vector = vm_info.cfg_vector;
+ p_vm_info->vfunc_num = vm_info.cfg_vfunc_num;
+ p_vm_info->func_num = vm_info.cfg_func_num;
+ p_vm_info->vfunc_active = vm_info.cfg_vfunc_active;
+
+ return 0;
+}
+
+static uint32_t
+zxdh_np_comm_write_bits(uint8_t *p_base, uint32_t base_size_bit,
+ uint32_t data, uint32_t start_bit, uint32_t end_bit)
+{
+ uint32_t start_byte_index;
+ uint32_t end_byte_index;
+ uint8_t mask_value;
+ uint32_t byte_num;
+ uint32_t buffer_size;
+
+ if (0 != (base_size_bit % 8))
+ return 1;
+
+ if (start_bit > end_bit)
+ return 1;
+
+ if (base_size_bit < end_bit)
+ return 1;
+
+ buffer_size = base_size_bit / 8;
+
+ while (0 != (buffer_size & (buffer_size - 1)))
+ buffer_size += 1;
+
+ end_byte_index = (end_bit >> 3);
+ start_byte_index = (start_bit >> 3);
+
+ if (start_byte_index == end_byte_index) {
+ mask_value = ((0xFE << (7 - (start_bit & 7))) & 0xff);
+ mask_value |= (((1 << (7 - (end_bit & 7))) - 1) & 0xff);
+ p_base[end_byte_index] &= mask_value;
+ p_base[end_byte_index] |= (((data << (7 - (end_bit & 7)))) & 0xff);
+ return 0;
+ }
+
+ if (7 != (end_bit & 7)) {
+ mask_value = ((0x7f >> (end_bit & 7)) & 0xff);
+ p_base[end_byte_index] &= mask_value;
+ p_base[end_byte_index] |= ((data << (7 - (end_bit & 7))) & 0xff);
+ end_byte_index--;
+ data >>= 1 + (end_bit & 7);
+ }
+
+ for (byte_num = end_byte_index; byte_num > start_byte_index; byte_num--) {
+ p_base[byte_num & (buffer_size - 1)] = data & 0xff;
+ data >>= 8;
+ }
+
+ mask_value = ((0xFE << (7 - (start_bit & 7))) & 0xff);
+ p_base[byte_num] &= mask_value;
+ p_base[byte_num] |= data;
+
+ return 0;
+}
+
+static uint32_t
+zxdh_np_comm_write_bits_ex(uint8_t *p_base,
+ uint32_t base_size_bit,
+ uint32_t data,
+ uint32_t msb_start_pos,
+ uint32_t len)
+{
+ uint32_t rtn;
+
+ rtn = zxdh_np_comm_write_bits(p_base,
+ base_size_bit,
+ data,
+ (base_size_bit - 1 - msb_start_pos),
+ (base_size_bit - 1 - msb_start_pos + len - 1));
+
+ return rtn;
+}
+
+static uint32_t
+zxdh_np_reg_write(uint32_t dev_id, uint32_t reg_no,
+ uint32_t m_offset, uint32_t n_offset, void *p_data)
+{
+ uint32_t p_buff[ZXDH_REG_DATA_MAX] = {0};
+ ZXDH_REG_T *p_reg_info = NULL;
+ ZXDH_FIELD_T *p_field_info = NULL;
+ uint32_t temp_data;
+ uint32_t rc;
+ uint32_t i;
+
+ if (reg_no < 4) {
+ p_reg_info = &g_dpp_reg_info[reg_no];
+ p_field_info = p_reg_info->p_fields;
+
+ for (i = 0; i < p_reg_info->field_num; i++) {
+ if (p_field_info[i].len <= 32) {
+ temp_data = *((uint32_t *)p_data + i);
+ rc = zxdh_np_comm_write_bits_ex((uint8_t *)p_buff,
+ p_reg_info->width * 8,
+ temp_data,
+ p_field_info[i].msb_pos,
+ p_field_info[i].len);
+ ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxdh_comm_write_bits_ex");
+ PMD_DRV_LOG(ERR, "dev_id %d(%d)(%d)is ok!",
+ dev_id, m_offset, n_offset);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static uint32_t
+zxdh_np_dtb_queue_vm_info_set(uint32_t dev_id,
+ uint32_t queue_id,
+ ZXDH_DTB_QUEUE_VM_INFO_T *p_vm_info)
+{
+ uint32_t rc = 0;
+ ZXDH_DTB4K_DTB_ENQ_CFG_EPID_V_FUNC_NUM_0_127_T vm_info = {0};
+
+ vm_info.dbi_en = p_vm_info->dbi_en;
+ vm_info.queue_en = p_vm_info->queue_en;
+ vm_info.cfg_epid = p_vm_info->epid;
+ vm_info.cfg_vector = p_vm_info->vector;
+ vm_info.cfg_vfunc_num = p_vm_info->vfunc_num;
+ vm_info.cfg_func_num = p_vm_info->func_num;
+ vm_info.cfg_vfunc_active = p_vm_info->vfunc_active;
+
+ rc = zxdh_np_reg_write(dev_id, ZXDH_DTB_CFG_EPID_V_FUNC_NUM,
+ 0, queue_id, &vm_info);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_reg_write");
+
+ return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_queue_enable_set(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t enable)
+{
+ ZXDH_DTB_QUEUE_VM_INFO_T vm_info = {0};
+ uint32_t rc;
+
+ rc = zxdh_np_dtb_queue_vm_info_get(dev_id, queue_id, &vm_info);
+ ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxdh_dtb_queue_vm_info_get");
+
+ vm_info.queue_en = enable;
+ rc = zxdh_np_dtb_queue_vm_info_set(dev_id, queue_id, &vm_info);
+ ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxdh_dtb_queue_vm_info_set");
+
+ return rc;
+}
+
+static uint32_t
+zxdh_np_riscv_dpp_dtb_queue_id_release(uint32_t dev_id,
+ char name[ZXDH_PORT_NAME_MAX], uint32_t queue_id)
+{
+ ZXDH_RISCV_DTB_MGR *p_riscv_dtb_mgr = NULL;
+
+ p_riscv_dtb_mgr = zxdh_np_riscv_dtb_queue_mgr_get(dev_id);
+ if (p_riscv_dtb_mgr == NULL)
+ return 1;
+
+ if (zxdh_np_dev_get_dev_type(dev_id) == ZXDH_DEV_TYPE_SIM)
+ return 0;
+
+ if (p_riscv_dtb_mgr->queue_user_info[queue_id].alloc_flag != 1) {
+ PMD_DRV_LOG(ERR, "queue %d not alloc!", queue_id);
+ return 2;
+ }
+
+ if (strcmp(p_riscv_dtb_mgr->queue_user_info[queue_id].user_name, name) != 0) {
+ PMD_DRV_LOG(ERR, "queue %d name %s error!", queue_id, name);
+ return 3;
+ }
+ zxdh_np_dtb_queue_enable_set(dev_id, queue_id, 0);
+ zxdh_np_riscv_dtb_mgr_queue_info_delete(dev_id, queue_id);
+
+ return 0;
+}
+
+static uint32_t
+zxdh_np_dtb_queue_unused_item_num_get(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t *p_item_num)
+{
+ uint32_t rc;
+
+ if (zxdh_np_dev_get_dev_type(dev_id) == ZXDH_DEV_TYPE_SIM) {
+ *p_item_num = 32;
+ return 0;
+ }
+
+ rc = zxdh_np_reg_read(dev_id, ZXDH_DTB_INFO_QUEUE_BUF_SPACE,
+ 0, queue_id, p_item_num);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "dpp_reg_read");
+ return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_queue_id_free(uint32_t dev_id,
+ uint32_t queue_id)
+{
+ uint32_t item_num = 0;
+ ZXDH_DTB_MGR_T *p_dtb_mgr = NULL;
+ uint32_t rc;
+
+ p_dtb_mgr = p_dpp_dtb_mgr[dev_id];
+ if (p_dtb_mgr == NULL)
+ return 1;
+
+ rc = zxdh_np_dtb_queue_unused_item_num_get(dev_id, queue_id, &item_num);
+
+ p_dtb_mgr->queue_info[queue_id].init_flag = 0;
+ p_dtb_mgr->queue_info[queue_id].vport = 0;
+ p_dtb_mgr->queue_info[queue_id].vector = 0;
+
+ return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_queue_release(uint32_t devid,
+ char pname[32],
+ uint32_t queueid)
+{
+ uint32_t rc;
+
+ ZXDH_COMM_CHECK_DEV_POINT(devid, pname);
+
+ rc = zxdh_np_riscv_dpp_dtb_queue_id_release(devid, pname, queueid);
+ ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxdh_riscv_dpp_dtb_queue_id_release");
+
+ rc = zxdh_np_dtb_queue_id_free(devid, queueid);
+ ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxdh_dtb_queue_id_free");
+
+ return rc;
+}
+
+static void
+zxdh_np_dtb_mgr_destroy(uint32_t dev_id)
+{
+ if (p_dpp_dtb_mgr[dev_id] != NULL) {
+ free(p_dpp_dtb_mgr[dev_id]);
+ p_dpp_dtb_mgr[dev_id] = NULL;
+ }
+}
+
+static void
+zxdh_np_tlb_mgr_destroy(uint32_t dev_id)
+{
+ if (g_p_dpp_tlb_mgr[dev_id] != NULL) {
+ free(g_p_dpp_tlb_mgr[dev_id]);
+ g_p_dpp_tlb_mgr[dev_id] = NULL;
+ }
+}
+
+static void
+zxdh_np_sdt_mgr_destroy(uint32_t dev_id)
+{
+ ZXDH_SDT_SOFT_TABLE_T *p_sdt_tbl_temp = NULL;
+ ZXDH_SDT_MGR_T *p_sdt_mgr = NULL;
+
+ p_sdt_tbl_temp = ZXDH_SDT_SOFT_TBL_GET(dev_id);
+ p_sdt_mgr = ZXDH_SDT_MGR_PTR_GET();
+
+ if (p_sdt_tbl_temp != NULL)
+ free(p_sdt_tbl_temp);
+
+ ZXDH_SDT_SOFT_TBL_GET(dev_id) = NULL;
+
+ p_sdt_mgr->channel_num--;
+}
+
+static void
+zxdh_np_dev_del(uint32_t dev_id)
+{
+ ZXDH_DEV_CFG_T *p_dev_info = NULL;
+ ZXDH_DEV_MGR_T *p_dev_mgr = NULL;
+
+ p_dev_mgr = &g_dev_mgr;
+ p_dev_info = p_dev_mgr->p_dev_array[dev_id];
+
+ if (p_dev_info != NULL) {
+ free(p_dev_info);
+ p_dev_mgr->p_dev_array[dev_id] = NULL;
+ p_dev_mgr->device_num--;
+ }
+}
+
+int
+zxdh_np_online_uninit(uint32_t dev_id,
+ char *port_name,
+ uint32_t queue_id)
+{
+ uint32_t rc;
+
+ rc = zxdh_np_dtb_queue_release(dev_id, port_name, queue_id);
+ if (rc != 0)
+ PMD_DRV_LOG(ERR, "%s:dtb release error,"
+ "port name %s queue id %d. ", __func__, port_name, queue_id);
+
+ zxdh_np_dtb_mgr_destroy(dev_id);
+ zxdh_np_tlb_mgr_destroy(dev_id);
+ zxdh_np_sdt_mgr_destroy(dev_id);
+ zxdh_np_dev_del(dev_id);
+
+ return 0;
+}
diff --git a/drivers/net/zxdh/zxdh_np.h b/drivers/net/zxdh/zxdh_np.h
index 573eafe796..dc0e867827 100644
--- a/drivers/net/zxdh/zxdh_np.h
+++ b/drivers/net/zxdh/zxdh_np.h
@@ -47,6 +47,11 @@
#define ZXDH_INIT_FLAG_TM_IMEM_FLAG (1 << 9)
#define ZXDH_INIT_FLAG_AGENT_FLAG (1 << 10)
+#define ZXDH_ACL_TBL_ID_MIN (0)
+#define ZXDH_ACL_TBL_ID_MAX (7)
+#define ZXDH_ACL_TBL_ID_NUM (8U)
+#define ZXDH_ACL_BLOCK_NUM (8U)
+
typedef enum zxdh_module_init_e {
ZXDH_MODULE_INIT_NPPU = 0,
ZXDH_MODULE_INIT_PPU,
@@ -67,6 +72,15 @@ typedef enum zxdh_dev_type_e {
ZXDH_DEV_TYPE_INVALID,
} ZXDH_DEV_TYPE_E;
+typedef enum zxdh_reg_info_e {
+ ZXDH_DTB_CFG_QUEUE_DTB_HADDR = 0,
+ ZXDH_DTB_CFG_QUEUE_DTB_LADDR = 1,
+ ZXDH_DTB_CFG_QUEUE_DTB_LEN = 2,
+ ZXDH_DTB_INFO_QUEUE_BUF_SPACE = 3,
+ ZXDH_DTB_CFG_EPID_V_FUNC_NUM = 4,
+ ZXDH_REG_ENUM_MAX_VALUE
+} ZXDH_REG_INFO_E;
+
typedef enum zxdh_dev_access_type_e {
ZXDH_DEV_ACCESS_TYPE_PCIE = 0,
ZXDH_DEV_ACCESS_TYPE_RISCV = 1,
@@ -79,6 +93,26 @@ typedef enum zxdh_dev_agent_flag_e {
ZXDH_DEV_AGENT_INVALID,
} ZXDH_DEV_AGENT_FLAG_E;
+typedef enum zxdh_acl_pri_mode_e {
+ ZXDH_ACL_PRI_EXPLICIT = 1,
+ ZXDH_ACL_PRI_IMPLICIT,
+ ZXDH_ACL_PRI_SPECIFY,
+ ZXDH_ACL_PRI_INVALID,
+} ZXDH_ACL_PRI_MODE_E;
+
+typedef struct zxdh_d_node {
+ void *data;
+ struct zxdh_d_node *prev;
+ struct zxdh_d_node *next;
+} ZXDH_D_NODE;
+
+typedef struct zxdh_d_head {
+ uint32_t used;
+ uint32_t maxnum;
+ ZXDH_D_NODE *p_next;
+ ZXDH_D_NODE *p_prev;
+} ZXDH_D_HEAD;
+
typedef struct zxdh_dtb_tab_up_user_addr_t {
uint32_t user_flag;
uint64_t phy_addr;
@@ -193,6 +227,79 @@ typedef struct zxdh_sdt_mgr_t {
ZXDH_SDT_SOFT_TABLE_T *sdt_tbl_array[ZXDH_DEV_CHANNEL_MAX];
} ZXDH_SDT_MGR_T;
+typedef struct zxdh_riscv_dtb_queue_user_info_t {
+ uint32_t alloc_flag;
+ uint32_t queue_id;
+ uint32_t vport;
+ char user_name[ZXDH_PORT_NAME_MAX];
+} ZXDH_RISCV_DTB_QUEUE_USER_INFO_T;
+
+typedef struct zxdh_riscv_dtb_mgr {
+ uint32_t queue_alloc_count;
+ uint32_t queue_index;
+ ZXDH_RISCV_DTB_QUEUE_USER_INFO_T queue_user_info[ZXDH_DTB_QUEUE_NUM_MAX];
+} ZXDH_RISCV_DTB_MGR;
+
+typedef struct zxdh_dtb_queue_vm_info_t {
+ uint32_t dbi_en;
+ uint32_t queue_en;
+ uint32_t epid;
+ uint32_t vfunc_num;
+ uint32_t vector;
+ uint32_t func_num;
+ uint32_t vfunc_active;
+} ZXDH_DTB_QUEUE_VM_INFO_T;
+
+typedef struct zxdh_dtb4k_dtb_enq_cfg_epid_v_func_num_0_127_t {
+ uint32_t dbi_en;
+ uint32_t queue_en;
+ uint32_t cfg_epid;
+ uint32_t cfg_vfunc_num;
+ uint32_t cfg_vector;
+ uint32_t cfg_func_num;
+ uint32_t cfg_vfunc_active;
+} ZXDH_DTB4K_DTB_ENQ_CFG_EPID_V_FUNC_NUM_0_127_T;
+
+
+typedef uint32_t (*ZXDH_REG_WRITE)(uint32_t dev_id, uint32_t addr, uint32_t *p_data);
+typedef uint32_t (*ZXDH_REG_READ)(uint32_t dev_id, uint32_t addr, uint32_t *p_data);
+
+typedef struct zxdh_field_t {
+ const char *p_name;
+ uint32_t flags;
+ uint16_t msb_pos;
+
+ uint16_t len;
+ uint32_t default_value;
+ uint32_t default_step;
+} ZXDH_FIELD_T;
+
+typedef struct zxdh_reg_t {
+ const char *reg_name;
+ uint32_t reg_no;
+ uint32_t module_no;
+ uint32_t flags;
+ uint32_t array_type;
+ uint32_t addr;
+ uint32_t width;
+ uint32_t m_size;
+ uint32_t n_size;
+ uint32_t m_step;
+ uint32_t n_step;
+ uint32_t field_num;
+ ZXDH_FIELD_T *p_fields;
+
+ ZXDH_REG_WRITE p_write_fun;
+ ZXDH_REG_READ p_read_fun;
+} ZXDH_REG_T;
+
+typedef struct zxdh_tlb_mgr_t {
+ uint32_t entry_num;
+ uint32_t va_width;
+ uint32_t pa_width;
+} ZXDH_TLB_MGR_T;
+
int zxdh_np_host_init(uint32_t dev_id, ZXDH_DEV_INIT_CTRL_T *p_dev_init_ctrl);
+int zxdh_np_online_uninit(uint32_t dev_id, char *port_name, uint32_t queue_id);
#endif /* ZXDH_NP_H */
--
2.27.0