Modify the implementation of the dtb queue request and release
interfaces, and add the implementation of queue initialization.

Signed-off-by: Bingbin Chen
---
 drivers/net/zxdh/zxdh_np.c | 495 +++++++++++++++++++++++++++++--------
 drivers/net/zxdh/zxdh_np.h |  97 ++++++++
 2 files changed, 485 insertions(+), 107 deletions(-)

diff --git a/drivers/net/zxdh/zxdh_np.c b/drivers/net/zxdh/zxdh_np.c
index cf5a192a02..eb57d61898 100644
--- a/drivers/net/zxdh/zxdh_np.c
+++ b/drivers/net/zxdh/zxdh_np.c
@@ -17,16 +17,15 @@
 
 static ZXDH_DEV_MGR_T g_dev_mgr;
 static ZXDH_SDT_MGR_T g_sdt_mgr;
-static uint32_t g_dpp_dtb_int_enable;
 static uint32_t g_table_type[ZXDH_DEV_CHANNEL_MAX][ZXDH_DEV_SDT_ID_MAX];
 static ZXDH_PPU_CLS_BITMAP_T g_ppu_cls_bit_map[ZXDH_DEV_CHANNEL_MAX];
 static ZXDH_DTB_MGR_T *p_dpp_dtb_mgr[ZXDH_DEV_CHANNEL_MAX];
-static ZXDH_RISCV_DTB_MGR *p_riscv_dtb_queue_mgr[ZXDH_DEV_CHANNEL_MAX];
 static ZXDH_SDT_TBL_DATA_T g_sdt_info[ZXDH_DEV_CHANNEL_MAX][ZXDH_DEV_SDT_ID_MAX];
 static ZXDH_PPU_STAT_CFG_T g_ppu_stat_cfg[ZXDH_DEV_CHANNEL_MAX];
 static uint64_t g_np_fw_compat_addr[ZXDH_DEV_CHANNEL_MAX];
 static const ZXDH_VERSION_COMPATIBLE_REG_T g_np_sdk_version = {
 	ZXDH_NPSDK_COMPAT_ITEM_ID, 1, 0, 0, 0, {0} };
+static const uint32_t hardware_ep_id[5] = {5, 6, 7, 8, 9};
 
 static ZXDH_FIELD_T g_smmu0_smmu0_cpu_ind_cmd_reg[] = {
 	{"cpu_ind_rw", ZXDH_FIELD_FLAG_RW, 31, 1, 0x0, 0x0},
@@ -436,6 +435,25 @@ zxdh_np_dev_opr_spinlock_get(uint32_t dev_id, uint32_t type, ZXDH_SPINLOCK_T **p
 	return ZXDH_OK;
 }
 
+static uint32_t
+zxdh_np_dev_dtb_opr_spinlock_get(uint32_t dev_id, uint32_t type,
+		uint32_t index, ZXDH_SPINLOCK_T **p_spinlock_out)
+{
+	ZXDH_DEV_MGR_T *p_dev_mgr = &g_dev_mgr;
+	ZXDH_DEV_CFG_T *p_dev_info = p_dev_mgr->p_dev_array[dev_id];
+
+	switch (type) {
+	case ZXDH_DEV_SPINLOCK_T_DTB:
+		*p_spinlock_out = &p_dev_info->dtb_queue_spinlock[index];
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "spinlock type is invalid!");
+		return ZXDH_ERR;
+	}
+
+	return ZXDH_OK;
+}
+
 static uint32_t
 zxdh_np_dev_read_channel(uint32_t dev_id, uint32_t addr, uint32_t size, uint32_t *p_data)
 {
@@ -767,6 +785,7 @@ zxdh_np_dev_add(uint32_t dev_id, ZXDH_DEV_TYPE_E dev_type,
 {
 	ZXDH_DEV_CFG_T *p_dev_info = NULL;
 	ZXDH_DEV_MGR_T *p_dev_mgr = NULL;
+	uint32_t i = 0;
 
 	p_dev_mgr = &g_dev_mgr;
 	if (!p_dev_mgr->is_init) {
@@ -802,6 +821,10 @@ zxdh_np_dev_add(uint32_t dev_id, ZXDH_DEV_TYPE_E dev_type,
 	p_dev_info->p_pcie_read_fun = zxdh_np_dev_pcie_default_read;
 
 	zxdh_np_comm_spinlock_create(&p_dev_info->dtb_spinlock);
+
+	for (i = 0; i < ZXDH_DTB_QUEUE_NUM_MAX; i++)
+		zxdh_np_comm_spinlock_create(&p_dev_info->dtb_queue_spinlock[i]);
+
 	return 0;
 }
 
@@ -1156,6 +1179,89 @@ zxdh_np_agent_channel_reg_write(uint32_t dev_id,
 	return ret;
 }
 
+static uint32_t
+zxdh_np_agent_channel_dtb_sync_send(uint32_t dev_id,
+		ZXDH_AGENT_CHANNEL_DTB_MSG_T *p_msg,
+		uint32_t *p_data,
+		uint32_t rep_len)
+{
+	uint32_t ret = ZXDH_OK;
+
+	ZXDH_AGENT_CHANNEL_MSG_T agent_msg = {0};
+	agent_msg.msg = (void *)p_msg;
+	agent_msg.msg_len = sizeof(ZXDH_AGENT_CHANNEL_DTB_MSG_T);
+
+	ret = zxdh_np_agent_channel_sync_send(dev_id, &agent_msg, p_data, rep_len);
+	if (ret != ZXDH_OK) {
+		PMD_DRV_LOG(ERR, "zxdh_np_agent_channel_sync_send failed");
+		return ZXDH_ERR;
+	}
+
+	return ZXDH_OK;
+}
+
+static uint32_t
+zxdh_np_agent_channel_dtb_queue_request(uint32_t dev_id,
+		char p_name[32],
+		uint32_t vport_info,
+		uint32_t *p_queue_id)
+{
+	uint32_t rc = ZXDH_OK;
+
+	uint32_t rsp_buff[2] = {0};
+	uint32_t msg_result = 0;
+	uint32_t queue_id = 0;
+	ZXDH_AGENT_CHANNEL_DTB_MSG_T msgcfg = {
+		.dev_id = 0,
+		.type = ZXDH_DTB_MSG,
+		.oper = ZXDH_QUEUE_REQUEST,
+		.vport = vport_info,
+	};
+	memcpy(msgcfg.name, p_name, strnlen(p_name, ZXDH_PORT_NAME_MAX));
+
+	PMD_DRV_LOG(DEBUG, "msgcfg.name=%s", msgcfg.name);
+
+	rc = zxdh_np_agent_channel_dtb_sync_send(dev_id, &msgcfg, rsp_buff, sizeof(rsp_buff));
+	ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_agent_channel_dtb_sync_send");
+
+	msg_result = rsp_buff[0];
+	queue_id = rsp_buff[1];
+
+	PMD_DRV_LOG(DEBUG, "dev_id: %d, msg_result: %d", dev_id, msg_result);
+	PMD_DRV_LOG(DEBUG, "dev_id: %d, queue_id: %d", dev_id, queue_id);
+
+	*p_queue_id = queue_id;
+
+	return msg_result;
+}
+
+static uint32_t
+zxdh_np_agent_channel_dtb_queue_release(uint32_t dev_id,
+		char p_name[32],
+		__rte_unused uint32_t queue_id)
+{
+	uint32_t rc = ZXDH_OK;
+
+	uint32_t msg_result = 0;
+	uint32_t rsp_buff[2] = {0};
+	ZXDH_AGENT_CHANNEL_DTB_MSG_T msgcfg = {
+		.dev_id = 0,
+		.type = ZXDH_DTB_MSG,
+		.oper = ZXDH_QUEUE_RELEASE,
+		.queue_id = queue_id,
+	};
+
+	memcpy(msgcfg.name, p_name, strnlen(p_name, ZXDH_PORT_NAME_MAX));
+
+	rc = zxdh_np_agent_channel_dtb_sync_send(dev_id, &msgcfg, rsp_buff, sizeof(rsp_buff));
+	ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_agent_channel_dtb_sync_send");
+
+	msg_result = rsp_buff[0];
+	PMD_DRV_LOG(DEBUG, "msg_result: %d", msg_result);
+
+	return msg_result;
+}
+
 static ZXDH_DTB_MGR_T *
 zxdh_np_dtb_mgr_get(uint32_t dev_id)
 {
@@ -1384,48 +1490,6 @@ zxdh_np_pcie_bar_msg_num_get(uint32_t dev_id, uint32_t *p_bar_msg_num)
 	return rc;
 }
 
-static ZXDH_RISCV_DTB_MGR *
-zxdh_np_riscv_dtb_queue_mgr_get(uint32_t dev_id)
-{
-	if (dev_id >= ZXDH_DEV_CHANNEL_MAX)
-		return NULL;
-	else
-		return p_riscv_dtb_queue_mgr[dev_id];
-}
-
-static uint32_t
-zxdh_np_riscv_dtb_mgr_queue_info_delete(uint32_t dev_id, uint32_t queue_id)
-{
-	ZXDH_RISCV_DTB_MGR *p_riscv_dtb_mgr = NULL;
-
-	p_riscv_dtb_mgr = zxdh_np_riscv_dtb_queue_mgr_get(dev_id);
-	if (p_riscv_dtb_mgr == NULL)
-		return 1;
-
-	p_riscv_dtb_mgr->queue_alloc_count--;
-	p_riscv_dtb_mgr->queue_user_info[queue_id].alloc_flag = 0;
-	p_riscv_dtb_mgr->queue_user_info[queue_id].queue_id = 0xFF;
-	p_riscv_dtb_mgr->queue_user_info[queue_id].vport = 0;
-	memset(p_riscv_dtb_mgr->queue_user_info[queue_id].user_name, 0, ZXDH_PORT_NAME_MAX);
-
-	return 0;
-}
-
-static uint32_t
-zxdh_np_dev_get_dev_type(uint32_t dev_id)
-{
-	ZXDH_DEV_MGR_T *p_dev_mgr = NULL;
-	ZXDH_DEV_CFG_T *p_dev_info = NULL;
-
-	p_dev_mgr = &g_dev_mgr;
-	p_dev_info = p_dev_mgr->p_dev_array[dev_id];
-
-	if (p_dev_info == NULL)
-		return 0xffff;
-
-	return p_dev_info->dev_type;
-}
-
 static uint32_t
 zxdh_np_comm_read_bits(uint8_t *p_base, uint32_t base_size_bit,
 		uint32_t *p_data, uint32_t start_bit, uint32_t end_bit)
@@ -1715,52 +1779,6 @@ zxdh_np_dtb_queue_vm_info_set(uint32_t dev_id,
 	return rc;
 }
 
-static uint32_t
-zxdh_np_dtb_queue_enable_set(uint32_t dev_id,
-		uint32_t queue_id,
-		uint32_t enable)
-{
-	ZXDH_DTB_QUEUE_VM_INFO_T vm_info = {0};
-	uint32_t rc;
-
-	rc = zxdh_np_dtb_queue_vm_info_get(dev_id, queue_id, &vm_info);
-	ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxdh_dtb_queue_vm_info_get");
-
-	vm_info.queue_en = enable;
-	rc = zxdh_np_dtb_queue_vm_info_set(dev_id, queue_id, &vm_info);
-	ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxdh_dtb_queue_vm_info_set");
-
-	return rc;
-}
-
-static uint32_t
-zxdh_np_riscv_dpp_dtb_queue_id_release(uint32_t dev_id,
-		char name[ZXDH_PORT_NAME_MAX], uint32_t queue_id)
-{
-	ZXDH_RISCV_DTB_MGR *p_riscv_dtb_mgr = NULL;
-
-	p_riscv_dtb_mgr = zxdh_np_riscv_dtb_queue_mgr_get(dev_id);
-	if (p_riscv_dtb_mgr == NULL)
-		return 1;
-
-	if (zxdh_np_dev_get_dev_type(dev_id) == ZXDH_DEV_TYPE_SIM)
-		return 0;
-
-	if (p_riscv_dtb_mgr->queue_user_info[queue_id].alloc_flag != 1) {
-		PMD_DRV_LOG(ERR, "queue %d not alloc!", queue_id);
-		return 2;
-	}
-
-	if (strcmp(p_riscv_dtb_mgr->queue_user_info[queue_id].user_name, name) != 0) {
-		PMD_DRV_LOG(ERR, "queue %d name %s error!", queue_id, name);
-		return 3;
-	}
-	zxdh_np_dtb_queue_enable_set(dev_id, queue_id, 0);
-	zxdh_np_riscv_dtb_mgr_queue_info_delete(dev_id, queue_id);
-
-	return 0;
-}
-
 static uint32_t
 zxdh_np_dtb_queue_unused_item_num_get(uint32_t dev_id,
 		uint32_t queue_id,
@@ -1795,20 +1813,74 @@ zxdh_np_dtb_queue_id_free(uint32_t dev_id,
 	return rc;
 }
 
+static uint32_t
+zxdh_np_dtb_queue_request(uint32_t dev_id, char p_name[32],
+		uint16_t vport, uint32_t *p_queue_id)
+{
+	uint32_t rc = ZXDH_OK;
+	uint32_t queue_id = 0xFF;
+	ZXDH_SPINLOCK_T *p_dtb_spinlock = NULL;
+	ZXDH_DEV_SPINLOCK_TYPE_E spinlock = ZXDH_DEV_SPINLOCK_T_DTB;
+	uint32_t vport_info = (uint32_t)vport;
+
+	rc = zxdh_np_dev_opr_spinlock_get(dev_id, (uint32_t)spinlock, &p_dtb_spinlock);
+	ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dev_opr_spinlock_get");
+
+	zxdh_np_comm_spinlock_lock(p_dtb_spinlock);
+
+	rc = zxdh_np_agent_channel_dtb_queue_request(dev_id, p_name, vport_info, &queue_id);
+	if (rc == ZXDH_RC_DTB_QUEUE_RES_EMPTY) {
+		PMD_DRV_LOG(ERR, "dtb queue is locked full.");
+		zxdh_np_comm_spinlock_unlock(p_dtb_spinlock);
+		return ZXDH_RC_DTB_QUEUE_RES_EMPTY;
+	}
+
+	zxdh_np_comm_spinlock_unlock(p_dtb_spinlock);
+
+	PMD_DRV_LOG(DEBUG, "dtb request queue is %d.", queue_id);
+
+	*p_queue_id = queue_id;
+
+	PMD_DRV_LOG(INFO, "dev_id %d vport 0x%x name %s queue_id %d done.",
+		dev_id, vport_info, p_name, queue_id);
+
+	return rc;
+}
+
 static uint32_t
 zxdh_np_dtb_queue_release(uint32_t devid,
 		char pname[32],
 		uint32_t queueid)
 {
-	uint32_t rc;
+	uint32_t rc = ZXDH_OK;
+	ZXDH_SPINLOCK_T *p_dtb_spinlock = NULL;
+	ZXDH_DEV_SPINLOCK_TYPE_E spinlock = ZXDH_DEV_SPINLOCK_T_DTB;
+
+	rc = zxdh_np_dev_opr_spinlock_get(devid, (uint32_t)spinlock, &p_dtb_spinlock);
+	ZXDH_COMM_CHECK_DEV_RC(devid, rc, "zxdh_np_dev_opr_spinlock_get");
+
+	zxdh_np_comm_spinlock_lock(p_dtb_spinlock);
+
+	rc = zxdh_np_agent_channel_dtb_queue_release(devid, pname, queueid);
+
+	if (rc == ZXDH_RC_DTB_QUEUE_NOT_ALLOC) {
+		PMD_DRV_LOG(ERR, "dtb queue id %d not request.", queueid);
+		zxdh_np_comm_spinlock_unlock(p_dtb_spinlock);
+		return ZXDH_RC_DTB_QUEUE_NOT_ALLOC;
+	}
 
-	ZXDH_COMM_CHECK_DEV_POINT(devid, pname);
+	if (rc == ZXDH_RC_DTB_QUEUE_NAME_ERROR) {
+		PMD_DRV_LOG(ERR, "dtb queue %d name error.", queueid);
+		zxdh_np_comm_spinlock_unlock(p_dtb_spinlock);
+		return ZXDH_RC_DTB_QUEUE_NAME_ERROR;
+	}
 
-	rc = zxdh_np_riscv_dpp_dtb_queue_id_release(devid, pname, queueid);
-	ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxdh_riscv_dpp_dtb_queue_id_release");
+	zxdh_np_comm_spinlock_unlock(p_dtb_spinlock);
 
 	rc = zxdh_np_dtb_queue_id_free(devid, queueid);
-	ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxdh_dtb_queue_id_free");
+	ZXDH_COMM_CHECK_DEV_RC(devid, rc, "zxdh_np_dtb_queue_id_free");
+
+	PMD_DRV_LOG(INFO, "queueid %d", queueid);
 
 	return rc;
 }
@@ -2260,24 +2332,34 @@ zxdh_np_dtb_tab_down_info_set(uint32_t dev_id,
 	uint32_t item_index;
 	uint32_t i;
 	uint32_t rc;
+	ZXDH_SPINLOCK_T *p_spinlock = NULL;
+
+	zxdh_np_dev_dtb_opr_spinlock_get(dev_id, ZXDH_DEV_SPINLOCK_T_DTB, queue_id, &p_spinlock);
+	zxdh_np_comm_spinlock_lock(p_spinlock);
 
 	if (ZXDH_DTB_QUEUE_INIT_FLAG_GET(dev_id, queue_id) == 0) {
-		PMD_DRV_LOG(ERR, "dtb queue %d is not init", queue_id);
+		PMD_DRV_LOG(ERR, "dtb queue %d is not init.", queue_id);
+		zxdh_np_comm_spinlock_unlock(p_spinlock);
 		return ZXDH_RC_DTB_QUEUE_IS_NOT_INIT;
 	}
 
-	if (data_len % 4 != 0)
+	if (data_len % 4 != 0) {
+		zxdh_np_comm_spinlock_unlock(p_spinlock);
 		return ZXDH_RC_DTB_PARA_INVALID;
+	}
 
 	rc = zxdh_np_dtb_queue_enable_get(dev_id, queue_id, &queue_en);
 	if (!queue_en) {
 		PMD_DRV_LOG(ERR, "the queue %d is not enable!,rc=%d", queue_id, rc);
+		zxdh_np_comm_spinlock_unlock(p_spinlock);
 		return ZXDH_RC_DTB_QUEUE_NOT_ENABLE;
 	}
 
 	rc = zxdh_np_dtb_queue_unused_item_num_get(dev_id, queue_id, &unused_item_num);
-	if (unused_item_num == 0)
+	if (unused_item_num == 0) {
+		zxdh_np_comm_spinlock_unlock(p_spinlock);
 		return ZXDH_RC_DTB_QUEUE_ITEM_HW_EMPTY;
+	}
 
 	for (i = 0; i < ZXDH_DTB_QUEUE_ITEM_NUM_MAX; i++) {
 		item_index = ZXDH_DTB_TAB_DOWN_WR_INDEX_GET(dev_id, queue_id) %
@@ -2292,8 +2374,10 @@ zxdh_np_dtb_tab_down_info_set(uint32_t dev_id,
 			break;
 	}
 
-	if (i == ZXDH_DTB_QUEUE_ITEM_NUM_MAX)
+	if (i == ZXDH_DTB_QUEUE_ITEM_NUM_MAX) {
+		zxdh_np_comm_spinlock_unlock(p_spinlock);
 		return ZXDH_RC_DTB_QUEUE_ITEM_SW_EMPTY;
+	}
 
 	rc = zxdh_np_dtb_item_buff_wr(dev_id, queue_id, 0,
 		item_index, 0, data_len, p_data);
@@ -2313,6 +2397,8 @@ zxdh_np_dtb_tab_down_info_set(uint32_t dev_id,
 	rc = zxdh_np_dtb_queue_item_info_set(dev_id, queue_id, &item_info);
 	*p_item_index = item_index;
 
+	zxdh_np_comm_spinlock_unlock(p_spinlock);
+
 	return rc;
 }
 
@@ -2326,8 +2412,6 @@ zxdh_np_dtb_write_down_table_data(uint32_t dev_id,
 	uint32_t rc = 0;
 	uint32_t dtb_interrupt_status = 0;
 
-	dtb_interrupt_status = g_dpp_dtb_int_enable;
-
 	rc = zxdh_np_dtb_tab_down_info_set(dev_id,
 			queue_id,
 			dtb_interrupt_status,
@@ -2658,20 +2742,28 @@ zxdh_np_dtb_tab_up_info_set(uint32_t dev_id,
 	ZXDH_DTB_QUEUE_ITEM_INFO_T item_info = {0};
 	uint32_t queue_en = 0;
 	uint32_t rc;
+	ZXDH_SPINLOCK_T *p_spinlock = NULL;
+
+	zxdh_np_dev_dtb_opr_spinlock_get(dev_id, ZXDH_DEV_SPINLOCK_T_DTB, queue_id, &p_spinlock);
+	zxdh_np_comm_spinlock_lock(p_spinlock);
 
 	zxdh_np_dtb_queue_enable_get(dev_id, queue_id, &queue_en);
 	if (!queue_en) {
 		PMD_DRV_LOG(ERR, "the queue %d is not enable!", queue_id);
+		zxdh_np_comm_spinlock_unlock(p_spinlock);
 		return ZXDH_RC_DTB_QUEUE_NOT_ENABLE;
 	}
 
 	if (ZXDH_DTB_QUEUE_INIT_FLAG_GET(dev_id, queue_id) == 0) {
 		PMD_DRV_LOG(ERR, "dtb queue %d is not init", queue_id);
+		zxdh_np_comm_spinlock_unlock(p_spinlock);
 		return ZXDH_RC_DTB_QUEUE_IS_NOT_INIT;
 	}
 
-	if (desc_len % 4 != 0)
+	if (desc_len % 4 != 0) {
+		zxdh_np_comm_spinlock_unlock(p_spinlock);
 		return ZXDH_RC_DTB_PARA_INVALID;
+	}
 
 	zxdh_np_dtb_item_buff_wr(dev_id, queue_id, ZXDH_DTB_DIR_UP_TYPE,
 		item_index, 0, desc_len, p_desc_data);
@@ -2683,11 +2775,10 @@ zxdh_np_dtb_tab_up_info_set(uint32_t dev_id,
 	item_info.int_en = int_flag;
 	item_info.data_len = desc_len / 4;
 
-	if (zxdh_np_dev_get_dev_type(dev_id) == ZXDH_DEV_TYPE_SIM)
-		return 0;
-
 	rc = zxdh_np_dtb_queue_item_info_set(dev_id, queue_id, &item_info);
 
+	zxdh_np_comm_spinlock_unlock(p_spinlock);
+
 	return rc;
 }
 
@@ -2730,16 +2821,23 @@ zxdh_np_dtb_tab_up_free_item_get(uint32_t dev_id,
 	uint32_t item_index = 0;
 	uint32_t unused_item_num = 0;
 	uint32_t i;
+	ZXDH_SPINLOCK_T *p_spinlock = NULL;
+
+	zxdh_np_dev_dtb_opr_spinlock_get(dev_id, ZXDH_DEV_SPINLOCK_T_DTB, queue_id, &p_spinlock);
+	zxdh_np_comm_spinlock_lock(p_spinlock);
 
 	if (ZXDH_DTB_QUEUE_INIT_FLAG_GET(dev_id, queue_id) == 0) {
 		PMD_DRV_LOG(ERR, "dtb queue %d is not init", queue_id);
+		zxdh_np_comm_spinlock_unlock(p_spinlock);
 		return ZXDH_RC_DTB_QUEUE_IS_NOT_INIT;
 	}
 
 	zxdh_np_dtb_queue_unused_item_num_get(dev_id, queue_id, &unused_item_num);
 
-	if (unused_item_num == 0)
+	if (unused_item_num == 0) {
+		zxdh_np_comm_spinlock_unlock(p_spinlock);
 		return ZXDH_RC_DTB_QUEUE_ITEM_HW_EMPTY;
+	}
 
 	for (i = 0; i < ZXDH_DTB_QUEUE_ITEM_NUM_MAX; i++) {
 		item_index = ZXDH_DTB_TAB_UP_WR_INDEX_GET(dev_id, queue_id) %
@@ -2754,14 +2852,17 @@ zxdh_np_dtb_tab_up_free_item_get(uint32_t dev_id,
 			break;
 	}
 
-	if (i == ZXDH_DTB_QUEUE_ITEM_NUM_MAX)
+	if (i == ZXDH_DTB_QUEUE_ITEM_NUM_MAX) {
+		zxdh_np_comm_spinlock_unlock(p_spinlock);
 		return ZXDH_RC_DTB_QUEUE_ITEM_SW_EMPTY;
+	}
 
 	zxdh_np_dtb_item_ack_wr(dev_id, queue_id, ZXDH_DTB_DIR_UP_TYPE,
 		item_index, 0, ZXDH_DTB_TAB_ACK_IS_USING_MASK);
 
 	*p_item_index = item_index;
+	zxdh_np_comm_spinlock_unlock(p_spinlock);
 
 	return 0;
 }
 
@@ -2952,6 +3053,181 @@ zxdh_np_dtb_stats_get(uint32_t dev_id,
 	return rc;
 }
 
+static uint32_t
+zxdh_np_dtb_queue_down_init(uint32_t dev_id,
+		uint32_t queue_id,
+		ZXDH_DTB_QUEUE_CFG_T *p_queue_cfg)
+{
+	uint32_t rc = 0;
+	uint32_t i = 0;
+	uint32_t ack_vale = 0;
+	uint32_t tab_down_item_size = 0;
+	ZXDH_DTB_MGR_T *p_dtb_mgr = NULL;
+
+	p_dtb_mgr = zxdh_np_dtb_mgr_get(dev_id);
+	p_dtb_mgr->queue_info[queue_id].init_flag = 1;
+
+	tab_down_item_size = (p_queue_cfg->down_item_size == 0) ?
+		ZXDH_DTB_ITEM_SIZE : p_queue_cfg->down_item_size;
+
+	p_dtb_mgr->queue_info[queue_id].tab_down.item_size = tab_down_item_size;
+	p_dtb_mgr->queue_info[queue_id].tab_down.start_phy_addr = p_queue_cfg->down_start_phy_addr;
+	p_dtb_mgr->queue_info[queue_id].tab_down.start_vir_addr = p_queue_cfg->down_start_vir_addr;
+	p_dtb_mgr->queue_info[queue_id].tab_down.wr_index = 0;
+	p_dtb_mgr->queue_info[queue_id].tab_down.rd_index = 0;
+
+	for (i = 0; i < ZXDH_DTB_QUEUE_ITEM_NUM_MAX; i++) {
+		rc = zxdh_np_dtb_item_ack_wr(dev_id, queue_id,
+			ZXDH_DTB_DIR_DOWN_TYPE, i, 0, ZXDH_DTB_TAB_ACK_CHECK_VALUE);
+		ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_item_ack_wr");
+	}
+
+	for (i = 0; i < ZXDH_DTB_QUEUE_ITEM_NUM_MAX; i++) {
+		rc = zxdh_np_dtb_item_ack_rd(dev_id, queue_id,
+			ZXDH_DTB_DIR_DOWN_TYPE, i, 0, &ack_vale);
+		if (ack_vale != ZXDH_DTB_TAB_ACK_CHECK_VALUE) {
+			PMD_DRV_LOG(ERR, "dtb queue [%d] down init failed!", queue_id);
+			return ZXDH_RC_DTB_MEMORY_ALLOC_ERR;
+		}
+	}
+
+	memset((uint8_t *)(p_queue_cfg->down_start_vir_addr), 0,
+		tab_down_item_size * ZXDH_DTB_QUEUE_ITEM_NUM_MAX);
+
+	PMD_DRV_LOG(INFO, "dtb queue [%d] down init success!!!", queue_id);
+
+	return ZXDH_OK;
+}
+
+static uint32_t
+zxdh_np_dtb_queue_dump_init(uint32_t dev_id,
+		uint32_t queue_id,
+		ZXDH_DTB_QUEUE_CFG_T *p_queue_cfg)
+{
+	uint32_t i = 0;
+	uint32_t ack_vale = 0;
+	uint32_t tab_up_item_size = 0;
+	ZXDH_DTB_MGR_T *p_dtb_mgr = NULL;
+
+	p_dtb_mgr = zxdh_np_dtb_mgr_get(dev_id);
+	p_dtb_mgr->queue_info[queue_id].init_flag = 1;
+
+	tab_up_item_size = (p_queue_cfg->up_item_size == 0) ?
+		ZXDH_DTB_ITEM_SIZE : p_queue_cfg->up_item_size;
+
+	p_dtb_mgr->queue_info[queue_id].tab_up.item_size = tab_up_item_size;
+	p_dtb_mgr->queue_info[queue_id].tab_up.start_phy_addr = p_queue_cfg->up_start_phy_addr;
+	p_dtb_mgr->queue_info[queue_id].tab_up.start_vir_addr = p_queue_cfg->up_start_vir_addr;
+	p_dtb_mgr->queue_info[queue_id].tab_up.wr_index = 0;
+	p_dtb_mgr->queue_info[queue_id].tab_up.rd_index = 0;
+
+	for (i = 0; i < ZXDH_DTB_QUEUE_ITEM_NUM_MAX; i++) {
+		zxdh_np_dtb_item_ack_wr(dev_id, queue_id,
+			ZXDH_DTB_DIR_UP_TYPE, i, 0, ZXDH_DTB_TAB_ACK_CHECK_VALUE);
+	}
+
+	for (i = 0; i < ZXDH_DTB_QUEUE_ITEM_NUM_MAX; i++) {
+		zxdh_np_dtb_item_ack_rd(dev_id, queue_id,
+			ZXDH_DTB_DIR_UP_TYPE, i, 0, &ack_vale);
+		if (ack_vale != ZXDH_DTB_TAB_ACK_CHECK_VALUE) {
+			PMD_DRV_LOG(ERR, "dtb queue [%d] dump init failed!!!", queue_id);
+			return ZXDH_RC_DTB_MEMORY_ALLOC_ERR;
+		}
+	}
+
+	memset((uint8_t *)(p_queue_cfg->up_start_vir_addr), 0,
+		tab_up_item_size * ZXDH_DTB_QUEUE_ITEM_NUM_MAX);
+
+	PMD_DRV_LOG(INFO, "dtb queue [%d] up init success!!!", queue_id);
+
+	return ZXDH_OK;
+}
+
+static void
+zxdh_np_dtb_down_channel_addr_set(uint32_t dev_id,
+		uint32_t channel_id,
+		uint64_t phy_addr,
+		uint64_t vir_addr,
+		uint32_t size)
+{
+	ZXDH_DTB_QUEUE_CFG_T down_queue_cfg = {
+		.down_start_phy_addr = phy_addr,
+		.down_start_vir_addr = vir_addr,
+		.down_item_size = size,
+	};
+
+	zxdh_np_dtb_queue_down_init(dev_id, channel_id, &down_queue_cfg);
+}
+
+static void
+zxdh_np_dtb_dump_channel_addr_set(uint32_t dev_id,
+		uint32_t channel_id,
+		uint64_t phy_addr,
+		uint64_t vir_addr,
+		uint32_t size)
+{
+	ZXDH_DTB_QUEUE_CFG_T dump_queue_cfg = {
+		.up_start_phy_addr = phy_addr,
+		.up_start_vir_addr = vir_addr,
+		.up_item_size = size,
+	};
+
+	zxdh_np_dtb_queue_dump_init(dev_id, channel_id, &dump_queue_cfg);
+}
+
+static uint32_t
+zxdh_np_dtb_user_info_set(uint32_t dev_id, uint32_t queue_id, uint16_t vport, uint32_t vector)
+{
+	uint32_t rc = ZXDH_OK;
+
+	ZXDH_DTB_QUEUE_VM_INFO_T vm_info = {0};
+	ZXDH_DTB_MGR_T *p_dtb_mgr = zxdh_np_dtb_mgr_get(dev_id);
+
+	rc = zxdh_np_dtb_queue_vm_info_get(dev_id, queue_id, &vm_info);
+	ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_queue_vm_info_get");
+
+	vm_info.dbi_en = 1;
+	vm_info.epid = hardware_ep_id[ZXDH_EPID_BY(vport)];
+	vm_info.vfunc_num = ZXDH_VFUNC_NUM(vport);
+	vm_info.func_num = ZXDH_FUNC_NUM(vport);
+	vm_info.vfunc_active = ZXDH_VF_ACTIVE(vport);
+	vm_info.vector = vector;
+
+	p_dtb_mgr->queue_info[queue_id].vport = vport;
+	p_dtb_mgr->queue_info[queue_id].vector = vector;
+
+	rc = zxdh_np_dtb_queue_vm_info_set(dev_id, queue_id, &vm_info);
+	ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_queue_vm_info_set");
+
+	return rc;
+}
+
+static uint32_t
+zxdh_np_apt_dtb_res_init(uint32_t dev_id, ZXDH_DEV_INIT_CTRL_T *p_dev_init_ctrl)
+{
+	uint32_t rc = ZXDH_OK;
+
+	uint32_t queue_id = 0;
+
+	rc = zxdh_np_dtb_queue_request(dev_id, p_dev_init_ctrl->port_name,
+		p_dev_init_ctrl->vport, &queue_id);
+	ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_queue_request");
+
+	p_dev_init_ctrl->queue_id = queue_id;
+
+	rc = zxdh_np_dtb_user_info_set(dev_id, queue_id,
+		p_dev_init_ctrl->vport, p_dev_init_ctrl->vector);
+	ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_user_info_set");
+
+	zxdh_np_dtb_down_channel_addr_set(dev_id, queue_id,
+		p_dev_init_ctrl->down_phy_addr, p_dev_init_ctrl->down_vir_addr, 0);
+
+	zxdh_np_dtb_dump_channel_addr_set(dev_id, queue_id,
+		p_dev_init_ctrl->dump_phy_addr, p_dev_init_ctrl->dump_vir_addr, 0);
+
+	return ZXDH_OK;
+}
+
 int
 zxdh_np_host_init(uint32_t dev_id,
 		ZXDH_DEV_INIT_CTRL_T *p_dev_init_ctrl)
@@ -2986,5 +3262,10 @@ zxdh_np_host_init(uint32_t dev_id,
 
 	zxdh_np_dev_fw_bar_msg_num_set(dev_id, bar_msg_num);
 
+	rc = zxdh_np_apt_dtb_res_init(dev_id, p_dev_init_ctrl);
+	ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_apt_dtb_res_init");
+
+	PMD_DRV_LOG(INFO, "host init done, queue_id = %u", p_dev_init_ctrl->queue_id);
+
 	return 0;
 }
diff --git a/drivers/net/zxdh/zxdh_np.h b/drivers/net/zxdh/zxdh_np.h
index a5fde341e9..0ad068053a 100644
--- a/drivers/net/zxdh/zxdh_np.h
+++ b/drivers/net/zxdh/zxdh_np.h
@@ -380,6 +380,7 @@ typedef struct dpp_dev_cfg_t {
 	ZXDH_DEV_READ_FUNC p_pcie_read_fun;
 	ZXDH_SPINLOCK_T dtb_spinlock;
 	ZXDH_SPINLOCK_T smmu0_spinlock;
+	ZXDH_SPINLOCK_T dtb_queue_spinlock[ZXDH_DTB_QUEUE_NUM_MAX];
 } ZXDH_DEV_CFG_T;
 
 typedef struct zxdh_dev_mngr_t {
@@ -568,6 +569,78 @@ typedef struct zxdh_dtb_eram_table_form_t {
 	uint32_t data_l;
 } ZXDH_DTB_ERAM_TABLE_FORM_T;
 
+typedef struct zxdh_dtb_zcam_table_form_t {
+	uint32_t valid;
+	uint32_t type_mode;
+	uint32_t ram_reg_flag;
+	uint32_t zgroup_id;
+	uint32_t zblock_id;
+	uint32_t zcell_id;
+	uint32_t mask;
+	uint32_t sram_addr;
+} ZXDH_DTB_ZCAM_TABLE_FORM_T;
+
+typedef struct zxdh_dtb_etcam_table_form_t {
+	uint32_t valid;
+	uint32_t type_mode;
+	uint32_t block_sel;
+	uint32_t init_en;
+	uint32_t row_or_col_msk;
+	uint32_t vben;
+	uint32_t reg_tcam_flag;
+	uint32_t uload;
+	uint32_t rd_wr;
+	uint32_t wr_mode;
+	uint32_t data_or_mask;
+	uint32_t addr;
+	uint32_t vbit;
+} ZXDH_DTB_ETCAM_TABLE_FORM_T;
+
+typedef struct zxdh_dtb_eram_dump_form_t {
+	uint32_t valid;
+	uint32_t up_type;
+	uint32_t base_addr;
+	uint32_t tb_depth;
+	uint32_t tb_dst_addr_h;
+	uint32_t tb_dst_addr_l;
+} ZXDH_DTB_ERAM_DUMP_FORM_T;
+
+typedef struct zxdh_dtb_zcam_dump_form_t {
+	uint32_t valid;
+	uint32_t up_type;
+	uint32_t zgroup_id;
+	uint32_t zblock_id;
+	uint32_t ram_reg_flag;
+	uint32_t z_reg_cell_id;
+	uint32_t sram_addr;
+	uint32_t tb_depth;
+	uint32_t tb_width;
+	uint32_t tb_dst_addr_h;
+	uint32_t tb_dst_addr_l;
+} ZXDH_DTB_ZCAM_DUMP_FORM_T;
+
+typedef struct zxdh_dtb_etcam_dump_form_t {
+	uint32_t valid;
+	uint32_t up_type;
+	uint32_t block_sel;
+	uint32_t addr;
+	uint32_t rd_mode;
+	uint32_t data_or_mask;
+	uint32_t tb_depth;
+	uint32_t tb_width;
+	uint32_t tb_dst_addr_h;
+	uint32_t tb_dst_addr_l;
+} ZXDH_DTB_ETCAM_DUMP_FORM_T;
+
+typedef struct zxdh_etcam_dump_info_t {
+	uint32_t block_sel;
+	uint32_t addr;
+	uint32_t rd_mode;
+	uint32_t data_or_mask;
+	uint32_t tb_depth;
+	uint32_t tb_width;
+} ZXDH_ETCAM_DUMP_INFO_T;
+
 typedef struct zxdh_sdt_tbl_eram_t {
 	uint32_t table_type;
 	uint32_t eram_mode;
@@ -594,6 +667,15 @@ typedef struct zxdh_dtb_table_t {
 	ZXDH_DTB_FIELD_T *p_fields;
 } ZXDH_DTB_TABLE_T;
 
+typedef struct zxdh_dtb_queue_cfg_t {
+	uint64_t up_start_phy_addr;
+	uint64_t up_start_vir_addr;
+	uint64_t down_start_phy_addr;
+	uint64_t down_start_vir_addr;
+	uint32_t up_item_size;
+	uint32_t down_item_size;
+} ZXDH_DTB_QUEUE_CFG_T;
+
 typedef struct zxdh_dtb_queue_item_info_t {
 	uint32_t cmd_vld;
 	uint32_t cmd_type;
@@ -686,6 +768,11 @@ typedef enum zxdh_agent_msg_oper_e {
 	ZXDH_WR_RD_MAX
 } ZXDH_MSG_OPER_E;
 
+typedef enum zxdh_msg_dtb_oper_e {
+	ZXDH_QUEUE_REQUEST = 0,
+	ZXDH_QUEUE_RELEASE = 1,
+} ZXDH_MSG_DTB_OPER_E;
+
 typedef struct __rte_aligned(2) zxdh_version_compatible_reg_t {
 	uint8_t version_compatible_item;
 	uint8_t major;
@@ -718,6 +805,16 @@ typedef struct __rte_aligned(2) zxdh_agent_channel_msg_t {
 	void *msg;
 } ZXDH_AGENT_CHANNEL_MSG_T;
 
+typedef struct __rte_aligned(2) zxdh_agent_channel_dtb_msg_t {
+	uint8_t dev_id;
+	uint8_t type;
+	uint8_t oper;
+	uint8_t rsv;
+	char name[32];
+	uint32_t vport;
+	uint32_t queue_id;
+} ZXDH_AGENT_CHANNEL_DTB_MSG_T;
+
 int zxdh_np_host_init(uint32_t dev_id, ZXDH_DEV_INIT_CTRL_T *p_dev_init_ctrl);
 int zxdh_np_online_uninit(uint32_t dev_id, char *port_name, uint32_t queue_id);
 int zxdh_np_dtb_table_entry_write(uint32_t dev_id, uint32_t queue_id,
-- 
2.27.0