From: Bingbin Chen <chen.bingbin@zte.com.cn>
To: stephen@networkplumber.org, wang.junlong1@zte.com.cn,
yang.yonggang@zte.com.cn
Cc: dev@dpdk.org, Bingbin Chen <chen.bingbin@zte.com.cn>
Subject: [PATCH v3 03/14] net/zxdh: add agent channel
Date: Wed, 5 Mar 2025 16:13:09 +0800 [thread overview]
Message-ID: <20250305081320.1161982-4-chen.bingbin@zte.com.cn> (raw)
In-Reply-To: <20250305081320.1161982-1-chen.bingbin@zte.com.cn>
[-- Attachment #1.1.1: Type: text/plain, Size: 24384 bytes --]
Add agent channel to access network processor (NP) registers
that are not mapped by PCIe.
Signed-off-by: Bingbin Chen <chen.bingbin@zte.com.cn>
---
drivers/net/zxdh/zxdh_np.c | 449 +++++++++++++++++++++++++++++++------
drivers/net/zxdh/zxdh_np.h | 63 +++++-
2 files changed, 442 insertions(+), 70 deletions(-)
diff --git a/drivers/net/zxdh/zxdh_np.c b/drivers/net/zxdh/zxdh_np.c
index 5ed78fde82..8113332310 100644
--- a/drivers/net/zxdh/zxdh_np.c
+++ b/drivers/net/zxdh/zxdh_np.c
@@ -394,6 +394,70 @@ zxdh_np_dev_init(void)
return 0;
}
+static void
+zxdh_np_dev_vport_get(uint32_t dev_id, uint32_t *vport)
+{
+ ZXDH_DEV_MGR_T *p_dev_mgr = &g_dev_mgr;
+ ZXDH_DEV_CFG_T *p_dev_info = p_dev_mgr->p_dev_array[dev_id];
+
+ *vport = p_dev_info->vport;
+}
+
+static void
+zxdh_np_dev_agent_addr_get(uint32_t dev_id, uint64_t *agent_addr)
+{
+ ZXDH_DEV_MGR_T *p_dev_mgr = &g_dev_mgr;
+ ZXDH_DEV_CFG_T *p_dev_info = p_dev_mgr->p_dev_array[dev_id];
+
+ *agent_addr = p_dev_info->agent_addr;
+}
+
+static void
+zxdh_np_dev_fw_bar_msg_num_set(uint32_t dev_id, uint32_t bar_msg_num)
+{
+ ZXDH_DEV_MGR_T *p_dev_mgr = &g_dev_mgr;
+ ZXDH_DEV_CFG_T *p_dev_info = p_dev_mgr->p_dev_array[dev_id];
+
+ p_dev_info->fw_bar_msg_num = bar_msg_num;
+
+ PMD_DRV_LOG(INFO, "fw_bar_msg_num_set:fw support agent msg num = %u!", bar_msg_num);
+}
+
+static void
+zxdh_np_dev_fw_bar_msg_num_get(uint32_t dev_id, uint32_t *bar_msg_num)
+{
+ ZXDH_DEV_MGR_T *p_dev_mgr = &g_dev_mgr;
+ ZXDH_DEV_CFG_T *p_dev_info = p_dev_mgr->p_dev_array[dev_id];
+
+ *bar_msg_num = p_dev_info->fw_bar_msg_num;
+}
+
+static uint32_t
+zxdh_np_dev_opr_spinlock_get(uint32_t dev_id, uint32_t type, ZXDH_SPINLOCK_T **p_spinlock_out)
+{
+ ZXDH_DEV_MGR_T *p_dev_mgr = &g_dev_mgr;
+ ZXDH_DEV_CFG_T *p_dev_info = p_dev_mgr->p_dev_array[dev_id];
+
+ if (p_dev_info == NULL) {
+ PMD_DRV_LOG(ERR, "Get dev_info[ %d ] fail!", dev_id);
+ return ZXDH_DEV_TYPE_INVALID;
+ }
+
+ switch (type) {
+ case ZXDH_DEV_SPINLOCK_T_DTB:
+ *p_spinlock_out = &p_dev_info->dtb_spinlock;
+ break;
+ case ZXDH_DEV_SPINLOCK_T_SMMU0:
+ *p_spinlock_out = &p_dev_info->smmu0_spinlock;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "spinlock type is invalid!");
+ return ZXDH_ERR;
+ }
+
+ return ZXDH_OK;
+}
+
static uint32_t
zxdh_np_dev_read_channel(uint32_t dev_id, uint32_t addr, uint32_t size, uint32_t *p_data)
{
@@ -827,6 +891,9 @@ zxdh_np_dev_add(uint32_t dev_id, ZXDH_DEV_TYPE_E dev_type,
p_dev_info->p_pcie_write_fun = zxdh_np_dev_pcie_default_write;
p_dev_info->p_pcie_read_fun = zxdh_np_dev_pcie_default_read;
+ rte_spinlock_init(&p_dev_info->dtb_spinlock.spinlock);
+ rte_spinlock_init(&p_dev_info->smmu0_spinlock.spinlock);
+
return 0;
}
@@ -918,6 +985,269 @@ zxdh_np_ppu_parse_cls_bitmap(uint32_t dev_id,
}
}
+static void
+zxdh_np_agent_msg_prt(uint8_t type, uint32_t rtn)
+{
+ switch (rtn) {
+ case ZXDH_RC_CTRLCH_MSG_LEN_ZERO:
+ PMD_DRV_LOG(ERR, "type[%u]:msg len is zero!", type);
+ break;
+ case ZXDH_RC_CTRLCH_MSG_PRO_ERR:
+ PMD_DRV_LOG(ERR, "type[%u]:msg process error!", type);
+ break;
+ case ZXDH_RC_CTRLCH_MSG_TYPE_NOT_SUPPORT:
+ PMD_DRV_LOG(ERR, "type[%u]:fw not support the msg!", type);
+ break;
+ case ZXDH_RC_CTRLCH_MSG_OPER_NOT_SUPPORT:
+ PMD_DRV_LOG(ERR, "type[%u]:fw not support opr of the msg!", type);
+ break;
+ case ZXDH_RC_CTRLCH_MSG_DROP:
+ PMD_DRV_LOG(ERR, "type[%u]:fw not support,drop msg!", type);
+ break;
+ default:
+ break;
+ }
+}
+
+static uint32_t
+zxdh_np_agent_bar_msg_check(uint32_t dev_id, ZXDH_AGENT_CHANNEL_MSG_T *p_msg)
+{
+ uint8_t type = 0;
+ uint32_t bar_msg_num = 0;
+
+ type = *((uint8_t *)(p_msg->msg) + 1);
+ if (type != ZXDH_PCIE_BAR_MSG) {
+ zxdh_np_dev_fw_bar_msg_num_get(dev_id, &bar_msg_num);
+ if (type >= bar_msg_num) {
+ PMD_DRV_LOG(ERR, "type[%u] > fw_bar_msg_num[%u]!", type, bar_msg_num);
+ return ZXDH_RC_CTRLCH_MSG_TYPE_NOT_SUPPORT;
+ }
+ }
+
+ return ZXDH_OK;
+}
+
+static uint32_t
+zxdh_np_agent_channel_sync_send(uint32_t dev_id,
+ ZXDH_AGENT_CHANNEL_MSG_T *p_msg,
+ uint32_t *p_data,
+ uint32_t rep_len)
+{
+ uint32_t ret = ZXDH_OK;
+ uint32_t vport = 0;
+ struct zxdh_pci_bar_msg in = {0};
+ struct zxdh_msg_recviver_mem result = {0};
+ uint32_t *recv_buffer = NULL;
+ uint8_t *reply_ptr = NULL;
+ uint16_t reply_msg_len = 0;
+ uint64_t agent_addr = 0;
+
+ ret = zxdh_np_agent_bar_msg_check(dev_id, p_msg);
+ if (ret != ZXDH_OK) {
+ PMD_DRV_LOG(ERR, "zxdh_np_agent_bar_msg_check failed!");
+ return ret;
+ }
+
+ zxdh_np_dev_vport_get(dev_id, &vport);
+ zxdh_np_dev_agent_addr_get(dev_id, &agent_addr);
+
+ if (ZXDH_IS_PF(vport))
+ in.src = ZXDH_MSG_CHAN_END_PF;
+ else
+ in.src = ZXDH_MSG_CHAN_END_VF;
+
+ in.virt_addr = agent_addr;
+ in.payload_addr = p_msg->msg;
+ in.payload_len = p_msg->msg_len;
+ in.dst = ZXDH_MSG_CHAN_END_RISC;
+ in.module_id = ZXDH_BAR_MDOULE_NPSDK;
+
+ recv_buffer = (uint32_t *)rte_zmalloc(NULL, rep_len + ZXDH_CHANNEL_REPS_LEN, 0);
+ if (recv_buffer == NULL) {
+ PMD_DRV_LOG(ERR, "malloc memory failed");
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ result.buffer_len = rep_len + ZXDH_CHANNEL_REPS_LEN;
+ result.recv_buffer = recv_buffer;
+
+ ret = zxdh_bar_chan_sync_msg_send(&in, &result);
+ if (ret == ZXDH_BAR_MSG_OK) {
+ reply_ptr = (uint8_t *)(result.recv_buffer);
+ if (*reply_ptr == 0XFF) {
+ reply_msg_len = *(uint16_t *)(reply_ptr + 1);
+ memcpy(p_data, reply_ptr + 4,
+ ((reply_msg_len > rep_len) ? rep_len : reply_msg_len));
+ } else {
+ PMD_DRV_LOG(ERR, "Message not replied");
+ }
+ } else {
+ PMD_DRV_LOG(ERR, "Error[0x%x], bar msg send failed!", ret);
+ }
+
+ rte_free(recv_buffer);
+ return ret;
+}
+
+static uint32_t
+zxdh_np_agent_channel_reg_sync_send(uint32_t dev_id,
+ ZXDH_AGENT_CHANNEL_REG_MSG_T *p_msg, uint32_t *p_data, uint32_t rep_len)
+{
+ uint32_t ret = ZXDH_OK;
+ ZXDH_COMM_CHECK_DEV_POINT(dev_id, p_msg);
+ ZXDH_AGENT_CHANNEL_MSG_T agent_msg = {
+ .msg = (void *)p_msg,
+ .msg_len = sizeof(ZXDH_AGENT_CHANNEL_REG_MSG_T),
+ };
+
+ ret = zxdh_np_agent_channel_sync_send(dev_id, &agent_msg, p_data, rep_len);
+ if (ret != ZXDH_OK) {
+ PMD_DRV_LOG(ERR, "zxdh_np_agent_channel_sync_send failed");
+ return ZXDH_ERR;
+ }
+
+ ret = *p_data;
+ if (ret != ZXDH_OK) {
+ PMD_DRV_LOG(ERR, "zxdh_np_agent_channel_sync_send failed in buffer");
+ return ZXDH_ERR;
+ }
+
+ return ret;
+}
+
+static uint32_t
+zxdh_np_agent_channel_pcie_bar_request(uint32_t dev_id,
+ uint32_t *p_bar_msg_num)
+{
+ uint32_t rc = ZXDH_OK;
+ uint32_t rsp_buff[2] = {0};
+ uint32_t msg_result = 0;
+ uint32_t bar_msg_num = 0;
+ ZXDH_AGENT_PCIE_BAR_MSG_T msgcfg = {
+ .dev_id = 0,
+ .type = ZXDH_PCIE_BAR_MSG,
+ .oper = ZXDH_BAR_MSG_NUM_REQ,
+ };
+ ZXDH_AGENT_CHANNEL_MSG_T agent_msg = {
+ .msg = (void *)&msgcfg,
+ .msg_len = sizeof(ZXDH_AGENT_PCIE_BAR_MSG_T),
+ };
+
+ rc = zxdh_np_agent_channel_sync_send(dev_id, &agent_msg, rsp_buff, sizeof(rsp_buff));
+ if (rc != ZXDH_OK) {
+ PMD_DRV_LOG(ERR, "zxdh_np_agent_channel_sync_send failed!");
+ return rc;
+ }
+
+ msg_result = rsp_buff[0];
+ bar_msg_num = rsp_buff[1];
+
+ zxdh_np_agent_msg_prt(msgcfg.type, msg_result);
+
+ *p_bar_msg_num = bar_msg_num;
+
+ return msg_result;
+}
+
+static uint32_t
+zxdh_np_agent_channel_reg_read(uint32_t dev_id,
+ uint32_t reg_type,
+ uint32_t reg_no,
+ uint32_t reg_width,
+ uint32_t addr,
+ uint32_t *p_data)
+{
+ uint32_t ret = 0;
+ ZXDH_AGENT_CHANNEL_REG_MSG_T msgcfg = {
+ .dev_id = 0,
+ .type = ZXDH_REG_MSG,
+ .subtype = reg_type,
+ .oper = ZXDH_RD,
+ .reg_no = reg_no,
+ .addr = addr,
+ .val_len = reg_width / 4,
+ };
+
+ uint32_t resp_len = reg_width + 4;
+ uint8_t *resp_buffer = (uint8_t *)rte_zmalloc(NULL, resp_len, 0);
+ if (resp_buffer == NULL) {
+ PMD_DRV_LOG(ERR, "malloc memory failed");
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ ret = zxdh_np_agent_channel_reg_sync_send(dev_id,
+ &msgcfg, (uint32_t *)resp_buffer, resp_len);
+ if (ret != ZXDH_OK) {
+ PMD_DRV_LOG(ERR, "dev id %d reg_no %d send agent read failed.", dev_id, reg_no);
+ rte_free(resp_buffer);
+ return ZXDH_ERR;
+ }
+
+ if (*((uint32_t *)resp_buffer) != ZXDH_OK) {
+ PMD_DRV_LOG(ERR, "dev id %d reg_no %d agent read resp err %d .",
+ dev_id, reg_no, *((uint32_t *)resp_buffer));
+ rte_free(resp_buffer);
+ return ZXDH_ERR;
+ }
+
+ memcpy(p_data, resp_buffer + 4, reg_width);
+
+ rte_free(resp_buffer);
+
+ return ret;
+}
+
+static uint32_t
+zxdh_np_agent_channel_reg_write(uint32_t dev_id,
+ uint32_t reg_type,
+ uint32_t reg_no,
+ uint32_t reg_width,
+ uint32_t addr,
+ uint32_t *p_data)
+{
+ uint32_t ret = ZXDH_OK;
+ ZXDH_AGENT_CHANNEL_REG_MSG_T msgcfg = {
+ .dev_id = 0,
+ .type = ZXDH_REG_MSG,
+ .subtype = reg_type,
+ .oper = ZXDH_WR,
+ .reg_no = reg_no,
+ .addr = addr,
+ .val_len = reg_width / 4,
+ };
+
+ memcpy(msgcfg.val, p_data, reg_width);
+
+ uint32_t resp_len = reg_width + 4;
+ uint8_t *resp_buffer = (uint8_t *)rte_zmalloc(NULL, resp_len, 0);
+ if (resp_buffer == NULL) {
+ PMD_DRV_LOG(ERR, "malloc memory failed");
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ ret = zxdh_np_agent_channel_reg_sync_send(dev_id,
+ &msgcfg, (uint32_t *)resp_buffer, resp_len);
+
+ if (ret != ZXDH_OK) {
+ PMD_DRV_LOG(ERR, "dev id %d reg_no %d send agent write failed.", dev_id, reg_no);
+ rte_free(resp_buffer);
+ return ZXDH_ERR;
+ }
+
+ if (*((uint32_t *)resp_buffer) != ZXDH_OK) {
+ PMD_DRV_LOG(ERR, "dev id %d reg_no %d agent write resp err %d .",
+ dev_id, reg_no, *((uint32_t *)resp_buffer));
+ rte_free(resp_buffer);
+ return ZXDH_ERR;
+ }
+
+ memcpy(p_data, resp_buffer + 4, reg_width);
+
+ rte_free(resp_buffer);
+
+ return ret;
+}
+
static ZXDH_DTB_MGR_T *
zxdh_np_dtb_mgr_get(uint32_t dev_id)
{
@@ -1128,6 +1458,24 @@ zxdh_np_np_sdk_version_compatible_check(uint32_t dev_id)
return ZXDH_OK;
}
+static uint32_t
+zxdh_np_pcie_bar_msg_num_get(uint32_t dev_id, uint32_t *p_bar_msg_num)
+{
+ uint32_t rc = ZXDH_OK;
+ ZXDH_SPINLOCK_T *p_dtb_spinlock = NULL;
+ ZXDH_DEV_SPINLOCK_TYPE_E spinlock = ZXDH_DEV_SPINLOCK_T_DTB;
+
+ rc = zxdh_np_dev_opr_spinlock_get(dev_id, (uint32_t)spinlock, &p_dtb_spinlock);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dev_opr_spinlock_get");
+
+ rte_spinlock_lock(&p_dtb_spinlock->spinlock);
+ rc = zxdh_np_agent_channel_pcie_bar_request(dev_id, p_bar_msg_num);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_agent_channel_pcie_bar_request");
+ rte_spinlock_unlock(&p_dtb_spinlock->spinlock);
+
+ return rc;
+}
+
static ZXDH_RISCV_DTB_MGR *
zxdh_np_riscv_dtb_queue_mgr_get(uint32_t dev_id)
{
@@ -1246,12 +1594,19 @@ zxdh_np_reg_read(uint32_t dev_id, uint32_t reg_no,
uint32_t i;
uint32_t addr = 0;
uint32_t reg_module = p_reg_info->module_no;
+ uint32_t reg_width = p_reg_info->width;
+ uint32_t reg_real_no = p_reg_info->reg_no;
+ uint32_t reg_type = p_reg_info->flags;
addr = zxdh_np_reg_get_reg_addr(reg_no, m_offset, n_offset);
if (reg_module == DTB4K) {
rc = p_reg_info->p_read_fun(dev_id, addr, p_buff);
ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "p_reg_info->p_read_fun");
+ } else {
+ rc = zxdh_np_agent_channel_reg_read(dev_id,
+ reg_type, reg_real_no, reg_width, addr, p_buff);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_agent_channel_reg_read");
}
if (!zxdh_np_comm_is_big_endian()) {
@@ -1383,6 +1738,9 @@ zxdh_np_reg_write(uint32_t dev_id, uint32_t reg_no,
uint32_t i;
uint32_t addr = 0;
uint32_t reg_module = p_reg_info->module_no;
+ uint32_t reg_width = p_reg_info->width;
+ uint32_t reg_type = p_reg_info->flags;
+ uint32_t reg_real_no = p_reg_info->reg_no;
for (i = 0; i < p_reg_info->field_num; i++) {
if (p_field_info[i].len <= 32) {
@@ -1417,6 +1775,10 @@ zxdh_np_reg_write(uint32_t dev_id, uint32_t reg_no,
if (reg_module == DTB4K) {
rc = p_reg_info->p_write_fun(dev_id, addr, p_buff);
ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "p_reg_info->p_write_fun");
+ } else {
+ rc = zxdh_np_agent_channel_reg_write(dev_id,
+ reg_type, reg_real_no, reg_width, addr, p_buff);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_agent_channel_reg_write");
}
return rc;
@@ -1790,7 +2152,7 @@ zxdh_np_dtb_eram_one_entry(uint32_t dev_id,
uint32_t base_addr;
uint32_t index;
uint32_t opr_mode;
- uint32_t rc;
+ uint32_t rc = ZXDH_OK;
ZXDH_COMM_CHECK_POINT(pdata);
ZXDH_COMM_CHECK_POINT(p_dtb_one_entry);
@@ -2687,6 +3049,7 @@ zxdh_np_host_init(uint32_t dev_id,
ZXDH_SYS_INIT_CTRL_T sys_init_ctrl = {0};
uint32_t rc;
uint64_t agent_addr;
+ uint32_t bar_msg_num = 0;
ZXDH_COMM_CHECK_DEV_POINT(dev_id, p_dev_init_ctrl);
@@ -2708,6 +3071,11 @@ zxdh_np_host_init(uint32_t dev_id,
rc = zxdh_np_np_sdk_version_compatible_check(dev_id);
ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_np_sdk_version_compatible_check");
+ rc = zxdh_np_pcie_bar_msg_num_get(dev_id, &bar_msg_num);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_pcie_bar_msg_num_get");
+
+ zxdh_np_dev_fw_bar_msg_num_set(dev_id, bar_msg_num);
+
return 0;
}
@@ -2942,59 +3310,7 @@ zxdh_np_stat_ppu_cnt_get_ex(uint32_t dev_id,
}
static uint32_t
-zxdh_np_agent_channel_sync_send(ZXDH_AGENT_CHANNEL_MSG_T *p_msg,
- uint32_t *p_data,
- uint32_t rep_len)
-{
- uint32_t ret = 0;
- uint32_t vport = 0;
- struct zxdh_pci_bar_msg in = {0};
- struct zxdh_msg_recviver_mem result = {0};
- uint32_t *recv_buffer;
- uint8_t *reply_ptr = NULL;
- uint16_t reply_msg_len = 0;
- uint64_t agent_addr = 0;
-
- if (ZXDH_IS_PF(vport))
- in.src = ZXDH_MSG_CHAN_END_PF;
- else
- in.src = ZXDH_MSG_CHAN_END_VF;
-
- in.virt_addr = agent_addr;
- in.payload_addr = p_msg->msg;
- in.payload_len = p_msg->msg_len;
- in.dst = ZXDH_MSG_CHAN_END_RISC;
- in.module_id = ZXDH_BAR_MDOULE_NPSDK;
-
- recv_buffer = (uint32_t *)rte_zmalloc(NULL, rep_len + ZXDH_CHANNEL_REPS_LEN, 0);
- if (recv_buffer == NULL) {
- PMD_DRV_LOG(ERR, "%s point null!", __func__);
- return ZXDH_PAR_CHK_POINT_NULL;
- }
-
- result.buffer_len = rep_len + ZXDH_CHANNEL_REPS_LEN;
- result.recv_buffer = recv_buffer;
-
- ret = zxdh_bar_chan_sync_msg_send(&in, &result);
- if (ret == ZXDH_BAR_MSG_OK) {
- reply_ptr = (uint8_t *)(result.recv_buffer);
- if (*reply_ptr == 0XFF) {
- reply_msg_len = *(uint16_t *)(reply_ptr + 1);
- memcpy(p_data, reply_ptr + 4,
- ((reply_msg_len > rep_len) ? rep_len : reply_msg_len));
- } else {
- PMD_DRV_LOG(ERR, "Message not replied");
- }
- } else {
- PMD_DRV_LOG(ERR, "Error[0x%x], %s failed!", ret, __func__);
- }
-
- rte_free(recv_buffer);
- return ret;
-}
-
-static uint32_t
-zxdh_np_agent_channel_plcr_sync_send(ZXDH_AGENT_CHANNEL_PLCR_MSG_T *p_msg,
+zxdh_np_agent_channel_plcr_sync_send(uint32_t dev_id, ZXDH_AGENT_CHANNEL_PLCR_MSG_T *p_msg,
uint32_t *p_data, uint32_t rep_len)
{
uint32_t ret = 0;
@@ -3003,7 +3319,7 @@ zxdh_np_agent_channel_plcr_sync_send(ZXDH_AGENT_CHANNEL_PLCR_MSG_T *p_msg,
agent_msg.msg = (void *)p_msg;
agent_msg.msg_len = sizeof(ZXDH_AGENT_CHANNEL_PLCR_MSG_T);
- ret = zxdh_np_agent_channel_sync_send(&agent_msg, p_data, rep_len);
+ ret = zxdh_np_agent_channel_sync_send(dev_id, &agent_msg, p_data, rep_len);
if (ret != 0) {
PMD_DRV_LOG(ERR, "%s: agent_channel_sync_send failed.", __func__);
return 1;
@@ -3013,7 +3329,7 @@ zxdh_np_agent_channel_plcr_sync_send(ZXDH_AGENT_CHANNEL_PLCR_MSG_T *p_msg,
}
static uint32_t
-zxdh_np_agent_channel_plcr_profileid_request(uint32_t vport,
+zxdh_np_agent_channel_plcr_profileid_request(uint32_t dev_id, uint32_t vport,
uint32_t car_type, uint32_t *p_profileid)
{
uint32_t ret = 0;
@@ -3028,7 +3344,7 @@ zxdh_np_agent_channel_plcr_profileid_request(uint32_t vport,
msgcfg.car_type = car_type;
msgcfg.profile_id = 0xFFFF;
- ret = zxdh_np_agent_channel_plcr_sync_send(&msgcfg,
+ ret = zxdh_np_agent_channel_plcr_sync_send(dev_id, &msgcfg,
resp_buffer, sizeof(resp_buffer));
if (ret != 0) {
PMD_DRV_LOG(ERR, "%s: agent_channel_plcr_sync_send failed.", __func__);
@@ -3041,7 +3357,8 @@ zxdh_np_agent_channel_plcr_profileid_request(uint32_t vport,
}
static uint32_t
-zxdh_np_agent_channel_plcr_car_rate(uint32_t car_type,
+zxdh_np_agent_channel_plcr_car_rate(uint32_t dev_id,
+ uint32_t car_type,
uint32_t pkt_sign,
uint32_t profile_id __rte_unused,
void *p_car_profile_cfg)
@@ -3071,7 +3388,7 @@ zxdh_np_agent_channel_plcr_car_rate(uint32_t car_type,
agent_msg.msg = (void *)&msgpktcfg;
agent_msg.msg_len = sizeof(ZXDH_AGENT_CAR_PKT_PROFILE_MSG_T);
- ret = zxdh_np_agent_channel_sync_send(&agent_msg, resp_buffer, resp_len);
+ ret = zxdh_np_agent_channel_sync_send(dev_id, &agent_msg, resp_buffer, resp_len);
if (ret != 0) {
PMD_DRV_LOG(ERR, "%s: stat_car_a_type failed.", __func__);
return 1;
@@ -3105,7 +3422,7 @@ zxdh_np_agent_channel_plcr_car_rate(uint32_t car_type,
agent_msg.msg = (void *)&msgcfg;
agent_msg.msg_len = sizeof(ZXDH_AGENT_CAR_PROFILE_MSG_T);
- ret = zxdh_np_agent_channel_sync_send(&agent_msg, resp_buffer, resp_len);
+ ret = zxdh_np_agent_channel_sync_send(dev_id, &agent_msg, resp_buffer, resp_len);
if (ret != 0) {
PMD_DRV_LOG(ERR, "%s: stat_car_b_type failed.", __func__);
return 1;
@@ -3118,7 +3435,7 @@ zxdh_np_agent_channel_plcr_car_rate(uint32_t car_type,
}
static uint32_t
-zxdh_np_agent_channel_plcr_profileid_release(uint32_t vport,
+zxdh_np_agent_channel_plcr_profileid_release(uint32_t dev_id, uint32_t vport,
uint32_t car_type __rte_unused,
uint32_t profileid)
{
@@ -3133,7 +3450,7 @@ zxdh_np_agent_channel_plcr_profileid_release(uint32_t vport,
msgcfg.vport = vport;
msgcfg.profile_id = profileid;
- ret = zxdh_np_agent_channel_plcr_sync_send(&msgcfg,
+ ret = zxdh_np_agent_channel_plcr_sync_send(dev_id, &msgcfg,
resp_buffer, sizeof(resp_buffer));
if (ret != 0) {
PMD_DRV_LOG(ERR, "%s: agent_channel_plcr_sync_send failed.", __func__);
@@ -3234,7 +3551,7 @@ zxdh_np_car_profile_id_add(uint32_t vport_id,
PMD_DRV_LOG(ERR, "%s: profile_id point null!", __func__);
return ZXDH_PAR_CHK_POINT_NULL;
}
- ret = zxdh_np_agent_channel_plcr_profileid_request(vport_id, flags, profile_id);
+ ret = zxdh_np_agent_channel_plcr_profileid_request(0, vport_id, flags, profile_id);
profile_id_h = *(profile_id + 1);
profile_id_l = *profile_id;
@@ -3259,8 +3576,9 @@ zxdh_np_car_profile_cfg_set(uint32_t vport_id __rte_unused,
void *p_car_profile_cfg)
{
uint32_t ret = 0;
+ uint32_t dev_id = 0;
- ret = zxdh_np_agent_channel_plcr_car_rate(car_type,
+ ret = zxdh_np_agent_channel_plcr_car_rate(dev_id, car_type,
pkt_sign, profile_id, p_car_profile_cfg);
if (ret != 0) {
PMD_DRV_LOG(ERR, "%s: plcr_car_rate set failed!", __func__);
@@ -3276,10 +3594,11 @@ zxdh_np_car_profile_id_delete(uint32_t vport_id,
{
uint32_t ret = 0;
uint32_t profileid = 0;
+ uint32_t dev_id = 0;
profileid = profile_id & 0xFFFF;
- ret = zxdh_np_agent_channel_plcr_profileid_release(vport_id, flags, profileid);
+ ret = zxdh_np_agent_channel_plcr_profileid_release(dev_id, vport_id, flags, profileid);
if (ret != 0) {
PMD_DRV_LOG(ERR, "%s: plcr profiled id release failed!", __func__);
return 1;
diff --git a/drivers/net/zxdh/zxdh_np.h b/drivers/net/zxdh/zxdh_np.h
index d6ceecf3f8..c8fc8ea613 100644
--- a/drivers/net/zxdh/zxdh_np.h
+++ b/drivers/net/zxdh/zxdh_np.h
@@ -112,9 +112,17 @@
#define ZXDH_SE_SMMU0_ERAM_ADDR_NUM_TOTAL \
(ZXDH_SE_SMMU0_ERAM_BLOCK_NUM * ZXDH_SE_SMMU0_ERAM_ADDR_NUM_PER_BLOCK)
+#define ZXDH_CHANNEL_REPS_LEN (4)
+
#define ZXDH_NPSDK_COMPAT_ITEM_ID (10)
#define ZXDH_DPU_NO_DEBUG_PF_COMPAT_REG_OFFSET (0x5400)
+#define ZXDH_VF_ACTIVE(VPORT) (((VPORT) & 0x0800) >> 11)
+#define ZXDH_EPID_BY(VPORT) (((VPORT) & 0x7000) >> 12)
+#define ZXDH_FUNC_NUM(VPORT) (((VPORT) & 0x0700) >> 8)
+#define ZXDH_VFUNC_NUM(VPORT) (((VPORT) & 0x00FF))
+#define ZXDH_IS_PF(VPORT) (!ZXDH_VF_ACTIVE(VPORT))
+
/**errco code */
#define ZXDH_RC_BASE (0x1000U)
#define ZXDH_PARAMETER_CHK_BASE (ZXDH_RC_BASE | 0x200)
@@ -127,6 +135,12 @@
#define ZXDH_PAR_CHK_INVALID_DEV_ID (ZXDH_PARAMETER_CHK_BASE | 0x007)
#define ZXDH_PAR_CHK_INVALID_PARA (ZXDH_PARAMETER_CHK_BASE | 0x008)
+#define ZXDH_SPIN_LOCK_BASE (ZXDH_RC_BASE | 0x300)
+#define ZXDH_SPIN_LOCK_INIT_FAIL (ZXDH_SPIN_LOCK_BASE | 0x001)
+#define ZXDH_SPIN_LOCK_LOCK_FAIL (ZXDH_SPIN_LOCK_BASE | 0x002)
+#define ZXDH_SPIN_LOCK_ULOCK_FAIL (ZXDH_SPIN_LOCK_BASE | 0X003)
+#define ZXDH_SPIN_LOCK_DESTROY_FAIL (ZXDH_SPIN_LOCK_BASE | 0X004)
+
#define ZXDH_ERAM128_BADDR_MASK (0x3FFFF80)
#define ZXDH_DTB_TABLE_MODE_ERAM (0)
@@ -164,6 +178,13 @@
#define ZXDH_RC_DTB_SEARCH_VPORT_QUEUE_ZERO (ZXDH_RC_DTB_BASE | 0x17)
#define ZXDH_RC_DTB_QUEUE_NOT_ENABLE (ZXDH_RC_DTB_BASE | 0x18)
+#define ZXDH_RC_CTRLCH_BASE (0xf00)
+#define ZXDH_RC_CTRLCH_MSG_LEN_ZERO (ZXDH_RC_CTRLCH_BASE | 0x0)
+#define ZXDH_RC_CTRLCH_MSG_PRO_ERR (ZXDH_RC_CTRLCH_BASE | 0x1)
+#define ZXDH_RC_CTRLCH_MSG_TYPE_NOT_SUPPORT (ZXDH_RC_CTRLCH_BASE | 0x2)
+#define ZXDH_RC_CTRLCH_MSG_OPER_NOT_SUPPORT (ZXDH_RC_CTRLCH_BASE | 0x3)
+#define ZXDH_RC_CTRLCH_MSG_DROP (ZXDH_RC_CTRLCH_BASE | 0x4)
+
#define ZXDH_SCHE_RSP_LEN (2)
#define ZXDH_G_PROFILE_ID_LEN (8)
@@ -380,6 +401,7 @@ typedef struct dpp_dev_cfg_t {
uint32_t access_type;
uint32_t agent_flag;
uint32_t vport;
+ uint32_t fw_bar_msg_num;
uint64_t pcie_addr;
uint64_t riscv_addr;
uint64_t dma_vir_addr;
@@ -388,6 +410,8 @@ typedef struct dpp_dev_cfg_t {
uint32_t init_flags[ZXDH_MODULE_INIT_MAX];
ZXDH_DEV_WRITE_FUNC p_pcie_write_fun;
ZXDH_DEV_READ_FUNC p_pcie_read_fun;
+ ZXDH_SPINLOCK_T dtb_spinlock;
+ ZXDH_SPINLOCK_T smmu0_spinlock;
} ZXDH_DEV_CFG_T;
typedef struct zxdh_dev_mngr_t {
@@ -662,6 +686,17 @@ typedef enum zxdh_stat_cnt_mode_e {
ZXDH_STAT_MAX_MODE,
} ZXDH_STAT_CNT_MODE_E;
+typedef enum zxdh_agent_pcie_bar_e {
+ ZXDH_BAR_MSG_NUM_REQ = 0,
+ ZXDH_PCIE_BAR_MAX
+} ZXDH_MSG_PCIE_BAR_E;
+
+typedef enum zxdh_agent_msg_oper_e {
+ ZXDH_WR = 0,
+ ZXDH_RD,
+ ZXDH_WR_RD_MAX
+} ZXDH_MSG_OPER_E;
+
typedef struct zxdh_smmu0_smmu0_cpu_ind_cmd_t {
uint32_t cpu_ind_rw;
uint32_t cpu_ind_rd_mode;
@@ -776,11 +811,6 @@ typedef struct dpp_agent_car_pkt_profile_msg {
uint32_t pri[ZXDH_CAR_PRI_MAX];
} ZXDH_AGENT_CAR_PKT_PROFILE_MSG_T;
-typedef struct zxdh_agent_channel_msg_t {
- uint32_t msg_len;
- void *msg;
-} ZXDH_AGENT_CHANNEL_MSG_T;
-
typedef struct zxdh_agent_channel_plcr_msg {
uint8_t dev_id;
uint8_t type;
@@ -858,6 +888,29 @@ typedef struct __rte_aligned(2) zxdh_version_compatible_reg_t {
uint8_t rsv[2];
} ZXDH_VERSION_COMPATIBLE_REG_T;
+typedef struct __rte_aligned(2) zxdh_agent_channel_pcie_bar_msg_t {
+ uint8_t dev_id;
+ uint8_t type;
+ uint8_t oper;
+ uint8_t rsv;
+} ZXDH_AGENT_PCIE_BAR_MSG_T;
+
+typedef struct __rte_aligned(2) zxdh_agent_channel_reg_msg {
+ uint8_t dev_id;
+ uint8_t type;
+ uint8_t subtype;
+ uint8_t oper;
+ uint32_t reg_no;
+ uint32_t addr;
+ uint32_t val_len;
+ uint32_t val[32];
+} ZXDH_AGENT_CHANNEL_REG_MSG_T;
+
+typedef struct __rte_aligned(2) zxdh_agent_channel_msg_t {
+ uint32_t msg_len;
+ void *msg;
+} ZXDH_AGENT_CHANNEL_MSG_T;
+
int zxdh_np_host_init(uint32_t dev_id, ZXDH_DEV_INIT_CTRL_T *p_dev_init_ctrl);
int zxdh_np_online_uninit(uint32_t dev_id, char *port_name, uint32_t queue_id);
int zxdh_np_dtb_table_entry_write(uint32_t dev_id, uint32_t queue_id,
--
2.27.0
[-- Attachment #1.1.2: Type: text/html , Size: 55065 bytes --]
next prev parent reply other threads:[~2025-03-05 8:28 UTC|newest]
Thread overview: 63+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-02-10 1:44 [PATCH v1 01/14] net/zxdh: add network processor registers ops Bingbin Chen
2025-02-10 1:46 ` [PATCH v1 02/14] net/zxdh: support compatibility check Bingbin Chen
2025-02-10 17:25 ` Stephen Hemminger
2025-02-10 1:47 ` [PATCH v1 03/14] net/zxdh: add agent channel Bingbin Chen
2025-02-10 17:28 ` Stephen Hemminger
2025-02-10 17:30 ` Stephen Hemminger
2025-02-10 17:31 ` Stephen Hemminger
2025-02-10 18:23 ` Stephen Hemminger
2025-02-10 1:47 ` [PATCH v1 04/14] net/zxdh: modify dtb queue ops Bingbin Chen
2025-02-10 17:31 ` Stephen Hemminger
2025-02-10 1:48 ` [PATCH v1 05/14] net/zxdh: add tables dump address ops Bingbin Chen
2025-02-10 17:33 ` Stephen Hemminger
2025-02-10 1:50 ` [PATCH v1 06/14] net/zxdh: add eram tables ops Bingbin Chen
2025-02-10 1:50 ` [PATCH v1 07/14] net/zxdh: get flow tables resources Bingbin Chen
2025-02-10 17:35 ` Stephen Hemminger
2025-02-10 17:35 ` Stephen Hemminger
2025-02-10 1:50 ` [PATCH v1 08/14] net/zxdh: support hash resources configuration Bingbin Chen
2025-02-10 17:36 ` Stephen Hemminger
2025-02-10 1:50 ` [PATCH v1 09/14] net/zxdh: implement tables initialization Bingbin Chen
2025-02-10 17:40 ` Stephen Hemminger
2025-02-10 17:43 ` Stephen Hemminger
2025-02-10 1:50 ` [PATCH v1 10/14] net/zxdh: support hash tables write and delete ops Bingbin Chen
2025-02-10 17:45 ` Stephen Hemminger
2025-02-10 1:50 ` [PATCH v1 11/14] net/zxdh: get hash table entry result Bingbin Chen
2025-02-10 17:46 ` Stephen Hemminger
2025-02-10 1:50 ` [PATCH v1 12/14] net/zxdh: delete all hash entries Bingbin Chen
2025-02-10 17:47 ` Stephen Hemminger
2025-02-10 1:50 ` [PATCH v1 13/14] net/zxdh: add acl tables ops Bingbin Chen
2025-02-10 1:50 ` [PATCH v1 14/14] net/zxdh: clean stat values Bingbin Chen
2025-02-10 17:50 ` Stephen Hemminger
2025-02-10 17:50 ` Stephen Hemminger
2025-02-10 18:19 ` Stephen Hemminger
2025-02-22 7:22 ` [PATCH v2 00/14] add network processor ops Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 01/14] net/zxdh: add network processor registers ops Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 02/14] net/zxdh: support compatibility check Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 03/14] net/zxdh: add agent channel Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 04/14] net/zxdh: modify dtb queue ops Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 05/14] net/zxdh: add tables dump address ops Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 06/14] net/zxdh: add eram tables ops Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 07/14] net/zxdh: get flow tables resources Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 08/14] net/zxdh: support hash resources configuration Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 09/14] net/zxdh: implement tables initialization Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 10/14] net/zxdh: support hash tables write and delete ops Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 11/14] net/zxdh: get hash table entry result Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 12/14] net/zxdh: delete all hash entries Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 13/14] net/zxdh: add acl tables ops Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 14/14] net/zxdh: clean stat values Bingbin Chen
2025-02-22 17:34 ` Stephen Hemminger
2025-03-05 8:13 ` [PATCH v3 00/14] net/zxdh: add network processor ops Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 01/14] net/zxdh: add network processor registers ops Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 02/14] net/zxdh: support compatibility check Bingbin Chen
2025-03-05 8:13 ` Bingbin Chen [this message]
2025-03-05 8:13 ` [PATCH v3 04/14] net/zxdh: modify dtb queue ops Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 05/14] net/zxdh: add tables dump address ops Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 06/14] net/zxdh: add eram tables ops Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 07/14] net/zxdh: get flow tables resources Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 08/14] net/zxdh: support hash resources configuration Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 09/14] net/zxdh: implement tables initialization Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 10/14] net/zxdh: support hash tables write and delete ops Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 11/14] net/zxdh: get hash table entry result Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 12/14] net/zxdh: delete all hash entries Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 13/14] net/zxdh: add acl tables ops Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 14/14] net/zxdh: modify parameters of the plcr function Bingbin Chen
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250305081320.1161982-4-chen.bingbin@zte.com.cn \
--to=chen.bingbin@zte.com.cn \
--cc=dev@dpdk.org \
--cc=stephen@networkplumber.org \
--cc=wang.junlong1@zte.com.cn \
--cc=yang.yonggang@zte.com.cn \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).