From: Bingbin Chen <chen.bingbin@zte.com.cn>
To: stephen@networkplumber.org, wang.junlong1@zte.com.cn,
yang.yonggang@zte.com.cn
Cc: dev@dpdk.org, Bingbin Chen <chen.bingbin@zte.com.cn>
Subject: [PATCH v4 03/14] net/zxdh: add agent channel
Date: Mon, 17 Mar 2025 22:57:51 +0800 [thread overview]
Message-ID: <20250317145802.1819809-4-chen.bingbin@zte.com.cn> (raw)
In-Reply-To: <20250317145802.1819809-1-chen.bingbin@zte.com.cn>
[-- Attachment #1.1.1: Type: text/plain, Size: 16208 bytes --]
Add an agent channel to access network processor (NP) registers
that are not mapped through the PCIe BAR.
Signed-off-by: Bingbin Chen <chen.bingbin@zte.com.cn>
---
drivers/net/zxdh/zxdh_np.c | 315 ++++++++++++++++++++++++++++++++++++-
drivers/net/zxdh/zxdh_np.h | 53 +++++++
2 files changed, 367 insertions(+), 1 deletion(-)
diff --git a/drivers/net/zxdh/zxdh_np.c b/drivers/net/zxdh/zxdh_np.c
index f0848658ac..00e02cb36f 100644
--- a/drivers/net/zxdh/zxdh_np.c
+++ b/drivers/net/zxdh/zxdh_np.c
@@ -480,6 +480,70 @@ zxdh_np_dev_init(void)
return 0;
}
+static void
+zxdh_np_dev_vport_get(uint32_t dev_id, uint32_t *vport)
+{
+ ZXDH_DEV_MGR_T *p_dev_mgr = &g_dev_mgr;
+ ZXDH_DEV_CFG_T *p_dev_info = p_dev_mgr->p_dev_array[dev_id];
+
+ *vport = p_dev_info->vport;
+}
+
+static void
+zxdh_np_dev_agent_addr_get(uint32_t dev_id, uint64_t *agent_addr)
+{
+ ZXDH_DEV_MGR_T *p_dev_mgr = &g_dev_mgr;
+ ZXDH_DEV_CFG_T *p_dev_info = p_dev_mgr->p_dev_array[dev_id];
+
+ *agent_addr = p_dev_info->agent_addr;
+}
+
+static void
+zxdh_np_dev_fw_bar_msg_num_set(uint32_t dev_id, uint32_t bar_msg_num)
+{
+ ZXDH_DEV_MGR_T *p_dev_mgr = &g_dev_mgr;
+ ZXDH_DEV_CFG_T *p_dev_info = p_dev_mgr->p_dev_array[dev_id];
+
+ p_dev_info->fw_bar_msg_num = bar_msg_num;
+
+ PMD_DRV_LOG(INFO, "fw_bar_msg_num_set:fw support agent msg num = %u!", bar_msg_num);
+}
+
+static void
+zxdh_np_dev_fw_bar_msg_num_get(uint32_t dev_id, uint32_t *bar_msg_num)
+{
+ ZXDH_DEV_MGR_T *p_dev_mgr = &g_dev_mgr;
+ ZXDH_DEV_CFG_T *p_dev_info = p_dev_mgr->p_dev_array[dev_id];
+
+ *bar_msg_num = p_dev_info->fw_bar_msg_num;
+}
+
+static uint32_t
+zxdh_np_dev_opr_spinlock_get(uint32_t dev_id, uint32_t type, ZXDH_SPINLOCK_T **p_spinlock_out)
+{
+ ZXDH_DEV_MGR_T *p_dev_mgr = &g_dev_mgr;
+ ZXDH_DEV_CFG_T *p_dev_info = p_dev_mgr->p_dev_array[dev_id];
+
+ if (p_dev_info == NULL) {
+ PMD_DRV_LOG(ERR, "Get dev_info[ %u ] fail!", dev_id);
+ return ZXDH_DEV_TYPE_INVALID;
+ }
+
+ switch (type) {
+ case ZXDH_DEV_SPINLOCK_T_DTB:
+ *p_spinlock_out = &p_dev_info->dtb_spinlock;
+ break;
+ case ZXDH_DEV_SPINLOCK_T_SMMU0:
+ *p_spinlock_out = &p_dev_info->smmu0_spinlock;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "spinlock type is invalid!");
+ return ZXDH_ERR;
+ }
+
+ return ZXDH_OK;
+}
+
static uint32_t
zxdh_np_dev_read_channel(uint32_t dev_id, uint32_t addr, uint32_t size, uint32_t *p_data)
{
@@ -908,6 +972,9 @@ zxdh_np_dev_add(uint32_t dev_id, ZXDH_DEV_TYPE_E dev_type,
p_dev_info->p_pcie_write_fun = zxdh_np_dev_pcie_default_write;
p_dev_info->p_pcie_read_fun = zxdh_np_dev_pcie_default_read;
+ rte_spinlock_init(&p_dev_info->dtb_spinlock.spinlock);
+ rte_spinlock_init(&p_dev_info->smmu0_spinlock.spinlock);
+
return ZXDH_OK;
}
@@ -999,6 +1066,48 @@ zxdh_np_ppu_parse_cls_bitmap(uint32_t dev_id,
}
}
+static void
+zxdh_np_agent_msg_prt(uint8_t type, uint32_t rtn)
+{
+ switch (rtn) {
+ case ZXDH_RC_CTRLCH_MSG_LEN_ZERO:
+ PMD_DRV_LOG(ERR, "type[%u]:msg len is zero!", type);
+ break;
+ case ZXDH_RC_CTRLCH_MSG_PRO_ERR:
+ PMD_DRV_LOG(ERR, "type[%u]:msg process error!", type);
+ break;
+ case ZXDH_RC_CTRLCH_MSG_TYPE_NOT_SUPPORT:
+ PMD_DRV_LOG(ERR, "type[%u]:fw not support the msg!", type);
+ break;
+ case ZXDH_RC_CTRLCH_MSG_OPER_NOT_SUPPORT:
+ PMD_DRV_LOG(ERR, "type[%u]:fw not support opr of the msg!", type);
+ break;
+ case ZXDH_RC_CTRLCH_MSG_DROP:
+ PMD_DRV_LOG(ERR, "type[%u]:fw not support,drop msg!", type);
+ break;
+ default:
+ break;
+ }
+}
+
+static uint32_t
+zxdh_np_agent_bar_msg_check(uint32_t dev_id, ZXDH_AGENT_CHANNEL_MSG_T *p_msg)
+{
+ uint8_t type = 0;
+ uint32_t bar_msg_num = 0;
+
+ type = *((uint8_t *)(p_msg->msg) + 1);
+ if (type != ZXDH_PCIE_BAR_MSG) {
+ zxdh_np_dev_fw_bar_msg_num_get(dev_id, &bar_msg_num);
+ if (type >= bar_msg_num) {
+ PMD_DRV_LOG(ERR, "type[%u] > fw_bar_msg_num[%u]!", type, bar_msg_num);
+ return ZXDH_RC_CTRLCH_MSG_TYPE_NOT_SUPPORT;
+ }
+ }
+
+ return ZXDH_OK;
+}
+
static uint32_t
zxdh_np_agent_channel_sync_send(uint32_t dev_id,
ZXDH_AGENT_CHANNEL_MSG_T *p_msg,
@@ -1014,7 +1123,14 @@ zxdh_np_agent_channel_sync_send(uint32_t dev_id,
uint16_t reply_msg_len = 0;
uint64_t agent_addr = 0;
- PMD_DRV_LOG(DEBUG, "dev_id:0x%x", dev_id);
+ ret = zxdh_np_agent_bar_msg_check(dev_id, p_msg);
+ if (ret != ZXDH_OK) {
+ PMD_DRV_LOG(ERR, "zxdh_np_agent_bar_msg_check failed!");
+ return ret;
+ }
+
+ zxdh_np_dev_vport_get(dev_id, &vport);
+ zxdh_np_dev_agent_addr_get(dev_id, &agent_addr);
if (ZXDH_IS_PF(vport))
in.src = ZXDH_MSG_CHAN_END_PF;
@@ -1054,6 +1170,165 @@ zxdh_np_agent_channel_sync_send(uint32_t dev_id,
return ret;
}
+static uint32_t
+zxdh_np_agent_channel_reg_sync_send(uint32_t dev_id,
+ ZXDH_AGENT_CHANNEL_REG_MSG_T *p_msg, uint32_t *p_data, uint32_t rep_len)
+{
+ uint32_t ret = ZXDH_OK;
+ ZXDH_COMM_CHECK_DEV_POINT(dev_id, p_msg);
+ ZXDH_AGENT_CHANNEL_MSG_T agent_msg = {
+ .msg = (void *)p_msg,
+ .msg_len = sizeof(ZXDH_AGENT_CHANNEL_REG_MSG_T),
+ };
+
+ ret = zxdh_np_agent_channel_sync_send(dev_id, &agent_msg, p_data, rep_len);
+ if (ret != ZXDH_OK) {
+ PMD_DRV_LOG(ERR, "zxdh_np_agent_channel_sync_send failed");
+ return ZXDH_ERR;
+ }
+
+ ret = *p_data;
+ if (ret != ZXDH_OK) {
+ PMD_DRV_LOG(ERR, "zxdh_np_agent_channel_sync_send failed in buffer");
+ return ZXDH_ERR;
+ }
+
+ return ret;
+}
+
+static uint32_t
+zxdh_np_agent_channel_pcie_bar_request(uint32_t dev_id,
+ uint32_t *p_bar_msg_num)
+{
+ uint32_t rc = ZXDH_OK;
+ uint32_t rsp_buff[2] = {0};
+ uint32_t msg_result = 0;
+ uint32_t bar_msg_num = 0;
+ ZXDH_AGENT_PCIE_BAR_MSG_T msgcfg = {
+ .dev_id = 0,
+ .type = ZXDH_PCIE_BAR_MSG,
+ .oper = ZXDH_BAR_MSG_NUM_REQ,
+ };
+ ZXDH_AGENT_CHANNEL_MSG_T agent_msg = {
+ .msg = (void *)&msgcfg,
+ .msg_len = sizeof(ZXDH_AGENT_PCIE_BAR_MSG_T),
+ };
+
+ rc = zxdh_np_agent_channel_sync_send(dev_id, &agent_msg, rsp_buff, sizeof(rsp_buff));
+ if (rc != ZXDH_OK) {
+ PMD_DRV_LOG(ERR, "zxdh_np_agent_channel_sync_send failed!");
+ return rc;
+ }
+
+ msg_result = rsp_buff[0];
+ bar_msg_num = rsp_buff[1];
+
+ zxdh_np_agent_msg_prt(msgcfg.type, msg_result);
+
+ *p_bar_msg_num = bar_msg_num;
+
+ return msg_result;
+}
+
+static uint32_t
+zxdh_np_agent_channel_reg_read(uint32_t dev_id,
+ uint32_t reg_type,
+ uint32_t reg_no,
+ uint32_t reg_width,
+ uint32_t addr,
+ uint32_t *p_data)
+{
+ uint32_t ret = 0;
+ ZXDH_AGENT_CHANNEL_REG_MSG_T msgcfg = {
+ .dev_id = 0,
+ .type = ZXDH_REG_MSG,
+ .subtype = reg_type,
+ .oper = ZXDH_RD,
+ .reg_no = reg_no,
+ .addr = addr,
+ .val_len = reg_width / 4,
+ };
+
+ uint32_t resp_len = reg_width + 4;
+ uint8_t *resp_buffer = rte_zmalloc(NULL, resp_len, 0);
+ if (resp_buffer == NULL) {
+ PMD_DRV_LOG(ERR, "malloc memory failed");
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ ret = zxdh_np_agent_channel_reg_sync_send(dev_id,
+ &msgcfg, (uint32_t *)resp_buffer, resp_len);
+ if (ret != ZXDH_OK) {
+ PMD_DRV_LOG(ERR, "dev id %u reg_no %u send agent read failed.", dev_id, reg_no);
+ rte_free(resp_buffer);
+ return ZXDH_ERR;
+ }
+
+ if (*((uint32_t *)resp_buffer) != ZXDH_OK) {
+ PMD_DRV_LOG(ERR, "dev id %u reg_no %u agent read resp err %u .",
+ dev_id, reg_no, *((uint32_t *)resp_buffer));
+ rte_free(resp_buffer);
+ return ZXDH_ERR;
+ }
+
+ memcpy(p_data, resp_buffer + 4, reg_width);
+
+ rte_free(resp_buffer);
+
+ return ret;
+}
+
+static uint32_t
+zxdh_np_agent_channel_reg_write(uint32_t dev_id,
+ uint32_t reg_type,
+ uint32_t reg_no,
+ uint32_t reg_width,
+ uint32_t addr,
+ uint32_t *p_data)
+{
+ uint32_t ret = ZXDH_OK;
+ ZXDH_AGENT_CHANNEL_REG_MSG_T msgcfg = {
+ .dev_id = 0,
+ .type = ZXDH_REG_MSG,
+ .subtype = reg_type,
+ .oper = ZXDH_WR,
+ .reg_no = reg_no,
+ .addr = addr,
+ .val_len = reg_width / 4,
+ };
+
+ memcpy(msgcfg.val, p_data, reg_width);
+
+ uint32_t resp_len = reg_width + 4;
+ uint8_t *resp_buffer = rte_zmalloc(NULL, resp_len, 0);
+ if (resp_buffer == NULL) {
+ PMD_DRV_LOG(ERR, "malloc memory failed");
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ ret = zxdh_np_agent_channel_reg_sync_send(dev_id,
+ &msgcfg, (uint32_t *)resp_buffer, resp_len);
+
+ if (ret != ZXDH_OK) {
+ PMD_DRV_LOG(ERR, "dev id %u reg_no %u send agent write failed.", dev_id, reg_no);
+ rte_free(resp_buffer);
+ return ZXDH_ERR;
+ }
+
+ if (*((uint32_t *)resp_buffer) != ZXDH_OK) {
+ PMD_DRV_LOG(ERR, "dev id %u reg_no %u agent write resp err %u .",
+ dev_id, reg_no, *((uint32_t *)resp_buffer));
+ rte_free(resp_buffer);
+ return ZXDH_ERR;
+ }
+
+ memcpy(p_data, resp_buffer + 4, reg_width);
+
+ rte_free(resp_buffer);
+
+ return ret;
+}
+
static ZXDH_DTB_MGR_T *
zxdh_np_dtb_mgr_get(uint32_t dev_id)
{
@@ -1263,6 +1538,24 @@ zxdh_np_np_sdk_version_compatible_check(uint32_t dev_id)
return ZXDH_OK;
}
+static uint32_t
+zxdh_np_pcie_bar_msg_num_get(uint32_t dev_id, uint32_t *p_bar_msg_num)
+{
+ uint32_t rc = ZXDH_OK;
+ ZXDH_SPINLOCK_T *p_dtb_spinlock = NULL;
+ ZXDH_DEV_SPINLOCK_TYPE_E spinlock = ZXDH_DEV_SPINLOCK_T_DTB;
+
+ rc = zxdh_np_dev_opr_spinlock_get(dev_id, (uint32_t)spinlock, &p_dtb_spinlock);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dev_opr_spinlock_get");
+
+ rte_spinlock_lock(&p_dtb_spinlock->spinlock);
+ rc = zxdh_np_agent_channel_pcie_bar_request(dev_id, p_bar_msg_num);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_agent_channel_pcie_bar_request");
+ rte_spinlock_unlock(&p_dtb_spinlock->spinlock);
+
+ return rc;
+}
+
static ZXDH_RISCV_DTB_MGR *
zxdh_np_riscv_dtb_queue_mgr_get(uint32_t dev_id)
{
@@ -1381,12 +1674,19 @@ zxdh_np_reg_read(uint32_t dev_id, uint32_t reg_no,
uint32_t i;
uint32_t addr = 0;
uint32_t reg_module = p_reg_info->module_no;
+ uint32_t reg_width = p_reg_info->width;
+ uint32_t reg_real_no = p_reg_info->reg_no;
+ uint32_t reg_type = p_reg_info->flags;
addr = zxdh_np_reg_get_reg_addr(reg_no, m_offset, n_offset);
if (reg_module == DTB4K) {
rc = p_reg_info->p_read_fun(dev_id, addr, p_buff);
ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "p_reg_info->p_read_fun");
+ } else {
+ rc = zxdh_np_agent_channel_reg_read(dev_id,
+ reg_type, reg_real_no, reg_width, addr, p_buff);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_agent_channel_reg_read");
}
if (!zxdh_np_comm_is_big_endian()) {
@@ -1518,6 +1818,9 @@ zxdh_np_reg_write(uint32_t dev_id, uint32_t reg_no,
uint32_t i;
uint32_t addr = 0;
uint32_t reg_module = p_reg_info->module_no;
+ uint32_t reg_width = p_reg_info->width;
+ uint32_t reg_type = p_reg_info->flags;
+ uint32_t reg_real_no = p_reg_info->reg_no;
for (i = 0; i < p_reg_info->field_num; i++) {
if (p_field_info[i].len <= 32) {
@@ -1552,6 +1855,10 @@ zxdh_np_reg_write(uint32_t dev_id, uint32_t reg_no,
if (reg_module == DTB4K) {
rc = p_reg_info->p_write_fun(dev_id, addr, p_buff);
ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "p_reg_info->p_write_fun");
+ } else {
+ rc = zxdh_np_agent_channel_reg_write(dev_id,
+ reg_type, reg_real_no, reg_width, addr, p_buff);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_agent_channel_reg_write");
}
return rc;
@@ -2945,6 +3252,7 @@ zxdh_np_host_init(uint32_t dev_id,
ZXDH_SYS_INIT_CTRL_T sys_init_ctrl = {0};
uint32_t rc;
uint64_t agent_addr;
+ uint32_t bar_msg_num = 0;
ZXDH_COMM_CHECK_DEV_POINT(dev_id, p_dev_init_ctrl);
@@ -2966,6 +3274,11 @@ zxdh_np_host_init(uint32_t dev_id,
rc = zxdh_np_np_sdk_version_compatible_check(dev_id);
ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_np_sdk_version_compatible_check");
+ rc = zxdh_np_pcie_bar_msg_num_get(dev_id, &bar_msg_num);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_pcie_bar_msg_num_get");
+
+ zxdh_np_dev_fw_bar_msg_num_set(dev_id, bar_msg_num);
+
return 0;
}
diff --git a/drivers/net/zxdh/zxdh_np.h b/drivers/net/zxdh/zxdh_np.h
index 11eb7e15d5..a692eca9aa 100644
--- a/drivers/net/zxdh/zxdh_np.h
+++ b/drivers/net/zxdh/zxdh_np.h
@@ -112,9 +112,17 @@
#define ZXDH_SE_SMMU0_ERAM_ADDR_NUM_TOTAL \
(ZXDH_SE_SMMU0_ERAM_BLOCK_NUM * ZXDH_SE_SMMU0_ERAM_ADDR_NUM_PER_BLOCK)
+#define ZXDH_CHANNEL_REPS_LEN (4)
+
#define ZXDH_NPSDK_COMPAT_ITEM_ID (10)
#define ZXDH_DPU_NO_DEBUG_PF_COMPAT_REG_OFFSET (0x5400)
+#define ZXDH_VF_ACTIVE(VPORT) (((VPORT) & 0x0800) >> 11)
+#define ZXDH_EPID_BY(VPORT) (((VPORT) & 0x7000) >> 12)
+#define ZXDH_FUNC_NUM(VPORT) (((VPORT) & 0x0700) >> 8)
+#define ZXDH_VFUNC_NUM(VPORT) (((VPORT) & 0x00FF))
+#define ZXDH_IS_PF(VPORT) (!ZXDH_VF_ACTIVE(VPORT))
+
#define ZXDH_SDT_CFG_LEN (2)
#define ZXDH_SDT_VALID (1)
#define ZXDH_SDT_INVALID (0)
@@ -178,6 +186,12 @@
#define ZXDH_PAR_CHK_INVALID_DEV_ID (ZXDH_PARAMETER_CHK_BASE | 0x007)
#define ZXDH_PAR_CHK_INVALID_PARA (ZXDH_PARAMETER_CHK_BASE | 0x008)
+#define ZXDH_SPIN_LOCK_BASE (ZXDH_RC_BASE | 0x300)
+#define ZXDH_SPIN_LOCK_INIT_FAIL (ZXDH_SPIN_LOCK_BASE | 0x001)
+#define ZXDH_SPIN_LOCK_LOCK_FAIL (ZXDH_SPIN_LOCK_BASE | 0x002)
+#define ZXDH_SPIN_LOCK_ULOCK_FAIL (ZXDH_SPIN_LOCK_BASE | 0X003)
+#define ZXDH_SPIN_LOCK_DESTROY_FAIL (ZXDH_SPIN_LOCK_BASE | 0X004)
+
#define ZXDH_ERAM128_BADDR_MASK (0x3FFFF80)
#define ZXDH_DTB_TABLE_MODE_ERAM (0)
@@ -215,6 +229,13 @@
#define ZXDH_RC_DTB_SEARCH_VPORT_QUEUE_ZERO (ZXDH_RC_DTB_BASE | 0x17)
#define ZXDH_RC_DTB_QUEUE_NOT_ENABLE (ZXDH_RC_DTB_BASE | 0x18)
+#define ZXDH_RC_CTRLCH_BASE (0xf00)
+#define ZXDH_RC_CTRLCH_MSG_LEN_ZERO (ZXDH_RC_CTRLCH_BASE | 0x0)
+#define ZXDH_RC_CTRLCH_MSG_PRO_ERR (ZXDH_RC_CTRLCH_BASE | 0x1)
+#define ZXDH_RC_CTRLCH_MSG_TYPE_NOT_SUPPORT (ZXDH_RC_CTRLCH_BASE | 0x2)
+#define ZXDH_RC_CTRLCH_MSG_OPER_NOT_SUPPORT (ZXDH_RC_CTRLCH_BASE | 0x3)
+#define ZXDH_RC_CTRLCH_MSG_DROP (ZXDH_RC_CTRLCH_BASE | 0x4)
+
#define ZXDH_SCHE_RSP_LEN (2)
#define ZXDH_G_PROFILE_ID_LEN (8)
@@ -465,6 +486,7 @@ typedef struct dpp_dev_cfg_t {
uint32_t access_type;
uint32_t agent_flag;
uint32_t vport;
+ uint32_t fw_bar_msg_num;
uint64_t pcie_addr;
uint64_t riscv_addr;
uint64_t dma_vir_addr;
@@ -473,6 +495,8 @@ typedef struct dpp_dev_cfg_t {
uint32_t init_flags[ZXDH_MODULE_INIT_MAX];
ZXDH_DEV_WRITE_FUNC p_pcie_write_fun;
ZXDH_DEV_READ_FUNC p_pcie_read_fun;
+ ZXDH_SPINLOCK_T dtb_spinlock;
+ ZXDH_SPINLOCK_T smmu0_spinlock;
} ZXDH_DEV_CFG_T;
typedef struct zxdh_dev_mngr_t {
@@ -726,6 +750,17 @@ typedef enum zxdh_stat_cnt_mode_e {
ZXDH_STAT_MAX_MODE,
} ZXDH_STAT_CNT_MODE_E;
+typedef enum zxdh_agent_pcie_bar_e {
+ ZXDH_BAR_MSG_NUM_REQ = 0,
+ ZXDH_PCIE_BAR_MAX
+} ZXDH_MSG_PCIE_BAR_E;
+
+typedef enum zxdh_agent_msg_oper_e {
+ ZXDH_WR = 0,
+ ZXDH_RD,
+ ZXDH_WR_RD_MAX
+} ZXDH_MSG_OPER_E;
+
typedef struct zxdh_smmu0_smmu0_cpu_ind_cmd_t {
uint32_t cpu_ind_rw;
uint32_t cpu_ind_rd_mode;
@@ -917,6 +952,24 @@ typedef struct __rte_aligned(2) zxdh_version_compatible_reg_t {
uint8_t rsv[2];
} ZXDH_VERSION_COMPATIBLE_REG_T;
+typedef struct __rte_aligned(2) zxdh_agent_channel_pcie_bar_msg_t {
+ uint8_t dev_id;
+ uint8_t type;
+ uint8_t oper;
+ uint8_t rsv;
+} ZXDH_AGENT_PCIE_BAR_MSG_T;
+
+typedef struct __rte_aligned(2) zxdh_agent_channel_reg_msg {
+ uint8_t dev_id;
+ uint8_t type;
+ uint8_t subtype;
+ uint8_t oper;
+ uint32_t reg_no;
+ uint32_t addr;
+ uint32_t val_len;
+ uint32_t val[32];
+} ZXDH_AGENT_CHANNEL_REG_MSG_T;
+
typedef struct __rte_aligned(2) zxdh_agent_channel_msg_t {
uint32_t msg_len;
void *msg;
--
2.27.0
[-- Attachment #1.1.2: Type: text/html , Size: 37167 bytes --]
next prev parent reply other threads:[~2025-03-17 15:14 UTC|newest]
Thread overview: 79+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-02-10 1:44 [PATCH v1 01/14] net/zxdh: add network processor registers ops Bingbin Chen
2025-02-10 1:46 ` [PATCH v1 02/14] net/zxdh: support compatibility check Bingbin Chen
2025-02-10 17:25 ` Stephen Hemminger
2025-02-10 1:47 ` [PATCH v1 03/14] net/zxdh: add agent channel Bingbin Chen
2025-02-10 17:28 ` Stephen Hemminger
2025-02-10 17:30 ` Stephen Hemminger
2025-02-10 17:31 ` Stephen Hemminger
2025-02-10 18:23 ` Stephen Hemminger
2025-02-10 1:47 ` [PATCH v1 04/14] net/zxdh: modify dtb queue ops Bingbin Chen
2025-02-10 17:31 ` Stephen Hemminger
2025-02-10 1:48 ` [PATCH v1 05/14] net/zxdh: add tables dump address ops Bingbin Chen
2025-02-10 17:33 ` Stephen Hemminger
2025-02-10 1:50 ` [PATCH v1 06/14] net/zxdh: add eram tables ops Bingbin Chen
2025-02-10 1:50 ` [PATCH v1 07/14] net/zxdh: get flow tables resources Bingbin Chen
2025-02-10 17:35 ` Stephen Hemminger
2025-02-10 17:35 ` Stephen Hemminger
2025-02-10 1:50 ` [PATCH v1 08/14] net/zxdh: support hash resources configuration Bingbin Chen
2025-02-10 17:36 ` Stephen Hemminger
2025-02-10 1:50 ` [PATCH v1 09/14] net/zxdh: implement tables initialization Bingbin Chen
2025-02-10 17:40 ` Stephen Hemminger
2025-02-10 17:43 ` Stephen Hemminger
2025-02-10 1:50 ` [PATCH v1 10/14] net/zxdh: support hash tables write and delete ops Bingbin Chen
2025-02-10 17:45 ` Stephen Hemminger
2025-02-10 1:50 ` [PATCH v1 11/14] net/zxdh: get hash table entry result Bingbin Chen
2025-02-10 17:46 ` Stephen Hemminger
2025-02-10 1:50 ` [PATCH v1 12/14] net/zxdh: delete all hash entries Bingbin Chen
2025-02-10 17:47 ` Stephen Hemminger
2025-02-10 1:50 ` [PATCH v1 13/14] net/zxdh: add acl tables ops Bingbin Chen
2025-02-10 1:50 ` [PATCH v1 14/14] net/zxdh: clean stat values Bingbin Chen
2025-02-10 17:50 ` Stephen Hemminger
2025-02-10 17:50 ` Stephen Hemminger
2025-02-10 18:19 ` Stephen Hemminger
2025-02-22 7:22 ` [PATCH v2 00/14] add network processor ops Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 01/14] net/zxdh: add network processor registers ops Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 02/14] net/zxdh: support compatibility check Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 03/14] net/zxdh: add agent channel Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 04/14] net/zxdh: modify dtb queue ops Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 05/14] net/zxdh: add tables dump address ops Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 06/14] net/zxdh: add eram tables ops Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 07/14] net/zxdh: get flow tables resources Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 08/14] net/zxdh: support hash resources configuration Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 09/14] net/zxdh: implement tables initialization Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 10/14] net/zxdh: support hash tables write and delete ops Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 11/14] net/zxdh: get hash table entry result Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 12/14] net/zxdh: delete all hash entries Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 13/14] net/zxdh: add acl tables ops Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 14/14] net/zxdh: clean stat values Bingbin Chen
2025-02-22 17:34 ` Stephen Hemminger
2025-03-05 8:13 ` [PATCH v3 00/14] net/zxdh: add network processor ops Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 01/14] net/zxdh: add network processor registers ops Bingbin Chen
2025-03-17 14:57 ` [PATCH v4 00/14] net/zxdh: add network processor ops Bingbin Chen
2025-03-17 14:57 ` [PATCH v4 01/14] net/zxdh: add network processor registers ops Bingbin Chen
2025-03-17 14:57 ` [PATCH v4 02/14] net/zxdh: support compatibility check Bingbin Chen
2025-03-17 14:57 ` Bingbin Chen [this message]
2025-03-17 14:57 ` [PATCH v4 04/14] net/zxdh: modify dtb queue ops Bingbin Chen
2025-03-17 14:57 ` [PATCH v4 05/14] net/zxdh: add tables dump address ops Bingbin Chen
2025-03-17 14:57 ` [PATCH v4 06/14] net/zxdh: add eram tables ops Bingbin Chen
2025-03-17 14:57 ` [PATCH v4 07/14] net/zxdh: get flow tables resources Bingbin Chen
2025-03-17 14:57 ` [PATCH v4 08/14] net/zxdh: support hash resources configuration Bingbin Chen
2025-03-17 14:57 ` [PATCH v4 09/14] net/zxdh: implement tables initialization Bingbin Chen
2025-03-17 14:57 ` [PATCH v4 10/14] net/zxdh: support hash tables write and delete ops Bingbin Chen
2025-03-17 14:57 ` [PATCH v4 11/14] net/zxdh: get hash table entry result Bingbin Chen
2025-03-17 14:58 ` [PATCH v4 12/14] net/zxdh: delete all hash entries Bingbin Chen
2025-03-17 14:58 ` [PATCH v4 13/14] net/zxdh: add acl tables ops Bingbin Chen
2025-03-17 14:58 ` [PATCH v4 14/14] net/zxdh: fix debugging errors Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 02/14] net/zxdh: support compatibility check Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 03/14] net/zxdh: add agent channel Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 04/14] net/zxdh: modify dtb queue ops Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 05/14] net/zxdh: add tables dump address ops Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 06/14] net/zxdh: add eram tables ops Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 07/14] net/zxdh: get flow tables resources Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 08/14] net/zxdh: support hash resources configuration Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 09/14] net/zxdh: implement tables initialization Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 10/14] net/zxdh: support hash tables write and delete ops Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 11/14] net/zxdh: get hash table entry result Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 12/14] net/zxdh: delete all hash entries Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 13/14] net/zxdh: add acl tables ops Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 14/14] net/zxdh: modify parameters of the plcr function Bingbin Chen
2025-03-10 23:19 ` [PATCH v1 01/14] net/zxdh: add network processor registers ops Stephen Hemminger
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250317145802.1819809-4-chen.bingbin@zte.com.cn \
--to=chen.bingbin@zte.com.cn \
--cc=dev@dpdk.org \
--cc=stephen@networkplumber.org \
--cc=wang.junlong1@zte.com.cn \
--cc=yang.yonggang@zte.com.cn \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).