From: Bingbin Chen <chen.bingbin@zte.com.cn>
To: stephen@networkplumber.org, wang.junlong1@zte.com.cn,
yang.yonggang@zte.com.cn
Cc: dev@dpdk.org, Bingbin Chen <chen.bingbin@zte.com.cn>
Subject: [PATCH v4 14/14] net/zxdh: fix packet send and receive errors
Date: Mon, 17 Mar 2025 22:58:02 +0800 [thread overview]
Message-ID: <20250317145802.1819809-15-chen.bingbin@zte.com.cn> (raw)
In-Reply-To: <20250317145802.1819809-1-chen.bingbin@zte.com.cn>
[-- Attachment #1.1.1: Type: text/plain, Size: 33881 bytes --]
Fix packet transmit and receive errors in the zxdh driver:
derive the Rx header size from the descriptor's pd_len field and
validate it before use, make DTB memzone names unique per device to
avoid reservation collisions, pass dev_id through the NP CAR profile
APIs instead of hard-coding device 0, serialize SMMU0 indirect
register reads with a spinlock, and keep the PCIe virtual address
64-bit to prevent truncation.
Signed-off-by: Bingbin Chen <chen.bingbin@zte.com.cn>
---
drivers/net/zxdh/zxdh_ethdev.c | 19 +++++----
drivers/net/zxdh/zxdh_ethdev_ops.c | 15 +++----
drivers/net/zxdh/zxdh_msg.c | 20 ++-------
drivers/net/zxdh/zxdh_msg.h | 12 +++---
drivers/net/zxdh/zxdh_mtr.c | 8 ++--
drivers/net/zxdh/zxdh_np.c | 68 ++++++++++++++++++++++++++----
drivers/net/zxdh/zxdh_np.h | 8 ++--
drivers/net/zxdh/zxdh_pci.c | 24 ++++++++++-
drivers/net/zxdh/zxdh_pci.h | 2 +-
drivers/net/zxdh/zxdh_rxtx.c | 11 +++--
drivers/net/zxdh/zxdh_tables.c | 42 +++++++++---------
drivers/net/zxdh/zxdh_tables.h | 68 +++++++++++++++++++++++-------
12 files changed, 201 insertions(+), 96 deletions(-)
diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index ea8b18e5e1..ba7ea52d20 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -1250,10 +1250,6 @@ zxdh_set_rxtx_funcs(struct rte_eth_dev *eth_dev)
{
struct zxdh_hw *hw = eth_dev->data->dev_private;
- if (!zxdh_pci_packed_queue(hw)) {
- PMD_DRV_LOG(ERR, "port %u not support packed queue", eth_dev->data->port_id);
- return -1;
- }
if (!zxdh_pci_with_feature(hw, ZXDH_NET_F_MRG_RXBUF)) {
PMD_DRV_LOG(ERR, "port %u not support rx mergeable", eth_dev->data->port_id);
return -1;
@@ -1498,6 +1494,8 @@ zxdh_dtb_dump_res_init(struct zxdh_hw *hw, ZXDH_DEV_INIT_CTRL_T *dpp_ctrl)
struct zxdh_dtb_bulk_dump_info dtb_dump_baseres[] = {
{"sdt_vport_att_table", 4 * 1024 * 1024, ZXDH_SDT_VPORT_ATT_TABLE, NULL},
+ {"sdt_vlan_att_table", 4 * 1024 * 1024, ZXDH_SDT_VLAN_ATT_TABLE, NULL},
+ {"sdt_rss_table", 4 * 1024 * 1024, ZXDH_SDT_RSS_ATT_TABLE, NULL},
{"sdt_l2_entry_table0", 5 * 1024 * 1024, ZXDH_SDT_L2_ENTRY_TABLE0, NULL},
{"sdt_l2_entry_table1", 5 * 1024 * 1024, ZXDH_SDT_L2_ENTRY_TABLE1, NULL},
{"sdt_l2_entry_table2", 5 * 1024 * 1024, ZXDH_SDT_L2_ENTRY_TABLE2, NULL},
@@ -1514,7 +1512,8 @@ zxdh_dtb_dump_res_init(struct zxdh_hw *hw, ZXDH_DEV_INIT_CTRL_T *dpp_ctrl)
for (i = 0; i < (int)RTE_DIM(dtb_dump_baseres); i++) {
struct zxdh_dtb_bulk_dump_info *p = dtb_dump_baseres + i;
char buf[ZXDH_MAX_NAME_LEN] = {0};
-
+ memset(buf, '\0', sizeof(buf));
+ sprintf(buf, "%s_%x", p->mz_name, hw->dev_id);
p->mz_name = buf;
const struct rte_memzone *generic_dump_mz =
@@ -1544,6 +1543,7 @@ zxdh_np_dtb_res_init(struct rte_eth_dev *dev)
struct zxdh_hw *hw = dev->data->dev_private;
struct zxdh_bar_offset_params param = {0};
struct zxdh_bar_offset_res res = {0};
+ char buf[ZXDH_MAX_NAME_LEN] = {0};
struct zxdh_dtb_shared_data *dtb_data = &hw->dev_sd->dtb_sd;
int ret = 0;
@@ -1569,7 +1569,7 @@ zxdh_np_dtb_res_init(struct rte_eth_dev *dev)
dpp_ctrl->vport = hw->vport.vport;
dpp_ctrl->vector = ZXDH_MSIX_INTR_DTB_VEC;
strlcpy(dpp_ctrl->port_name, dev->device->name, sizeof(dpp_ctrl->port_name));
- dpp_ctrl->pcie_vir_addr = (uint32_t)hw->bar_addr[0];
+ dpp_ctrl->pcie_vir_addr = (uint64_t)hw->bar_addr[0];
param.pcie_id = hw->pcie_id;
param.virt_addr = hw->bar_addr[0] + ZXDH_CTRLCH_OFFSET;
@@ -1584,7 +1584,8 @@ zxdh_np_dtb_res_init(struct rte_eth_dev *dev)
dpp_ctrl->np_bar_offset = res.bar_offset;
if (!dtb_data->dtb_table_conf_mz) {
- const struct rte_memzone *conf_mz = rte_memzone_reserve_aligned("zxdh_dtb_table_conf_mz",
+ sprintf(buf, "%s_%x", "zxdh_dtb_table_conf_mz", hw->dev_id);
+ const struct rte_memzone *conf_mz = rte_memzone_reserve_aligned(buf,
ZXDH_DTB_TABLE_CONF_SIZE, SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
if (conf_mz == NULL) {
@@ -1600,7 +1601,9 @@ zxdh_np_dtb_res_init(struct rte_eth_dev *dev)
}
if (!dtb_data->dtb_table_dump_mz) {
- const struct rte_memzone *dump_mz = rte_memzone_reserve_aligned("zxdh_dtb_table_dump_mz",
+ memset(buf, '\0', sizeof(buf));
+ sprintf(buf, "%s_%x", "zxdh_dtb_table_dump_mz", hw->dev_id);
+ const struct rte_memzone *dump_mz = rte_memzone_reserve_aligned(buf,
ZXDH_DTB_TABLE_DUMP_SIZE, SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
if (dump_mz == NULL) {
diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.c b/drivers/net/zxdh/zxdh_ethdev_ops.c
index 2b02734c62..aa1001da01 100644
--- a/drivers/net/zxdh/zxdh_ethdev_ops.c
+++ b/drivers/net/zxdh/zxdh_ethdev_ops.c
@@ -433,7 +433,7 @@ zxdh_dev_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
ret = zxdh_del_mac_table(hw, hw->vport.vport, old_addr,
hw->hash_search_index, 0, 0);
if (ret) {
- PMD_DRV_LOG(ERR, "mac_addr_add failed, code:%d", ret);
+ PMD_DRV_LOG(ERR, "mac_addr_del failed, code:%d", ret);
return ret;
}
hw->uc_num--;
@@ -467,6 +467,8 @@ zxdh_dev_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
hw->uc_num--;
}
rte_ether_addr_copy(addr, (struct rte_ether_addr *)hw->mac_addr);
+ zxdh_pci_write_dev_config(hw, offsetof(struct zxdh_net_config, mac),
+ &hw->mac_addr, RTE_ETHER_ADDR_LEN);
return ret;
}
@@ -566,7 +568,7 @@ zxdh_dev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
void zxdh_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
- struct zxdh_hw *hw = dev->data->dev_private;
+ struct zxdh_hw *hw = dev->data->dev_private;
struct zxdh_msg_info msg_info = {0};
struct rte_ether_addr *mac_addr = &dev->data->mac_addrs[index];
uint16_t ret = 0;
@@ -2040,11 +2042,6 @@ zxdh_dev_fw_version_get(struct rte_eth_dev *dev,
zxdh_agent_msg_build(hw, ZXDH_FLASH_FIR_VERSION_GET, &msg_info);
- struct zxdh_msg_recviver_mem rsp_data = {
- .recv_buffer = (void *)&reply_info,
- .buffer_len = sizeof(struct zxdh_msg_reply_info),
- };
-
ret = zxdh_send_msg_to_riscv(dev, &msg_info, sizeof(struct zxdh_msg_info),
&reply_info, sizeof(struct zxdh_msg_reply_info),
ZXDH_MODULE_FLASH);
@@ -2053,10 +2050,8 @@ zxdh_dev_fw_version_get(struct rte_eth_dev *dev,
hw->vport.vport, ZXDH_FLASH_FIR_VERSION_GET);
return -1;
}
- struct zxdh_msg_reply_body *ack_msg =
- &(((struct zxdh_msg_reply_info *)rsp_data.recv_buffer)->reply_body);
- memcpy(fw_ver, ack_msg->flash_msg.firmware_version, ZXDH_FWVERS_LEN);
+ memcpy(fw_ver, &reply_info.reply_body.flash_msg, ZXDH_FWVERS_LEN);
snprintf(fw_version, ZXDH_FWVERS_LEN - 1, "%s", fw_ver);
return 0;
diff --git a/drivers/net/zxdh/zxdh_msg.c b/drivers/net/zxdh/zxdh_msg.c
index 96ad638e83..fac08fc30c 100644
--- a/drivers/net/zxdh/zxdh_msg.c
+++ b/drivers/net/zxdh/zxdh_msg.c
@@ -695,7 +695,7 @@ static uint16_t
zxdh_bar_chan_sync_msg_reps_get(uint64_t subchan_addr,
uint64_t recv_buffer, uint16_t buffer_len)
{
- struct zxdh_bar_msg_header msg_header = {0};
+ struct zxdh_bar_msg_header msg_header;
uint16_t msg_id = 0;
uint16_t msg_len = 0;
@@ -1147,13 +1147,9 @@ zxdh_send_msg_to_riscv(struct rte_eth_dev *dev, void *msg_req,
result.recv_buffer = &reply_info;
result.buffer_len = sizeof(reply_info);
}
- struct zxdh_msg_reply_head *reply_head =
- &(((struct zxdh_msg_reply_info *)result.recv_buffer)->reply_head);
- struct zxdh_msg_reply_body *reply_body =
- &(((struct zxdh_msg_reply_info *)result.recv_buffer)->reply_body);
struct zxdh_pci_bar_msg in = {
- .payload_addr = &msg_req,
+ .payload_addr = msg_req,
.payload_len = msg_req_len,
.virt_addr = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_CTRLCH_OFFSET),
.src = hw->is_pf ? ZXDH_MSG_CHAN_END_PF : ZXDH_MSG_CHAN_END_VF,
@@ -1166,15 +1162,6 @@ zxdh_send_msg_to_riscv(struct rte_eth_dev *dev, void *msg_req,
PMD_MSG_LOG(ERR, "Failed to send sync messages or receive response");
return -1;
}
- if (reply_head->flag != ZXDH_MSG_REPS_OK) {
- PMD_MSG_LOG(ERR, "vf[%d] get pf reply failed: reply_head flag : 0x%x(0xff is OK).replylen %d",
- hw->vport.vfid, reply_head->flag, reply_head->reps_len);
- return -1;
- }
- if (reply_body->flag != ZXDH_REPS_SUCC) {
- PMD_MSG_LOG(ERR, "vf[%d] msg processing failed", hw->vfid);
- return -1;
- }
return 0;
}
@@ -2043,7 +2030,8 @@ zxdh_vf_mtr_hw_profile_cfg(struct zxdh_hw *pf_hw __rte_unused,
(struct zxdh_plcr_profile_cfg *)cfg_data;
union zxdh_offload_profile_cfg *plcr_param = &zxdh_plcr_profile_cfg->plcr_param;
- ret = zxdh_np_car_profile_cfg_set(vport,
+ ret = zxdh_np_car_profile_cfg_set(pf_hw->dev_id,
+ vport,
zxdh_plcr_profile_cfg->car_type,
zxdh_plcr_profile_cfg->packet_mode,
zxdh_plcr_profile_cfg->hw_profile_id,
diff --git a/drivers/net/zxdh/zxdh_msg.h b/drivers/net/zxdh/zxdh_msg.h
index 58836bb4b7..ffe2c9472f 100644
--- a/drivers/net/zxdh/zxdh_msg.h
+++ b/drivers/net/zxdh/zxdh_msg.h
@@ -384,9 +384,9 @@ struct zxdh_mtr_profile_info {
};
struct zxdh_msg_reply_body {
- enum zxdh_reps_flag flag;
+ uint8_t flag;
union {
- uint8_t reply_data[ZXDH_MSG_REPLY_BODY_MAX_LEN - sizeof(enum zxdh_reps_flag)];
+ uint8_t reply_data[ZXDH_MSG_REPLY_BODY_MAX_LEN - sizeof(uint8_t)];
struct zxdh_hw_np_stats np_stats;
struct zxdh_link_info_msg link_msg;
struct zxdh_rss_reta rss_reta;
@@ -412,12 +412,12 @@ struct zxdh_vf_init_msg {
uint8_t rss_enable;
};
-struct zxdh_msg_head {
- enum zxdh_msg_type msg_type;
+struct __rte_packed_begin zxdh_msg_head {
+ uint8_t msg_type;
uint16_t vport;
uint16_t vf_id;
uint16_t pcieid;
-};
+} __rte_packed_end;
struct zxdh_port_attr_set_msg {
uint32_t mode;
@@ -455,7 +455,7 @@ struct zxdh_rss_enable {
};
struct zxdh_agent_msg_head {
- enum zxdh_agent_msg_type msg_type;
+ uint8_t msg_type;
uint8_t panel_id;
uint8_t phyport;
uint8_t rsv;
diff --git a/drivers/net/zxdh/zxdh_mtr.c b/drivers/net/zxdh/zxdh_mtr.c
index 3797a5b29b..809456d73f 100644
--- a/drivers/net/zxdh/zxdh_mtr.c
+++ b/drivers/net/zxdh/zxdh_mtr.c
@@ -281,7 +281,7 @@ zxdh_hw_profile_free_direct(struct rte_eth_dev *dev, ZXDH_PROFILE_TYPE car_type,
{
struct zxdh_hw *hw = dev->data->dev_private;
uint16_t vport = hw->vport.vport;
- int ret = zxdh_np_car_profile_id_delete(vport, car_type,
+ int ret = zxdh_np_car_profile_id_delete(hw->dev_id, vport, car_type,
(uint64_t)hw_profile_id);
if (ret) {
PMD_DRV_LOG(ERR, "port %u free hw profile %u failed", vport, hw_profile_id);
@@ -299,7 +299,7 @@ zxdh_hw_profile_alloc_direct(struct rte_eth_dev *dev, ZXDH_PROFILE_TYPE car_type
uint64_t profile_id = HW_PROFILE_MAX;
struct zxdh_hw *hw = dev->data->dev_private;
uint16_t vport = hw->vport.vport;
- int ret = zxdh_np_car_profile_id_add(vport, car_type, &profile_id);
+ int ret = zxdh_np_car_profile_id_add(hw->dev_id, vport, car_type, &profile_id);
if (ret) {
PMD_DRV_LOG(ERR, "port %u alloc hw profile failed", vport);
@@ -551,7 +551,9 @@ zxdh_hw_profile_config_direct(struct rte_eth_dev *dev __rte_unused,
struct zxdh_meter_profile *mp,
struct rte_mtr_error *error)
{
- int ret = zxdh_np_car_profile_cfg_set(mp->hw_profile_owner_vport,
+ struct zxdh_hw *hw = dev->data->dev_private;
+ int ret = zxdh_np_car_profile_cfg_set(hw->dev_id,
+ mp->hw_profile_owner_vport,
car_type, mp->profile.packet_mode,
(uint32_t)hw_profile_id, &mp->plcr_param);
if (ret) {
diff --git a/drivers/net/zxdh/zxdh_np.c b/drivers/net/zxdh/zxdh_np.c
index ab8b3ae688..640c835b8e 100644
--- a/drivers/net/zxdh/zxdh_np.c
+++ b/drivers/net/zxdh/zxdh_np.c
@@ -2350,6 +2350,8 @@ zxdh_np_dev_add(uint32_t dev_id, ZXDH_DEV_TYPE_E dev_type,
rte_spinlock_init(&p_dev_info->dtb_spinlock.spinlock);
+ rte_spinlock_init(&p_dev_info->smmu0_spinlock.spinlock);
+
for (i = 0; i < ZXDH_DTB_QUEUE_NUM_MAX; i++)
rte_spinlock_init(&p_dev_info->dtb_queue_spinlock[i].spinlock);
@@ -3391,6 +3393,32 @@ zxdh_np_reg_read(uint32_t dev_id, uint32_t reg_no,
return rc;
}
+static uint32_t
+zxdh_np_reg_read32(uint32_t dev_id, uint32_t reg_no,
+ uint32_t m_offset, uint32_t n_offset, uint32_t *p_data)
+{
+ uint32_t rc = 0;
+ uint32_t addr = 0;
+ ZXDH_REG_T *p_reg_info = &g_dpp_reg_info[reg_no];
+ uint32_t p_buff[ZXDH_REG_DATA_MAX] = {0};
+ uint32_t reg_real_no = p_reg_info->reg_no;
+ uint32_t reg_type = p_reg_info->flags;
+ uint32_t reg_module = p_reg_info->module_no;
+
+ addr = zxdh_np_reg_get_reg_addr(reg_no, m_offset, n_offset);
+
+ if (reg_module == DTB4K) {
+ rc = p_reg_info->p_read_fun(dev_id, addr, p_data);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "p_reg_info->p_read_fun");
+ } else {
+ rc = zxdh_np_agent_channel_reg_read(dev_id, reg_type, reg_real_no, 4, addr, p_buff);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_agent_channel_reg_read");
+ *p_data = p_buff[0];
+ }
+
+ return rc;
+}
+
static uint32_t
zxdh_np_dtb_queue_vm_info_get(uint32_t dev_id,
uint32_t queue_id,
@@ -10542,9 +10570,9 @@ zxdh_np_se_done_status_check(uint32_t dev_id, uint32_t reg_no, uint32_t pos)
uint32_t done_flag = 0;
while (!done_flag) {
- rc = zxdh_np_reg_read(dev_id, reg_no, 0, 0, &data);
+ rc = zxdh_np_reg_read32(dev_id, reg_no, 0, 0, &data);
if (rc != 0) {
- PMD_DRV_LOG(ERR, "reg_read fail!");
+ PMD_DRV_LOG(ERR, "reg_read32 fail!");
return rc;
}
@@ -10577,10 +10605,17 @@ zxdh_np_se_smmu0_ind_read(uint32_t dev_id,
uint32_t temp_data[4] = {0};
uint32_t *p_temp_data = NULL;
ZXDH_SMMU0_SMMU0_CPU_IND_CMD_T cpu_ind_cmd = {0};
+ ZXDH_SPINLOCK_T *p_ind_spinlock = NULL;
+
+ rc = zxdh_np_dev_opr_spinlock_get(dev_id, ZXDH_DEV_SPINLOCK_T_SMMU0, &p_ind_spinlock);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dev_opr_spinlock_get");
+
+ rte_spinlock_lock(&p_ind_spinlock->spinlock);
rc = zxdh_np_se_done_status_check(dev_id, ZXDH_SMMU0_SMMU0_WR_ARB_CPU_RDYR, 0);
if (rc != ZXDH_OK) {
PMD_DRV_LOG(ERR, "se done status check failed, rc=0x%x.", rc);
+ rte_spinlock_unlock(&p_ind_spinlock->spinlock);
return ZXDH_ERR;
}
@@ -10592,11 +10627,13 @@ zxdh_np_se_smmu0_ind_read(uint32_t dev_id,
switch (rd_mode) {
case ZXDH_ERAM128_OPR_128b:
if ((0xFFFFFFFF - (base_addr)) < (index)) {
+ rte_spinlock_unlock(&p_ind_spinlock->spinlock);
PMD_DRV_LOG(ERR, "index 0x%x is invalid!", index);
return ZXDH_PAR_CHK_INVALID_INDEX;
}
if (base_addr + index > ZXDH_SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1) {
PMD_DRV_LOG(ERR, "index out of range!");
+ rte_spinlock_unlock(&p_ind_spinlock->spinlock);
return ZXDH_ERR;
}
row_index = (index << 7) & ZXDH_ERAM128_BADDR_MASK;
@@ -10604,6 +10641,7 @@ zxdh_np_se_smmu0_ind_read(uint32_t dev_id,
case ZXDH_ERAM128_OPR_64b:
if ((base_addr + (index >> 1)) > ZXDH_SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1) {
PMD_DRV_LOG(ERR, "index out of range!");
+ rte_spinlock_unlock(&p_ind_spinlock->spinlock);
return ZXDH_ERR;
}
row_index = (index << 6) & ZXDH_ERAM128_BADDR_MASK;
@@ -10612,6 +10650,7 @@ zxdh_np_se_smmu0_ind_read(uint32_t dev_id,
case ZXDH_ERAM128_OPR_32b:
if ((base_addr + (index >> 2)) > ZXDH_SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1) {
PMD_DRV_LOG(ERR, "index out of range!");
+ rte_spinlock_unlock(&p_ind_spinlock->spinlock);
return ZXDH_ERR;
}
row_index = (index << 5) & ZXDH_ERAM128_BADDR_MASK;
@@ -10620,6 +10659,7 @@ zxdh_np_se_smmu0_ind_read(uint32_t dev_id,
case ZXDH_ERAM128_OPR_1b:
if ((base_addr + (index >> 7)) > ZXDH_SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1) {
PMD_DRV_LOG(ERR, "index out of range!");
+ rte_spinlock_unlock(&p_ind_spinlock->spinlock);
return ZXDH_ERR;
}
row_index = index & ZXDH_ERAM128_BADDR_MASK;
@@ -10638,10 +10678,12 @@ zxdh_np_se_smmu0_ind_read(uint32_t dev_id,
case ZXDH_ERAM128_OPR_128b:
if ((0xFFFFFFFF - (base_addr)) < (index)) {
PMD_DRV_LOG(ERR, "index 0x%x is invalid!", index);
+ rte_spinlock_unlock(&p_ind_spinlock->spinlock);
return ZXDH_PAR_CHK_INVALID_INDEX;
}
if (base_addr + index > ZXDH_SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1) {
PMD_DRV_LOG(ERR, "index out of range!");
+ rte_spinlock_unlock(&p_ind_spinlock->spinlock);
return ZXDH_ERR;
}
row_index = (index << 7);
@@ -10650,6 +10692,7 @@ zxdh_np_se_smmu0_ind_read(uint32_t dev_id,
case ZXDH_ERAM128_OPR_64b:
if ((base_addr + (index >> 1)) > ZXDH_SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1) {
PMD_DRV_LOG(ERR, "index out of range!");
+ rte_spinlock_unlock(&p_ind_spinlock->spinlock);
return ZXDH_ERR;
}
row_index = (index << 6);
@@ -10658,6 +10701,7 @@ zxdh_np_se_smmu0_ind_read(uint32_t dev_id,
case ZXDH_ERAM128_OPR_32b:
if ((base_addr + (index >> 2)) > ZXDH_SE_SMMU0_ERAM_ADDR_NUM_TOTAL - 1) {
PMD_DRV_LOG(ERR, "index out of range!");
+ rte_spinlock_unlock(&p_ind_spinlock->spinlock);
return ZXDH_ERR;
}
row_index = (index << 5);
@@ -10665,7 +10709,8 @@ zxdh_np_se_smmu0_ind_read(uint32_t dev_id,
break;
case ZXDH_ERAM128_OPR_1b:
PMD_DRV_LOG(ERR, "rd_clr_mode[%u] or rd_mode[%u] error!",
- rd_clr_mode, rd_mode);
+ rd_clr_mode, rd_mode);
+ rte_spinlock_unlock(&p_ind_spinlock->spinlock);
return ZXDH_ERR;
default:
break;
@@ -10680,12 +10725,14 @@ zxdh_np_se_smmu0_ind_read(uint32_t dev_id,
&cpu_ind_cmd);
if (rc != ZXDH_OK) {
PMD_DRV_LOG(ERR, "zxdh_np_reg_write failed, rc=0x%x.", rc);
+ rte_spinlock_unlock(&p_ind_spinlock->spinlock);
return ZXDH_ERR;
}
rc = zxdh_np_se_done_status_check(dev_id, ZXDH_SMMU0_SMMU0_CPU_IND_RD_DONER, 0);
if (rc != ZXDH_OK) {
PMD_DRV_LOG(ERR, "se done status check failed, rc=0x%x.", rc);
+ rte_spinlock_unlock(&p_ind_spinlock->spinlock);
return ZXDH_ERR;
}
@@ -10698,6 +10745,7 @@ zxdh_np_se_smmu0_ind_read(uint32_t dev_id,
p_temp_data + 3 - i);
if (rc != ZXDH_OK) {
PMD_DRV_LOG(ERR, "zxdh_np_reg_write failed, rc=0x%x.", rc);
+ rte_spinlock_unlock(&p_ind_spinlock->spinlock);
return ZXDH_ERR;
}
}
@@ -10736,6 +10784,8 @@ zxdh_np_se_smmu0_ind_read(uint32_t dev_id,
}
}
+ rte_spinlock_unlock(&p_ind_spinlock->spinlock);
+
return rc;
}
@@ -11012,7 +11062,8 @@ zxdh_np_stat_carc_queue_cfg_set(uint32_t dev_id,
}
uint32_t
-zxdh_np_car_profile_id_add(uint32_t vport_id,
+zxdh_np_car_profile_id_add(uint32_t dev_id,
+ uint32_t vport_id,
uint32_t flags,
uint64_t *p_profile_id)
{
@@ -11027,7 +11078,7 @@ zxdh_np_car_profile_id_add(uint32_t vport_id,
PMD_DRV_LOG(ERR, "profile_id point null!");
return ZXDH_PAR_CHK_POINT_NULL;
}
- ret = zxdh_np_agent_channel_plcr_profileid_request(0, vport_id, flags, profile_id);
+ ret = zxdh_np_agent_channel_plcr_profileid_request(dev_id, vport_id, flags, profile_id);
profile_id_h = *(profile_id + 1);
profile_id_l = *profile_id;
@@ -11045,14 +11096,14 @@ zxdh_np_car_profile_id_add(uint32_t vport_id,
}
uint32_t
-zxdh_np_car_profile_cfg_set(uint32_t vport_id __rte_unused,
+zxdh_np_car_profile_cfg_set(uint32_t dev_id,
+ uint32_t vport_id __rte_unused,
uint32_t car_type,
uint32_t pkt_sign,
uint32_t profile_id,
void *p_car_profile_cfg)
{
uint32_t ret = 0;
- uint32_t dev_id = 0;
ret = zxdh_np_agent_channel_plcr_car_rate(dev_id, car_type,
pkt_sign, profile_id, p_car_profile_cfg);
@@ -11065,11 +11116,10 @@ zxdh_np_car_profile_cfg_set(uint32_t vport_id __rte_unused,
}
uint32_t
-zxdh_np_car_profile_id_delete(uint32_t vport_id,
+zxdh_np_car_profile_id_delete(uint32_t dev_id, uint32_t vport_id,
uint32_t flags, uint64_t profile_id)
{
uint32_t ret = 0;
- uint32_t dev_id = 0;
uint32_t profileid = profile_id & 0xFFFF;
ret = zxdh_np_agent_channel_plcr_profileid_release(dev_id, vport_id, flags, profileid);
diff --git a/drivers/net/zxdh/zxdh_np.h b/drivers/net/zxdh/zxdh_np.h
index b1d8b1aef8..1b8f17474d 100644
--- a/drivers/net/zxdh/zxdh_np.h
+++ b/drivers/net/zxdh/zxdh_np.h
@@ -1934,15 +1934,17 @@ uint32_t zxdh_np_stat_ppu_cnt_get_ex(uint32_t dev_id,
uint32_t clr_mode,
uint32_t *p_data);
uint32_t
-zxdh_np_car_profile_id_add(uint32_t vport_id,
+zxdh_np_car_profile_id_add(uint32_t dev_id,
+ uint32_t vport_id,
uint32_t flags,
uint64_t *p_profile_id);
-uint32_t zxdh_np_car_profile_cfg_set(uint32_t vport_id,
+uint32_t zxdh_np_car_profile_cfg_set(uint32_t dev_id,
+ uint32_t vport_id,
uint32_t car_type,
uint32_t pkt_sign,
uint32_t profile_id,
void *p_car_profile_cfg);
-uint32_t zxdh_np_car_profile_id_delete(uint32_t vport_id,
+uint32_t zxdh_np_car_profile_id_delete(uint32_t dev_id, uint32_t vport_id,
uint32_t flags, uint64_t profile_id);
uint32_t zxdh_np_stat_car_queue_cfg_set(uint32_t dev_id,
uint32_t car_type,
diff --git a/drivers/net/zxdh/zxdh_pci.c b/drivers/net/zxdh/zxdh_pci.c
index 3d1a3ff0dd..4ff0f065df 100644
--- a/drivers/net/zxdh/zxdh_pci.c
+++ b/drivers/net/zxdh/zxdh_pci.c
@@ -27,6 +27,23 @@
1ULL << ZXDH_F_NOTIFICATION_DATA | \
1ULL << ZXDH_NET_F_MAC)
+#define ZXDH_PMD_DEFAULT_HOST_FEATURES \
+ (1ULL << ZXDH_NET_F_MRG_RXBUF | \
+ 1ULL << ZXDH_NET_F_STATUS | \
+ 1ULL << ZXDH_NET_F_MQ | \
+ 1ULL << ZXDH_F_ANY_LAYOUT | \
+ 1ULL << ZXDH_F_VERSION_1 | \
+ 1ULL << ZXDH_F_RING_PACKED | \
+ 1ULL << ZXDH_F_IN_ORDER | \
+ 1ULL << ZXDH_F_NOTIFICATION_DATA |\
+ 1ULL << ZXDH_NET_F_MAC | \
+ 1ULL << ZXDH_NET_F_CSUM |\
+ 1ULL << ZXDH_NET_F_GUEST_CSUM |\
+ 1ULL << ZXDH_NET_F_GUEST_TSO4 |\
+ 1ULL << ZXDH_NET_F_GUEST_TSO6 |\
+ 1ULL << ZXDH_NET_F_HOST_TSO4 |\
+ 1ULL << ZXDH_NET_F_HOST_TSO6)
+
static void
zxdh_read_dev_config(struct zxdh_hw *hw, size_t offset,
void *dst, int32_t length)
@@ -391,13 +408,18 @@ zxdh_pci_read_dev_config(struct zxdh_hw *hw, size_t offset, void *dst, int32_t l
ZXDH_VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
}
+void zxdh_pci_write_dev_config(struct zxdh_hw *hw, size_t offset, const void *src, int32_t length)
+{
+ ZXDH_VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
+}
+
void
zxdh_get_pci_dev_config(struct zxdh_hw *hw)
{
uint64_t guest_features = 0;
uint64_t nego_features = 0;
- hw->host_features = zxdh_pci_get_features(hw);
+ hw->host_features = ZXDH_PMD_DEFAULT_HOST_FEATURES;
guest_features = (uint64_t)ZXDH_PMD_DEFAULT_GUEST_FEATURES;
nego_features = guest_features & hw->host_features;
diff --git a/drivers/net/zxdh/zxdh_pci.h b/drivers/net/zxdh/zxdh_pci.h
index 9b8bef6c09..a1834f6615 100644
--- a/drivers/net/zxdh/zxdh_pci.h
+++ b/drivers/net/zxdh/zxdh_pci.h
@@ -162,7 +162,7 @@ void zxdh_pci_read_dev_config(struct zxdh_hw *hw, size_t offset,
int32_t zxdh_read_pci_caps(struct rte_pci_device *dev, struct zxdh_hw *hw);
void zxdh_get_pci_dev_config(struct zxdh_hw *hw);
-
+void zxdh_pci_write_dev_config(struct zxdh_hw *hw, size_t offset, const void *src, int32_t length);
uint16_t zxdh_pci_get_features(struct zxdh_hw *hw);
enum zxdh_msix_status zxdh_pci_msix_detect(struct rte_pci_device *dev);
uint8_t zxdh_pci_isr(struct zxdh_hw *hw);
diff --git a/drivers/net/zxdh/zxdh_rxtx.c b/drivers/net/zxdh/zxdh_rxtx.c
index 1921a23f25..4c79b9e75b 100644
--- a/drivers/net/zxdh/zxdh_rxtx.c
+++ b/drivers/net/zxdh/zxdh_rxtx.c
@@ -818,9 +818,14 @@ zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
seg_num = header->type_hdr.num_buffers;
/* Private queue only handle type hdr */
- hdr_size = ZXDH_TYPE_HDR_SIZE;
- rxm->pkt_len = ((header->type_hdr.port & 0x7f) << 8) +
- header->type_hdr.pd_len;
+ hdr_size = header->type_hdr.pd_len << 1;
+ if (unlikely(hdr_size > lens[i] || hdr_size < ZXDH_TYPE_HDR_SIZE)) {
+ PMD_RX_LOG(ERR, "hdr_size:%u is invalid", hdr_size);
+ rte_pktmbuf_free(rxm);
+ rxvq->stats.errors++;
+ rxvq->stats.invalid_hdr_len_err++;
+ continue;
+ }
rxm->data_off += hdr_size;
rxm->nb_segs = seg_num;
rxm->ol_flags = 0;
diff --git a/drivers/net/zxdh/zxdh_tables.c b/drivers/net/zxdh/zxdh_tables.c
index 253d9ce438..ab91d51948 100644
--- a/drivers/net/zxdh/zxdh_tables.c
+++ b/drivers/net/zxdh/zxdh_tables.c
@@ -8,14 +8,7 @@
#include "zxdh_tables.h"
#include "zxdh_logs.h"
-#define ZXDH_SDT_VPORT_ATT_TABLE 1
-#define ZXDH_SDT_PANEL_ATT_TABLE 2
-#define ZXDH_SDT_RSS_ATT_TABLE 3
-#define ZXDH_SDT_VLAN_ATT_TABLE 4
-#define ZXDH_SDT_BROCAST_ATT_TABLE 6
-#define ZXDH_SDT_UNICAST_ATT_TABLE 10
-#define ZXDH_SDT_MULTICAST_ATT_TABLE 11
-#define ZXDH_SDT_PORT_VLAN_ATT_TABLE 16
+
#define ZXDH_MAC_HASH_INDEX_BASE 64
#define ZXDH_MAC_HASH_INDEX(index) (ZXDH_MAC_HASH_INDEX_BASE + (index))
@@ -40,15 +33,16 @@ zxdh_set_port_attr(struct zxdh_hw *hw, uint16_t vport, struct zxdh_port_attr_tab
{
struct zxdh_dtb_shared_data *dtb_data = &hw->dev_sd->dtb_sd;
union zxdh_virport_num vport_num = (union zxdh_virport_num)vport;
+ uint16_t vfid = zxdh_vport_to_vfid(vport_num);
int ret = 0;
- ZXDH_DTB_ERAM_ENTRY_INFO_T entry = {vport_num.vfid, (uint32_t *)port_attr};
+ ZXDH_DTB_ERAM_ENTRY_INFO_T entry = {vfid, (uint32_t *)port_attr};
ZXDH_DTB_USER_ENTRY_T user_entry_write = {ZXDH_SDT_VPORT_ATT_TABLE, (void *)&entry};
ret = zxdh_np_dtb_table_entry_write(hw->slot_id,
dtb_data->queueid, 1, &user_entry_write);
if (ret != 0)
- PMD_DRV_LOG(ERR, "write vport_att failed vfid:%d failed", vport_num.vfid);
+ PMD_DRV_LOG(ERR, "write vport_att failed vfid:%d failed", vfid);
return ret;
}
@@ -72,6 +66,7 @@ zxdh_port_attr_init(struct rte_eth_dev *dev)
port_attr.mtu = dev->data->mtu;
port_attr.mtu_enable = 1;
port_attr.is_up = 0;
+ port_attr.hash_search_index = hw->hash_search_index;
if (!port_attr.rss_enable)
port_attr.port_base_qid = 0;
@@ -144,6 +139,7 @@ int zxdh_panel_table_init(struct rte_eth_dev *dev)
panel.pf_vfid = zxdh_vport_to_vfid(hw->vport);
panel.mtu_enable = 1;
panel.mtu = dev->data->mtu;
+ panel.port_vfid_1588 = panel.pf_vfid;
ZXDH_DTB_ERAM_ENTRY_INFO_T panel_entry = {
.index = hw->phyport,
@@ -212,13 +208,14 @@ zxdh_get_port_attr(struct zxdh_hw *hw, uint16_t vport, struct zxdh_port_attr_tab
{
struct zxdh_dtb_shared_data *dtb_data = &hw->dev_sd->dtb_sd;
union zxdh_virport_num vport_num = (union zxdh_virport_num)vport;
- ZXDH_DTB_ERAM_ENTRY_INFO_T entry = {vport_num.vfid, (uint32_t *)port_attr};
+ uint16_t vfid = zxdh_vport_to_vfid(vport_num);
+ ZXDH_DTB_ERAM_ENTRY_INFO_T entry = {vfid, (uint32_t *)port_attr};
ZXDH_DTB_USER_ENTRY_T user_entry_get = {ZXDH_SDT_VPORT_ATT_TABLE, &entry};
int ret;
ret = zxdh_np_dtb_table_entry_get(hw->slot_id, dtb_data->queueid, &user_entry_get, 1);
if (ret != 0)
- PMD_DRV_LOG(ERR, "get port_attr vfid:%d failed, ret:%d", vport_num.vfid, ret);
+ PMD_DRV_LOG(ERR, "get port_attr vfid:%d failed, ret:%d", vfid, ret);
return ret;
}
@@ -229,7 +226,8 @@ zxdh_delete_port_attr(struct zxdh_hw *hw, uint16_t vport,
{
struct zxdh_dtb_shared_data *dtb_data = &hw->dev_sd->dtb_sd;
union zxdh_virport_num vport_num = (union zxdh_virport_num)vport;
- ZXDH_DTB_ERAM_ENTRY_INFO_T entry = {vport_num.vfid, (uint32_t *)port_attr};
+ uint16_t vfid = zxdh_vport_to_vfid(vport_num);
+ ZXDH_DTB_ERAM_ENTRY_INFO_T entry = {vfid, (uint32_t *)port_attr};
ZXDH_DTB_USER_ENTRY_T user_entry = {
.sdt_no = ZXDH_SDT_VPORT_ATT_TABLE,
.p_entry_data = (void *)&entry
@@ -247,9 +245,9 @@ zxdh_add_mac_table(struct zxdh_hw *hw, uint16_t vport, struct rte_ether_addr *ad
struct zxdh_mac_unicast_table unicast_table = {0};
struct zxdh_mac_multicast_table multicast_table = {0};
union zxdh_virport_num vport_num = (union zxdh_virport_num)vport;
+ uint16_t vfid = zxdh_vport_to_vfid(vport_num);
uint32_t ret;
uint16_t group_id = 0;
- uint16_t vfid = vport_num.vfid;
if (rte_is_unicast_ether_addr(addr)) {
rte_memcpy(unicast_table.key.dmac_addr, addr, sizeof(struct rte_ether_addr));
@@ -351,15 +349,17 @@ zxdh_del_mac_table(struct zxdh_hw *hw, uint16_t vport, struct rte_ether_addr *ad
struct zxdh_mac_unicast_table unicast_table = {0};
struct zxdh_mac_multicast_table multicast_table = {0};
union zxdh_virport_num vport_num = (union zxdh_virport_num)vport;
- uint32_t ret, del_flag = 0;
- uint16_t group_id = 0;
union zxdh_virport_num port = (union zxdh_virport_num)vport;
uint16_t vfid = zxdh_vport_to_vfid(port);
+ uint32_t ret, del_flag = 0;
+ uint16_t group_id = 0;
if (rte_is_unicast_ether_addr(addr)) {
rte_memcpy(unicast_table.key.dmac_addr, addr, sizeof(struct rte_ether_addr));
unicast_table.key.sriov_vlan_id = srv_vlanid;
unicast_table.key.sriov_vlan_tpid = srv_tpid;
+ unicast_table.entry.hit_flag = 0;
+ unicast_table.entry.vfid = rte_cpu_to_be_16(vfid & 0x7ff);
ZXDH_DTB_HASH_ENTRY_INFO_T dtb_hash_entry = {
.p_actu_key = (uint8_t *)&unicast_table.key,
@@ -800,6 +800,7 @@ zxdh_rss_table_set(struct zxdh_hw *hw, uint16_t vport, struct zxdh_rss_reta *rss
struct zxdh_dtb_shared_data *dtb_data = &hw->dev_sd->dtb_sd;
struct zxdh_rss_to_vqid_table rss_vqid = {0};
union zxdh_virport_num vport_num = (union zxdh_virport_num)vport;
+ uint16_t vfid = zxdh_vport_to_vfid(vport_num);
int ret = 0;
for (uint16_t i = 0; i < RTE_ETH_RSS_RETA_SIZE_256 / 8; i++) {
@@ -820,7 +821,7 @@ zxdh_rss_table_set(struct zxdh_hw *hw, uint16_t vport, struct zxdh_rss_reta *rss
rss_vqid.vqm_qid[0] |= 0x8000;
#endif
ZXDH_DTB_ERAM_ENTRY_INFO_T entry = {
- .index = vport_num.vfid * 32 + i,
+ .index = vfid * 32 + i,
.p_data = (uint32_t *)&rss_vqid
};
ZXDH_DTB_USER_ENTRY_T user_entry_write = {
@@ -830,7 +831,7 @@ zxdh_rss_table_set(struct zxdh_hw *hw, uint16_t vport, struct zxdh_rss_reta *rss
ret = zxdh_np_dtb_table_entry_write(hw->slot_id,
dtb_data->queueid, 1, &user_entry_write);
if (ret != 0) {
- PMD_DRV_LOG(ERR, "write rss base qid failed vfid:%d", vport_num.vfid);
+ PMD_DRV_LOG(ERR, "write rss base qid failed vfid:%d", vfid);
return ret;
}
}
@@ -843,16 +844,17 @@ zxdh_rss_table_get(struct zxdh_hw *hw, uint16_t vport, struct zxdh_rss_reta *rss
struct zxdh_dtb_shared_data *dtb_data = &hw->dev_sd->dtb_sd;
struct zxdh_rss_to_vqid_table rss_vqid = {0};
union zxdh_virport_num vport_num = (union zxdh_virport_num)vport;
+ uint16_t vfid = zxdh_vport_to_vfid(vport_num);
int ret = 0;
for (uint16_t i = 0; i < RTE_ETH_RSS_RETA_SIZE_256 / 8; i++) {
- ZXDH_DTB_ERAM_ENTRY_INFO_T entry = {vport_num.vfid * 32 + i, (uint32_t *)&rss_vqid};
+ ZXDH_DTB_ERAM_ENTRY_INFO_T entry = {vfid * 32 + i, (uint32_t *)&rss_vqid};
ZXDH_DTB_USER_ENTRY_T user_entry = {ZXDH_SDT_RSS_ATT_TABLE, &entry};
ret = zxdh_np_dtb_table_entry_get(hw->slot_id,
dtb_data->queueid, &user_entry, 1);
if (ret != 0) {
- PMD_DRV_LOG(ERR, "get rss tbl failed, vfid:%d", vport_num.vfid);
+ PMD_DRV_LOG(ERR, "get rss tbl failed, vfid:%d", vfid);
return -1;
}
diff --git a/drivers/net/zxdh/zxdh_tables.h b/drivers/net/zxdh/zxdh_tables.h
index 2f2ada3a9f..cb34e38be8 100644
--- a/drivers/net/zxdh/zxdh_tables.h
+++ b/drivers/net/zxdh/zxdh_tables.h
@@ -9,7 +9,13 @@
/* eram */
#define ZXDH_SDT_VPORT_ATT_TABLE 1
-
+#define ZXDH_SDT_PANEL_ATT_TABLE 2
+#define ZXDH_SDT_RSS_ATT_TABLE 3
+#define ZXDH_SDT_VLAN_ATT_TABLE 4
+#define ZXDH_SDT_BROCAST_ATT_TABLE 6
+#define ZXDH_SDT_UNICAST_ATT_TABLE 10
+#define ZXDH_SDT_MULTICAST_ATT_TABLE 11
+#define ZXDH_SDT_PORT_VLAN_ATT_TABLE 16
/* hash */
#define ZXDH_SDT_L2_ENTRY_TABLE0 64
#define ZXDH_SDT_L2_ENTRY_TABLE1 65
@@ -80,8 +86,6 @@
#define ZXDH_MTR_STATS_EGRESS_BASE 0x7481
#define ZXDH_MTR_STATS_INGRESS_BASE 0x7C81
-extern struct zxdh_dtb_shared_data g_dtb_data;
-
struct zxdh_port_vlan_table {
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
uint16_t business_vlan_tpid:16;
@@ -233,19 +237,51 @@ struct zxdh_port_attr_table {
};
struct zxdh_panel_table {
- uint16_t port_vfid_1588 : 11,
- rsv2 : 5;
- uint16_t pf_vfid : 11,
- rsv1 : 1,
- enable_1588_tc : 2,
- trust_mode : 1,
- hit_flag : 1;
- uint32_t mtu : 16,
- mtu_enable : 1,
- rsv : 3,
- tm_base_queue : 12;
- uint32_t rsv_1;
- uint32_t rsv_2;
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ uint16_t port_vfid_1588 : 11,
+ rsv2 : 5;
+ uint16_t rsv1 : 11,
+ tm_shape_enable : 1,
+ enable_1588_tc : 2,
+ trust_mode : 1,
+ hit_flag : 1;
+ uint16_t mtu : 16;
+ uint16_t mtu_enable : 1,
+ rsv : 3,
+ tm_base_queue : 12;
+ uint16_t lacp_pf_qid : 12,
+ rsv5 : 4;
+ uint16_t lacp_pf_vfid : 11,
+ rsv6 : 2,
+ member_port_up : 1,
+ bond_link_up : 1,
+ hw_bond_enable : 1;
+ uint16_t rsv3 : 16;
+ uint16_t pf_vfid : 11,
+ rsv4 : 5;
+#else
+ uint16_t rsv1 : 11,
+ tm_shape_enable : 1,
+ enable_1588_tc : 2,
+ trust_mode : 1,
+ hit_flag : 1;
+ uint16_t port_vfid_1588 : 11,
+ rsv2 : 5;
+ uint16_t mtu_enable : 1,
+ rsv : 3,
+ tm_base_queue : 12;
+ uint16_t mtu : 16;
+ uint16_t lacp_pf_vfid : 11,
+ rsv6 : 2,
+ member_port_up : 1,
+ bond_link_up : 1,
+ hw_bond_enable : 1;
+ uint16_t lacp_pf_qid : 12,
+ rsv5 : 4;
+ uint16_t pf_vfid : 11,
+ rsv4 : 5;
+ uint16_t rsv3 : 16;
+#endif
}; /* 16B */
struct zxdh_mac_unicast_key {
--
2.27.0
[-- Attachment #1.1.2: Type: text/html, Size: 80320 bytes --]
next prev parent reply other threads:[~2025-03-17 15:16 UTC|newest]
Thread overview: 79+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-02-10 1:44 [PATCH v1 01/14] net/zxdh: add network processor registers ops Bingbin Chen
2025-02-10 1:46 ` [PATCH v1 02/14] net/zxdh: support compatibility check Bingbin Chen
2025-02-10 17:25 ` Stephen Hemminger
2025-02-10 1:47 ` [PATCH v1 03/14] net/zxdh: add agent channel Bingbin Chen
2025-02-10 17:28 ` Stephen Hemminger
2025-02-10 17:30 ` Stephen Hemminger
2025-02-10 17:31 ` Stephen Hemminger
2025-02-10 18:23 ` Stephen Hemminger
2025-02-10 1:47 ` [PATCH v1 04/14] net/zxdh: modify dtb queue ops Bingbin Chen
2025-02-10 17:31 ` Stephen Hemminger
2025-02-10 1:48 ` [PATCH v1 05/14] net/zxdh: add tables dump address ops Bingbin Chen
2025-02-10 17:33 ` Stephen Hemminger
2025-02-10 1:50 ` [PATCH v1 06/14] net/zxdh: add eram tables ops Bingbin Chen
2025-02-10 1:50 ` [PATCH v1 07/14] net/zxdh: get flow tables resources Bingbin Chen
2025-02-10 17:35 ` Stephen Hemminger
2025-02-10 17:35 ` Stephen Hemminger
2025-02-10 1:50 ` [PATCH v1 08/14] net/zxdh: support hash resources configuration Bingbin Chen
2025-02-10 17:36 ` Stephen Hemminger
2025-02-10 1:50 ` [PATCH v1 09/14] net/zxdh: implement tables initialization Bingbin Chen
2025-02-10 17:40 ` Stephen Hemminger
2025-02-10 17:43 ` Stephen Hemminger
2025-02-10 1:50 ` [PATCH v1 10/14] net/zxdh: support hash tables write and delete ops Bingbin Chen
2025-02-10 17:45 ` Stephen Hemminger
2025-02-10 1:50 ` [PATCH v1 11/14] net/zxdh: get hash table entry result Bingbin Chen
2025-02-10 17:46 ` Stephen Hemminger
2025-02-10 1:50 ` [PATCH v1 12/14] net/zxdh: delete all hash entries Bingbin Chen
2025-02-10 17:47 ` Stephen Hemminger
2025-02-10 1:50 ` [PATCH v1 13/14] net/zxdh: add acl tables ops Bingbin Chen
2025-02-10 1:50 ` [PATCH v1 14/14] net/zxdh: clean stat values Bingbin Chen
2025-02-10 17:50 ` Stephen Hemminger
2025-02-10 17:50 ` Stephen Hemminger
2025-02-10 18:19 ` Stephen Hemminger
2025-02-22 7:22 ` [PATCH v2 00/14] add network processor ops Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 01/14] net/zxdh: add network processor registers ops Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 02/14] net/zxdh: support compatibility check Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 03/14] net/zxdh: add agent channel Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 04/14] net/zxdh: modify dtb queue ops Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 05/14] net/zxdh: add tables dump address ops Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 06/14] net/zxdh: add eram tables ops Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 07/14] net/zxdh: get flow tables resources Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 08/14] net/zxdh: support hash resources configuration Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 09/14] net/zxdh: implement tables initialization Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 10/14] net/zxdh: support hash tables write and delete ops Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 11/14] net/zxdh: get hash table entry result Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 12/14] net/zxdh: delete all hash entries Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 13/14] net/zxdh: add acl tables ops Bingbin Chen
2025-02-22 7:22 ` [PATCH v2 14/14] net/zxdh: clean stat values Bingbin Chen
2025-02-22 17:34 ` Stephen Hemminger
2025-03-05 8:13 ` [PATCH v3 00/14] net/zxdh: add network processor ops Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 01/14] net/zxdh: add network processor registers ops Bingbin Chen
2025-03-17 14:57 ` [PATCH v4 00/14] net/zxdh: add network processor ops Bingbin Chen
2025-03-17 14:57 ` [PATCH v4 01/14] net/zxdh: add network processor registers ops Bingbin Chen
2025-03-17 14:57 ` [PATCH v4 02/14] net/zxdh: support compatibility check Bingbin Chen
2025-03-17 14:57 ` [PATCH v4 03/14] net/zxdh: add agent channel Bingbin Chen
2025-03-17 14:57 ` [PATCH v4 04/14] net/zxdh: modify dtb queue ops Bingbin Chen
2025-03-17 14:57 ` [PATCH v4 05/14] net/zxdh: add tables dump address ops Bingbin Chen
2025-03-17 14:57 ` [PATCH v4 06/14] net/zxdh: add eram tables ops Bingbin Chen
2025-03-17 14:57 ` [PATCH v4 07/14] net/zxdh: get flow tables resources Bingbin Chen
2025-03-17 14:57 ` [PATCH v4 08/14] net/zxdh: support hash resources configuration Bingbin Chen
2025-03-17 14:57 ` [PATCH v4 09/14] net/zxdh: implement tables initialization Bingbin Chen
2025-03-17 14:57 ` [PATCH v4 10/14] net/zxdh: support hash tables write and delete ops Bingbin Chen
2025-03-17 14:57 ` [PATCH v4 11/14] net/zxdh: get hash table entry result Bingbin Chen
2025-03-17 14:58 ` [PATCH v4 12/14] net/zxdh: delete all hash entries Bingbin Chen
2025-03-17 14:58 ` [PATCH v4 13/14] net/zxdh: add acl tables ops Bingbin Chen
2025-03-17 14:58 ` Bingbin Chen [this message]
2025-03-05 8:13 ` [PATCH v3 02/14] net/zxdh: support compatibility check Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 03/14] net/zxdh: add agent channel Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 04/14] net/zxdh: modify dtb queue ops Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 05/14] net/zxdh: add tables dump address ops Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 06/14] net/zxdh: add eram tables ops Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 07/14] net/zxdh: get flow tables resources Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 08/14] net/zxdh: support hash resources configuration Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 09/14] net/zxdh: implement tables initialization Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 10/14] net/zxdh: support hash tables write and delete ops Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 11/14] net/zxdh: get hash table entry result Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 12/14] net/zxdh: delete all hash entries Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 13/14] net/zxdh: add acl tables ops Bingbin Chen
2025-03-05 8:13 ` [PATCH v3 14/14] net/zxdh: modify parameters of the plcr function Bingbin Chen
2025-03-10 23:19 ` [PATCH v1 01/14] net/zxdh: add network processor registers ops Stephen Hemminger
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250317145802.1819809-15-chen.bingbin@zte.com.cn \
--to=chen.bingbin@zte.com.cn \
--cc=dev@dpdk.org \
--cc=stephen@networkplumber.org \
--cc=wang.junlong1@zte.com.cn \
--cc=yang.yonggang@zte.com.cn \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).