* [PATCH v1 0/2] add support flow director ops
@ 2025-06-17 9:31 Bingbin Chen
2025-06-17 9:32 ` [PATCH v1 1/2] net/zxdh: npsdk add flow director table ops Bingbin Chen
` (2 more replies)
0 siblings, 3 replies; 9+ messages in thread
From: Bingbin Chen @ 2025-06-17 9:31 UTC (permalink / raw)
To: stephen, wang.junlong1, yang.yonggang; +Cc: dev, Bingbin Chen
V1:
- Add support for flow director ops.
Bingbin Chen (2):
net/zxdh: npsdk add flow director table ops
net/zxdh: add support flow director ops
doc/guides/nics/features/zxdh.ini | 16 +
doc/guides/nics/zxdh.rst | 1 +
drivers/net/zxdh/meson.build | 1 +
drivers/net/zxdh/zxdh_common.h | 1 +
drivers/net/zxdh/zxdh_ethdev.c | 27 +
drivers/net/zxdh/zxdh_ethdev.h | 13 +-
drivers/net/zxdh/zxdh_ethdev_ops.c | 2 +-
drivers/net/zxdh/zxdh_ethdev_ops.h | 1 +
drivers/net/zxdh/zxdh_flow.c | 2004 ++++++++++++++++++++++++++++
drivers/net/zxdh/zxdh_flow.h | 237 ++++
drivers/net/zxdh/zxdh_msg.c | 263 +++-
drivers/net/zxdh/zxdh_msg.h | 31 +-
drivers/net/zxdh/zxdh_np.c | 1664 +++++++++++++++++++++++
drivers/net/zxdh/zxdh_np.h | 42 +-
drivers/net/zxdh/zxdh_tables.h | 10 +-
15 files changed, 4241 insertions(+), 72 deletions(-)
create mode 100644 drivers/net/zxdh/zxdh_flow.c
create mode 100644 drivers/net/zxdh/zxdh_flow.h
--
2.27.0
* [PATCH v1 1/2] net/zxdh: npsdk add flow director table ops
2025-06-17 9:31 [PATCH v1 0/2] add support flow director ops Bingbin Chen
@ 2025-06-17 9:32 ` Bingbin Chen
2025-06-17 14:07 ` Stephen Hemminger
2025-06-17 14:08 ` Stephen Hemminger
2025-06-17 9:32 ` [PATCH v1 2/2] net/zxdh: add support flow director ops Bingbin Chen
2025-06-18 7:49 ` [PATCH v2 0/2] " Bingbin Chen
2 siblings, 2 replies; 9+ messages in thread
From: Bingbin Chen @ 2025-06-17 9:32 UTC (permalink / raw)
To: stephen, wang.junlong1, yang.yonggang; +Cc: dev, Bingbin Chen
Implement flow director table entry write, delete, and get
operations through the DTB channel.
Signed-off-by: Bingbin Chen <chen.bingbin@zte.com.cn>
---
drivers/net/zxdh/zxdh_np.c | 1664 ++++++++++++++++++++++++++++++++++++
drivers/net/zxdh/zxdh_np.h | 42 +-
2 files changed, 1705 insertions(+), 1 deletion(-)
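[Editorial note, not part of the patch: a minimal sketch of how a caller
might drive the new DTB ACL helpers declared below in zxdh_np.h. The
wrapper name, error handling and the rd_mode value are illustrative
assumptions only.]

    static int
    zxdh_fd_index_lifecycle_sketch(uint32_t dev_id, uint32_t queue_id,
                    uint32_t sdt_no, uint32_t vport, uint32_t counter_id)
    {
            uint32_t handle = 0;
            uint32_t rc;

            /* Ask the agent channel for a free eTCAM index owned by this vport. */
            rc = zxdh_np_dtb_acl_index_request(dev_id, sdt_no, vport, &handle);
            if (rc != ZXDH_OK)
                    return -1;

            /*
             * The PMD would now build key/mask/AS data for 'handle' and push it
             * through the DTB write path (patch 2/2 does this from zxdh_flow.c).
             */

            /* Per-rule teardown: give the index back to the agent. */
            rc = zxdh_np_dtb_acl_index_release(dev_id, sdt_no, vport, handle);

            /*
             * Port-close teardown: wipe entries, counters and every index still
             * held by the vport; rd_mode 1 mirrors what the close path in
             * patch 2/2 passes.
             */
            rc |= zxdh_np_dtb_acl_offline_delete(dev_id, queue_id, sdt_no, vport,
                            counter_id, 1);

            return rc == ZXDH_OK ? 0 : -1;
    }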
diff --git a/drivers/net/zxdh/zxdh_np.c b/drivers/net/zxdh/zxdh_np.c
index 66902e7e92..48867edd5e 100644
--- a/drivers/net/zxdh/zxdh_np.c
+++ b/drivers/net/zxdh/zxdh_np.c
@@ -3055,6 +3055,82 @@ zxdh_np_agent_channel_se_res_get(uint32_t dev_id,
return msg_result;
}
+static uint32_t
+zxdh_np_agent_channel_acl_index_request(uint32_t dev_id,
+ uint32_t sdt_no,
+ uint32_t vport,
+ uint32_t *p_index)
+{
+ uint32_t rc = ZXDH_OK;
+
+ uint32_t rsp_buff[2] = {0};
+ uint32_t msg_result = 0;
+ uint32_t acl_index = 0;
+ ZXDH_AGENT_CHANNEL_ACL_MSG_T msgcfg = {
+ .dev_id = 0,
+ .type = ZXDH_ACL_MSG,
+ .oper = ZXDH_ACL_INDEX_REQUEST,
+ .vport = vport,
+ .sdt_no = sdt_no,
+ };
+ ZXDH_AGENT_CHANNEL_MSG_T agent_msg = {
+ .msg = (void *)&msgcfg,
+ .msg_len = sizeof(ZXDH_AGENT_CHANNEL_ACL_MSG_T),
+ };
+
+ rc = zxdh_np_agent_channel_sync_send(dev_id, &agent_msg, rsp_buff, sizeof(rsp_buff));
+ if (rc != ZXDH_OK) {
+ PMD_DRV_LOG(ERR, "agent send msg failed");
+ return ZXDH_ERR;
+ }
+
+ msg_result = rsp_buff[0];
+ acl_index = rsp_buff[1];
+
+ PMD_DRV_LOG(DEBUG, "dev_id: %d, msg_result: %d", dev_id, msg_result);
+ PMD_DRV_LOG(DEBUG, "dev_id: %d, acl_index: %d", dev_id, acl_index);
+
+ *p_index = acl_index;
+
+ return msg_result;
+}
+
+static uint32_t
+zxdh_np_agent_channel_acl_index_release(uint32_t dev_id,
+ uint32_t rel_type,
+ uint32_t sdt_no,
+ uint32_t vport,
+ uint32_t index)
+{
+ uint32_t rc = ZXDH_OK;
+
+ uint32_t msg_result = 0;
+ uint32_t rsp_buff[2] = {0};
+ ZXDH_AGENT_CHANNEL_ACL_MSG_T msgcfg = {
+ .dev_id = 0,
+ .type = ZXDH_ACL_MSG,
+ .oper = rel_type,
+ .index = index,
+ .sdt_no = sdt_no,
+ .vport = vport,
+ };
+ ZXDH_AGENT_CHANNEL_MSG_T agent_msg = {
+ .msg = (void *)&msgcfg,
+ .msg_len = sizeof(ZXDH_AGENT_CHANNEL_ACL_MSG_T),
+ };
+
+ rc = zxdh_np_agent_channel_sync_send(dev_id, &agent_msg, rsp_buff, sizeof(rsp_buff));
+ if (rc != ZXDH_OK) {
+ PMD_DRV_LOG(ERR, "agent send msg failed");
+ return ZXDH_ERR;
+ }
+
+ msg_result = rsp_buff[0];
+ PMD_DRV_LOG(DEBUG, "msg_result: %d", msg_result);
+
+ return msg_result;
+}
+
static ZXDH_DTB_MGR_T *
zxdh_np_dtb_mgr_get(uint32_t dev_id)
{
@@ -6500,6 +6576,11 @@ zxdh_np_dtb_table_entry_delete(uint32_t dev_id,
if (rc == ZXDH_HASH_RC_DEL_SRHFAIL)
continue;
break;
+ case ZXDH_SDT_TBLT_ETCAM:
+ rc = zxdh_np_dtb_acl_one_entry(dev_id, sdt_no,
+ ZXDH_DTB_ITEM_DELETE, pentry->p_entry_data,
+ &dtb_len, p_data_buff);
+ continue;
default:
PMD_DRV_LOG(ERR, "SDT table_type[ %u ] is invalid!", tbl_type);
rte_free(p_data_buff);
@@ -11204,3 +11285,1586 @@ zxdh_np_stat_car_queue_cfg_set(uint32_t dev_id,
return rc;
}
+
+uint32_t
+zxdh_np_dtb_acl_index_request(uint32_t dev_id,
+ uint32_t sdt_no,
+ uint32_t vport,
+ uint32_t *p_index)
+{
+ uint32_t rc = ZXDH_OK;
+ uint32_t index = 0;
+ uint32_t eram_sdt_no = 0;
+ ZXDH_SPINLOCK_T *p_dtb_spinlock = NULL;
+ ZXDH_DEV_SPINLOCK_TYPE_E spinlock = ZXDH_DEV_SPINLOCK_T_DTB;
+ ZXDH_SDT_TBL_ETCAM_T sdt_acl = {0};
+ ZXDH_SDT_TBL_ERAM_T sdt_eram = {0};
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, sdt_no, &sdt_acl);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+ if (sdt_acl.table_type != ZXDH_SDT_TBLT_ETCAM) {
+ PMD_DRV_LOG(ERR, "SDT[%d] table_type[ %d ] is not etcam table!",
+ sdt_no, sdt_acl.table_type);
+ return ZXDH_ERR;
+ }
+
+ eram_sdt_no = zxdh_np_apt_get_sdt_partner(dev_id, sdt_no);
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, eram_sdt_no, &sdt_eram);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+ if (sdt_eram.table_type != ZXDH_SDT_TBLT_ERAM) {
+ PMD_DRV_LOG(ERR, "SDT[%d] table_type[ %d ] is not eram table!",
+ eram_sdt_no, sdt_eram.table_type);
+ return ZXDH_ERR;
+ }
+
+ rc = zxdh_np_dev_opr_spinlock_get(dev_id, (uint32_t)spinlock, &p_dtb_spinlock);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dev_opr_spinlock_get");
+
+ rte_spinlock_lock(&p_dtb_spinlock->spinlock);
+ rc = zxdh_np_agent_channel_acl_index_request(dev_id, sdt_no, vport, &index);
+ rte_spinlock_unlock(&p_dtb_spinlock->spinlock);
+
+ *p_index = index;
+
+ return rc;
+}
+
+uint32_t
+zxdh_np_dtb_acl_index_release(uint32_t dev_id,
+ uint32_t sdt_no,
+ uint32_t vport,
+ uint32_t index)
+{
+ uint32_t rc = ZXDH_OK;
+ uint32_t eram_sdt_no = 0;
+ ZXDH_SPINLOCK_T *p_dtb_spinlock = NULL;
+ ZXDH_DEV_SPINLOCK_TYPE_E spinlock = ZXDH_DEV_SPINLOCK_T_DTB;
+ ZXDH_SDT_TBL_ETCAM_T sdt_acl = {0};
+ ZXDH_SDT_TBL_ERAM_T sdt_eram = {0};
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, sdt_no, &sdt_acl);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+ if (sdt_acl.table_type != ZXDH_SDT_TBLT_ETCAM) {
+ PMD_DRV_LOG(ERR, "SDT[%d] table_type[ %d ] is not etcam table!",
+ sdt_no, sdt_acl.table_type);
+ return ZXDH_ERR;
+ }
+
+ eram_sdt_no = zxdh_np_apt_get_sdt_partner(dev_id, sdt_no);
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, eram_sdt_no, &sdt_eram);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+ if (sdt_eram.table_type != ZXDH_SDT_TBLT_ERAM) {
+ PMD_DRV_LOG(ERR, "SDT[%d] table_type[ %d ] is not eram table!",
+ eram_sdt_no, sdt_eram.table_type);
+ return ZXDH_ERR;
+ }
+
+ rc = zxdh_np_dev_opr_spinlock_get(dev_id, (uint32_t)spinlock, &p_dtb_spinlock);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dev_opr_spinlock_get");
+
+ rte_spinlock_lock(&p_dtb_spinlock->spinlock);
+
+ rc = zxdh_np_agent_channel_acl_index_release(dev_id,
+ ZXDH_ACL_INDEX_RELEASE, sdt_no, vport, index);
+
+ rte_spinlock_unlock(&p_dtb_spinlock->spinlock);
+
+ return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_sdt_eram_table_dump(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t sdt_no,
+ uint32_t start_index,
+ uint32_t depth,
+ uint32_t *p_data,
+ uint32_t *element_id)
+{
+ uint32_t rc = ZXDH_OK;
+ uint32_t eram_base_addr = 0;
+ uint32_t dump_addr_128bit = 0;
+ uint32_t dump_item_index = 0;
+ uint32_t dump_data_len = 0;
+ uint32_t dump_desc_len = 0;
+ uint64_t dump_sdt_phy_addr = 0;
+ uint64_t dump_sdt_vir_addr = 0;
+ uint32_t dump_addr_size = 0;
+ uint32_t dump_dst_phy_haddr = 0;
+ uint32_t dump_dst_phy_laddr = 0;
+ uint8_t form_buff[ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8] = {0};
+ ZXDH_SDT_TBL_ERAM_T sdt_eram = {0};
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, sdt_no, &sdt_eram);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+
+ eram_base_addr = sdt_eram.eram_base_addr;
+ dump_addr_128bit = eram_base_addr + start_index;
+
+ rc = zxdh_np_dtb_dump_sdt_addr_get(dev_id,
+ queue_id,
+ sdt_no,
+ &dump_sdt_phy_addr,
+ &dump_sdt_vir_addr,
+ &dump_addr_size);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_dump_sdt_addr_get");
+
+ memset((uint8_t *)dump_sdt_vir_addr, 0, dump_addr_size);
+ rc = zxdh_np_dtb_tab_up_free_item_get(dev_id, queue_id, &dump_item_index);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_tab_up_free_item_get");
+ PMD_DRV_LOG(DEBUG, "dump queue id %d, element_id is: %d.",
+ queue_id, dump_item_index);
+
+ *element_id = dump_item_index;
+
+ rc = zxdh_np_dtb_tab_up_item_user_addr_set(dev_id,
+ queue_id,
+ dump_item_index,
+ dump_sdt_phy_addr,
+ dump_sdt_vir_addr);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_tab_up_item_addr_set");
+
+ rc = zxdh_np_dtb_tab_up_item_addr_get(dev_id, queue_id, dump_item_index,
+ &dump_dst_phy_haddr, &dump_dst_phy_laddr);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_tab_up_item_addr_get");
+
+ rc = zxdh_np_dtb_smmu0_dump_info_write(dev_id,
+ dump_addr_128bit,
+ depth,
+ dump_dst_phy_haddr,
+ dump_dst_phy_laddr,
+ (uint32_t *)form_buff);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_smmu0_dump_info_write");
+
+ dump_data_len = depth * 128 / 32;
+ dump_desc_len = ZXDH_DTB_LEN_POS_SETP / 4;
+
+ if (dump_data_len * 4 > dump_addr_size) {
+ PMD_DRV_LOG(ERR, "eram dump size is too small!");
+ return ZXDH_RC_DTB_DUMP_SIZE_SMALL;
+ }
+
+ rc = zxdh_np_dtb_write_dump_desc_info(dev_id,
+ queue_id,
+ dump_item_index,
+ (uint32_t *)form_buff,
+ dump_data_len,
+ dump_desc_len,
+ p_data);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_write_dump_desc_info");
+
+ return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_eram_table_dump(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t sdt_no,
+ ZXDH_DTB_DUMP_INDEX_T start_index,
+ ZXDH_DTB_ERAM_ENTRY_INFO_T *p_dump_data_arr,
+ uint32_t *entry_num,
+ __rte_unused ZXDH_DTB_DUMP_INDEX_T *next_start_index,
+ uint32_t *finish_flag)
+{
+ uint32_t rc = ZXDH_OK;
+ uint32_t i = 0;
+ uint32_t dump_mode = 0;
+ uint32_t eram_table_depth = 0;
+ uint32_t start_index_128bit = 0;
+ uint32_t row_index = 0;
+ uint32_t col_index = 0;
+ uint32_t dump_depth_128bit = 0;
+ uint32_t dump_depth = 0;
+ uint32_t element_id = 0;
+ uint8_t *dump_data_buff = NULL;
+ uint8_t *temp_data = NULL;
+ uint32_t remain = 0;
+ uint32_t *buff = NULL;
+
+ ZXDH_DTB_ERAM_ENTRY_INFO_T *p_dump_user_data = NULL;
+ ZXDH_SDT_TBL_ERAM_T sdt_eram = {0};
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, sdt_no, &sdt_eram);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+
+ dump_mode = sdt_eram.eram_mode;
+ eram_table_depth = sdt_eram.eram_table_depth;
+
+ zxdh_np_eram_index_cal(dump_mode, eram_table_depth,
+ &dump_depth_128bit, &col_index);
+
+ zxdh_np_eram_index_cal(dump_mode, start_index.index,
+ &start_index_128bit, &col_index);
+
+ dump_depth = dump_depth_128bit - start_index_128bit;
+
+ dump_data_buff = (uint8_t *)rte_zmalloc(NULL, dump_depth * ZXDH_DTB_LEN_POS_SETP, 0);
+ if (dump_data_buff == NULL) {
+ PMD_DRV_LOG(ERR, "%s point null!", __func__);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ rc = zxdh_np_dtb_sdt_eram_table_dump(dev_id,
+ queue_id,
+ sdt_no,
+ start_index_128bit,
+ dump_depth,
+ (uint32_t *)dump_data_buff,
+ &element_id);
+
+ if (dump_mode == ZXDH_ERAM128_TBL_128b) {
+ for (i = 0; i < dump_depth; i++) {
+ p_dump_user_data = p_dump_data_arr + i;
+ temp_data = dump_data_buff + i * ZXDH_DTB_LEN_POS_SETP;
+ if (p_dump_user_data == NULL || p_dump_user_data->p_data == NULL) {
+ PMD_DRV_LOG(ERR, "data buff is NULL!");
+ rte_free(dump_data_buff);
+ return ZXDH_ERR;
+ }
+
+ p_dump_user_data->index = start_index.index + i;
+ rte_memcpy(p_dump_user_data->p_data, temp_data, (128 / 8));
+ }
+ } else if (dump_mode == ZXDH_ERAM128_TBL_64b) {
+ remain = start_index.index % 2;
+ for (i = 0; i < eram_table_depth - start_index.index; i++) {
+ zxdh_np_eram_index_cal(dump_mode, remain, &row_index, &col_index);
+ temp_data = dump_data_buff + row_index * ZXDH_DTB_LEN_POS_SETP;
+
+ buff = (uint32_t *)temp_data;
+ p_dump_user_data = p_dump_data_arr + i;
+
+ if (p_dump_user_data == NULL || p_dump_user_data->p_data == NULL) {
+ PMD_DRV_LOG(ERR, "data buff is NULL!");
+ rte_free(dump_data_buff);
+ return ZXDH_ERR;
+ }
+
+ p_dump_user_data->index = start_index.index + i;
+ rte_memcpy(p_dump_user_data->p_data,
+ buff + ((1 - col_index) << 1), (64 / 8));
+
+ remain++;
+ }
+ }
+
+ *entry_num = eram_table_depth - start_index.index;
+ *finish_flag = 1;
+ PMD_DRV_LOG(DEBUG, "dump entry num %d, finish flag %d", *entry_num, *finish_flag);
+
+ rte_free(dump_data_buff);
+
+ return ZXDH_OK;
+}
+
+static uint32_t
+zxdh_np_dtb_acl_index_parse(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t eram_sdt_no,
+ uint32_t vport,
+ uint32_t *index_num,
+ uint32_t *p_index_array)
+{
+ uint32_t rc = ZXDH_OK;
+ uint32_t eram_table_depth = 0;
+ uint32_t byte_num = 0;
+ uint32_t i = 0;
+ uint32_t entry_num = 0;
+ uint32_t valid_entry_num = 0;
+ uint32_t finish_flag = 0;
+ uint8_t valid = 0;
+ uint32_t temp_vport = 0;
+ ZXDH_SDT_TBL_ERAM_T sdt_eram = {0};
+ ZXDH_DTB_ERAM_ENTRY_INFO_T *p_dump_data_arr = NULL;
+ uint8_t *data_buff = NULL;
+ ZXDH_DTB_DUMP_INDEX_T start_index = {0};
+ ZXDH_DTB_DUMP_INDEX_T next_start_index = {0};
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, eram_sdt_no, &sdt_eram);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+
+ byte_num = (sdt_eram.eram_mode == ZXDH_ERAM128_TBL_64b) ? 8 : 16;
+ eram_table_depth = sdt_eram.eram_table_depth;
+ p_dump_data_arr = (ZXDH_DTB_ERAM_ENTRY_INFO_T *)rte_zmalloc(NULL, eram_table_depth *
+ sizeof(ZXDH_DTB_ERAM_ENTRY_INFO_T), 0);
+ if (p_dump_data_arr == NULL) {
+ PMD_DRV_LOG(ERR, "p_dump_data_arr point null!");
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ data_buff = (uint8_t *)rte_zmalloc(NULL, byte_num * eram_table_depth, 0);
+ if (data_buff == NULL) {
+ PMD_DRV_LOG(ERR, "data_buff point null!");
+ rte_free(p_dump_data_arr);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ for (i = 0; i < eram_table_depth; i++) {
+ p_dump_data_arr[i].index = i;
+ p_dump_data_arr[i].p_data = (uint32_t *)(data_buff + i * byte_num);
+ }
+
+ start_index.index = 0;
+ rc = zxdh_np_dtb_eram_table_dump(dev_id,
+ queue_id,
+ eram_sdt_no,
+ start_index,
+ p_dump_data_arr,
+ &entry_num,
+ &next_start_index,
+ &finish_flag);
+
+ for (i = 0; i < entry_num; i++) {
+ valid = (p_dump_data_arr[i].p_data[0] >> 31) & 0x1;
+ temp_vport = p_dump_data_arr[i].p_data[0] & 0x7fffffff;
+ if (valid && temp_vport == vport) {
+ p_index_array[valid_entry_num] = i;
+ valid_entry_num++;
+ }
+ }
+
+ *index_num = valid_entry_num;
+ rte_free(data_buff);
+ rte_free(p_dump_data_arr);
+
+ return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_etcam_ind_data_get(uint8_t *p_in_data, uint32_t rd_mode, uint8_t *p_out_data)
+{
+ uint32_t rc = ZXDH_OK;
+
+ uint32_t i = 0;
+ uint8_t *p_temp = NULL;
+ uint32_t offset = 0;
+ uint8_t buff[ZXDH_ETCAM_WIDTH_MAX / 8] = {0};
+
+ p_temp = p_out_data;
+ rte_memcpy(buff, p_in_data, ZXDH_ETCAM_WIDTH_MAX / 8);
+
+ zxdh_np_comm_swap(buff, ZXDH_ETCAM_WIDTH_MAX / 8);
+
+ for (i = 0; i < ZXDH_ETCAM_RAM_NUM; i++) {
+ offset = i * (ZXDH_ETCAM_WIDTH_MIN / 8);
+
+ if ((rd_mode >> (ZXDH_ETCAM_RAM_NUM - 1 - i)) & 0x1) {
+ rte_memcpy(p_temp, buff + offset, ZXDH_ETCAM_WIDTH_MIN / 8);
+ p_temp += ZXDH_ETCAM_WIDTH_MIN / 8;
+ }
+ }
+
+ return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_acl_table_dump(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t sdt_no,
+ __rte_unused ZXDH_DTB_DUMP_INDEX_T start_index,
+ ZXDH_DTB_ACL_ENTRY_INFO_T *p_dump_data_arr,
+ uint32_t *p_entry_num,
+ __rte_unused ZXDH_DTB_DUMP_INDEX_T *next_start_index,
+ uint32_t *p_finish_flag)
+{
+ uint32_t rc = ZXDH_OK;
+
+ uint32_t i = 0;
+ uint32_t handle = 0;
+
+ uint32_t dump_element_id = 0;
+
+ uint8_t *temp_dump_out_data = NULL;
+ uint8_t *dump_info_buff = NULL;
+ uint8_t *p_data_start = NULL;
+ uint8_t *p_data_640bit = NULL;
+ uint8_t *p_mask_start = NULL;
+ uint8_t *p_mask_640bit = NULL;
+ uint8_t *p_rst_start = NULL;
+ uint8_t *p_rst_128bit = NULL;
+ uint32_t *eram_buff = NULL;
+
+ uint32_t addr_640bit = 0;
+ uint32_t rd_mask = 0;
+ uint32_t dump_eram_depth_128bit = 0;
+ uint32_t eram_row_index = 0;
+ uint32_t eram_col_index = 0;
+
+ uint8_t cmd_buff[ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8] = {0};
+ uint8_t xy_data[ZXDH_ETCAM_WIDTH_MAX / 8] = {0};
+ uint8_t xy_mask[ZXDH_ETCAM_WIDTH_MAX / 8] = {0};
+ uint8_t dm_data[ZXDH_ETCAM_WIDTH_MAX / 8] = {0};
+ uint8_t dm_mask[ZXDH_ETCAM_WIDTH_MAX / 8] = {0};
+ ZXDH_ETCAM_ENTRY_T entry_xy = {0};
+ ZXDH_ETCAM_ENTRY_T entry_dm = {0};
+ ZXDH_DTB_ACL_ENTRY_INFO_T *p_dump_user_data = NULL;
+
+ uint32_t block_num = 0;
+ uint32_t etcam_key_mode = 0;
+ uint32_t etcam_table_id = 0;
+ uint32_t as_enable = 0;
+ uint32_t as_eram_baddr = 0;
+ uint32_t etcam_as_mode = 0;
+ uint32_t etcam_table_depth = 0;
+ uint32_t block_idx = 0;
+
+ uint32_t etcam_data_dst_phy_haddr = 0;
+ uint32_t etcam_data_dst_phy_laddr = 0;
+ uint32_t etcam_mask_dst_phy_haddr = 0;
+ uint32_t etcam_mask_dst_phy_laddr = 0;
+ uint32_t as_rst_dst_phy_haddr = 0;
+ uint32_t as_rst_dst_phy_laddr = 0;
+
+ uint32_t dtb_desc_addr_offset = 0;
+ uint32_t dump_data_len = 0;
+ uint32_t dtb_desc_len = 0;
+
+ uint32_t etcam_data_len_offset = 0;
+ uint32_t etcam_mask_len_offset = 0;
+ uint32_t data_byte_size = 0;
+
+ ZXDH_ACL_CFG_EX_T *p_acl_cfg = NULL;
+ ZXDH_ACL_TBL_CFG_T *p_tbl_cfg = NULL;
+
+ ZXDH_SDT_TBL_ETCAM_T sdt_etcam_info = {0};
+ ZXDH_ETCAM_DUMP_INFO_T etcam_dump_info = {0};
+ ZXDH_DTB_ENTRY_T dtb_dump_entry = {0};
+
+ uint32_t shift_amount = 0;
+ uint32_t mask_base = 0;
+ uint32_t offset = 0;
+
+ dtb_dump_entry.cmd = cmd_buff;
+ entry_xy.p_data = xy_data;
+ entry_xy.p_mask = xy_mask;
+ entry_dm.p_data = dm_data;
+ entry_dm.p_mask = dm_mask;
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, sdt_no, &sdt_etcam_info);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+ etcam_key_mode = sdt_etcam_info.etcam_key_mode;
+ etcam_as_mode = sdt_etcam_info.as_rsp_mode;
+ etcam_table_id = sdt_etcam_info.etcam_table_id;
+ as_enable = sdt_etcam_info.as_en;
+ as_eram_baddr = sdt_etcam_info.as_eram_baddr;
+ etcam_table_depth = sdt_etcam_info.etcam_table_depth;
+
+ zxdh_np_acl_cfg_get(dev_id, &p_acl_cfg);
+
+ p_tbl_cfg = p_acl_cfg->acl_tbls + etcam_table_id;
+
+ if (!p_tbl_cfg->is_used) {
+ PMD_DRV_LOG(ERR, "table[ %d ] is not init!", etcam_table_id);
+ RTE_ASSERT(0);
+ return ZXDH_ACL_RC_TBL_NOT_INIT;
+ }
+
+ data_byte_size = ZXDH_ETCAM_ENTRY_SIZE_GET(etcam_key_mode);
+ if (data_byte_size > ZXDH_ETCAM_RAM_WIDTH) {
+ PMD_DRV_LOG(ERR, "etcam date size is over 80B!");
+ return ZXDH_ACL_RC_INVALID_PARA;
+ }
+
+ block_num = p_tbl_cfg->block_num;
+
+ rc = zxdh_np_dtb_dump_addr_set(dev_id, queue_id, sdt_no, &dump_element_id);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_dump_addr_set");
+
+ dump_info_buff = (uint8_t *)rte_zmalloc(NULL, ZXDH_DTB_TABLE_DUMP_INFO_BUFF_SIZE, 0);
+ if (dump_info_buff == NULL) {
+ PMD_DRV_LOG(ERR, "%s point null!", __func__);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ for (i = 0; i < block_num; i++) {
+ block_idx = p_tbl_cfg->block_array[i];
+
+ PMD_DRV_LOG(DEBUG, "block_idx: %d", block_idx);
+
+ etcam_dump_info.block_sel = block_idx;
+ etcam_dump_info.addr = 0;
+ etcam_dump_info.tb_width = 3;
+ etcam_dump_info.rd_mode = 0xFF;
+ etcam_dump_info.tb_depth = ZXDH_ETCAM_RAM_DEPTH;
+ etcam_dump_info.data_or_mask = ZXDH_ETCAM_DTYPE_DATA;
+
+ zxdh_np_dtb_tab_up_item_offset_addr_get(dev_id,
+ queue_id,
+ dump_element_id,
+ dump_data_len,
+ &etcam_data_dst_phy_haddr,
+ &etcam_data_dst_phy_laddr);
+
+ zxdh_np_dtb_etcam_dump_entry(dev_id,
+ &etcam_dump_info,
+ etcam_data_dst_phy_haddr,
+ etcam_data_dst_phy_laddr,
+ &dtb_dump_entry);
+
+ zxdh_np_dtb_data_write(dump_info_buff, dtb_desc_addr_offset, &dtb_dump_entry);
+
+ memset(cmd_buff, 0, ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8);
+
+ dtb_desc_len += 1;
+ dtb_desc_addr_offset += ZXDH_DTB_LEN_POS_SETP;
+ dump_data_len += ZXDH_ETCAM_RAM_DEPTH * 640 / 8;
+ }
+
+ etcam_data_len_offset = dump_data_len;
+
+ for (i = 0; i < block_num; i++) {
+ block_idx = p_tbl_cfg->block_array[i];
+
+ PMD_DRV_LOG(DEBUG, "mask: block_idx: %d", block_idx);
+
+ etcam_dump_info.block_sel = block_idx;
+ etcam_dump_info.addr = 0;
+ etcam_dump_info.tb_width = 3;
+ etcam_dump_info.rd_mode = 0xFF;
+ etcam_dump_info.tb_depth = ZXDH_ETCAM_RAM_DEPTH;
+ etcam_dump_info.data_or_mask = ZXDH_ETCAM_DTYPE_MASK;
+
+ zxdh_np_dtb_tab_up_item_offset_addr_get(dev_id,
+ queue_id,
+ dump_element_id,
+ dump_data_len,
+ &etcam_mask_dst_phy_haddr,
+ &etcam_mask_dst_phy_laddr);
+
+ zxdh_np_dtb_etcam_dump_entry(dev_id,
+ &etcam_dump_info,
+ etcam_mask_dst_phy_haddr,
+ etcam_mask_dst_phy_laddr,
+ &dtb_dump_entry);
+
+ zxdh_np_dtb_data_write(dump_info_buff, dtb_desc_addr_offset, &dtb_dump_entry);
+
+ memset(cmd_buff, 0, ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8);
+
+ dtb_desc_len += 1;
+ dtb_desc_addr_offset += ZXDH_DTB_LEN_POS_SETP;
+ dump_data_len += ZXDH_ETCAM_RAM_DEPTH * 640 / 8;
+ }
+ etcam_mask_len_offset = dump_data_len;
+
+ if (as_enable) {
+ zxdh_np_eram_index_cal(etcam_as_mode,
+ etcam_table_depth, &dump_eram_depth_128bit, &eram_col_index);
+
+ zxdh_np_dtb_tab_up_item_offset_addr_get(dev_id,
+ queue_id,
+ dump_element_id,
+ dump_data_len,
+ &as_rst_dst_phy_haddr,
+ &as_rst_dst_phy_laddr);
+
+ zxdh_np_dtb_smmu0_dump_entry(dev_id,
+ as_eram_baddr,
+ dump_eram_depth_128bit,
+ as_rst_dst_phy_haddr,
+ as_rst_dst_phy_laddr,
+ &dtb_dump_entry);
+
+ zxdh_np_dtb_data_write(dump_info_buff, dtb_desc_addr_offset, &dtb_dump_entry);
+
+ memset(cmd_buff, 0, ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8);
+ dtb_desc_len += 1;
+ dtb_desc_addr_offset += ZXDH_DTB_LEN_POS_SETP;
+ dump_data_len += dump_eram_depth_128bit * 128 / 8;
+ }
+
+ temp_dump_out_data = (uint8_t *)rte_zmalloc(NULL, dump_data_len * sizeof(uint8_t), 0);
+ if (temp_dump_out_data == NULL) {
+ PMD_DRV_LOG(ERR, "temp_dump_out_data point null!");
+ rte_free(dump_info_buff);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ rc = zxdh_np_dtb_write_dump_desc_info(dev_id,
+ queue_id,
+ dump_element_id,
+ (uint32_t *)dump_info_buff,
+ dump_data_len / 4,
+ dtb_desc_len * 4,
+ (uint32_t *)temp_dump_out_data);
+ rte_free(dump_info_buff);
+
+ p_data_start = temp_dump_out_data;
+ p_mask_start = temp_dump_out_data + etcam_data_len_offset;
+ if (as_enable)
+ p_rst_start = temp_dump_out_data + etcam_mask_len_offset;
+
+ for (handle = 0; handle < etcam_table_depth; handle++) {
+ p_dump_user_data = p_dump_data_arr + handle;
+
+ if (p_dump_user_data == NULL ||
+ p_dump_user_data->key_data == NULL ||
+ p_dump_user_data->key_mask == NULL) {
+ PMD_DRV_LOG(ERR, "etcam handle 0x%x data user buff is NULL!", handle);
+ rte_free(temp_dump_out_data);
+ return ZXDH_ERR;
+ }
+
+ if (as_enable) {
+ if (p_dump_user_data->p_as_rslt == NULL) {
+ PMD_DRV_LOG(ERR, "handle 0x%x data buff is NULL!", handle);
+ rte_free(temp_dump_out_data);
+ return ZXDH_ERR;
+ }
+ }
+
+ p_dump_user_data->handle = handle;
+
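+ /*
+ * A 640-bit eTCAM row holds (1 << etcam_key_mode) entries, each covering
+ * (8 >> etcam_key_mode) of the eight rd_mode bits (RAM slices). Work out
+ * the per-handle read mask and the 640-bit row this handle maps to.
+ */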
+ shift_amount = 8U >> etcam_key_mode;
+ mask_base = (1U << shift_amount) - 1;
+ offset = shift_amount * (handle % (1U << etcam_key_mode));
+ rd_mask = (mask_base << offset) & 0xFF;
+
+ addr_640bit = handle / (1U << etcam_key_mode);
+ p_data_640bit = p_data_start + addr_640bit * 640 / 8;
+ p_mask_640bit = p_mask_start + addr_640bit * 640 / 8;
+
+ zxdh_np_dtb_etcam_ind_data_get(p_data_640bit, rd_mask, entry_xy.p_data);
+ zxdh_np_dtb_etcam_ind_data_get(p_mask_640bit, rd_mask, entry_xy.p_mask);
+
+ zxdh_np_etcam_xy_to_dm(&entry_dm, &entry_xy, data_byte_size);
+
+ rte_memcpy(p_dump_user_data->key_data, entry_dm.p_data, data_byte_size);
+ rte_memcpy(p_dump_user_data->key_mask, entry_dm.p_mask, data_byte_size);
+
+ if (as_enable) {
+ zxdh_np_eram_index_cal(etcam_as_mode,
+ handle, &eram_row_index, &eram_col_index);
+
+ p_rst_128bit = p_rst_start + eram_row_index * ZXDH_DTB_LEN_POS_SETP;
+
+ eram_buff = (uint32_t *)p_rst_128bit;
+
+ if (etcam_as_mode == ZXDH_ERAM128_TBL_128b)
+ rte_memcpy(p_dump_user_data->p_as_rslt, eram_buff, (128 / 8));
+ else if (etcam_as_mode == ZXDH_ERAM128_TBL_64b)
+ rte_memcpy(p_dump_user_data->p_as_rslt,
+ eram_buff + ((1 - eram_col_index) << 1), (64 / 8));
+ }
+ }
+
+ *p_entry_num = etcam_table_depth;
+ *p_finish_flag = 1;
+
+ rte_free(temp_dump_out_data);
+
+ return ZXDH_OK;
+}
+
+static uint32_t
+zxdh_np_smmu0_tbl_size_get(uint32_t eram_mode)
+{
+ uint32_t size = 0;
+ if (eram_mode == ZXDH_ERAM128_TBL_128b)
+ size = 16;
+ else if (eram_mode == ZXDH_ERAM128_TBL_64b)
+ size = 8;
+ else if (eram_mode == ZXDH_ERAM128_TBL_32b)
+ size = 4;
+ else
+ size = 1;
+
+ return size;
+}
+
+static uint32_t
+zxdh_np_dtb_acl_data_get_by_handle(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t sdt_no,
+ uint32_t index_num,
+ uint32_t *p_index_array,
+ uint8_t *p_dump_data)
+{
+ uint32_t rc = ZXDH_OK;
+ uint32_t i = 0;
+ uint32_t etcam_key_mode = 0;
+ uint32_t etcam_table_depth = 0;
+ uint32_t as_len = 0;
+ uint32_t data_byte_size = 0;
+ uint32_t entry_num = 0;
+ uint32_t finish_flag = 0;
+ uint8_t *data_buff = NULL;
+ uint8_t *mask_buff = NULL;
+ uint8_t *eram_buff = NULL;
+
+ ZXDH_SDT_TBL_ETCAM_T sdt_etcam_info = {0};
+ ZXDH_DTB_DUMP_INDEX_T start_index = {0};
+ ZXDH_DTB_DUMP_INDEX_T next_start_index = {0};
+ ZXDH_DTB_ACL_ENTRY_INFO_T *p_dtb_acl_entry = NULL;
+ ZXDH_DTB_ACL_ENTRY_INFO_T *p_temp_entry = NULL;
+ ZXDH_DTB_ACL_ENTRY_INFO_T *p_dump_entry = NULL;
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, sdt_no, &sdt_etcam_info);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+ if (sdt_etcam_info.table_type != ZXDH_SDT_TBLT_ETCAM) {
+ PMD_DRV_LOG(ERR, "SDT[%d] table_type[ %d ] is not etcam table!",
+ sdt_no, sdt_etcam_info.table_type);
+ return ZXDH_ERR;
+ }
+
+ etcam_key_mode = sdt_etcam_info.etcam_key_mode;
+ etcam_table_depth = sdt_etcam_info.etcam_table_depth;
+ as_len = zxdh_np_smmu0_tbl_size_get(sdt_etcam_info.as_rsp_mode);
+ data_byte_size = ZXDH_ETCAM_ENTRY_SIZE_GET(etcam_key_mode);
+
+ p_dtb_acl_entry = (ZXDH_DTB_ACL_ENTRY_INFO_T *)rte_zmalloc(NULL, etcam_table_depth *
+ sizeof(ZXDH_DTB_ACL_ENTRY_INFO_T), 0);
+ if (p_dtb_acl_entry == NULL) {
+ PMD_DRV_LOG(ERR, "%s point null!", __func__);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ data_buff = (uint8_t *)rte_zmalloc(NULL, etcam_table_depth * data_byte_size, 0);
+ if (data_buff == NULL) {
+ PMD_DRV_LOG(ERR, "data_buff point null!");
+ rte_free(p_dtb_acl_entry);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ mask_buff = (uint8_t *)rte_zmalloc(NULL, etcam_table_depth * data_byte_size, 0);
+ if (mask_buff == NULL) {
+ PMD_DRV_LOG(ERR, "mask_buff point null!");
+ rte_free(data_buff);
+ rte_free(p_dtb_acl_entry);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ eram_buff = (uint8_t *)rte_zmalloc(NULL, etcam_table_depth * as_len, 0);
+ if (eram_buff == NULL) {
+ PMD_DRV_LOG(ERR, "eram_buff point null!");
+ rte_free(mask_buff);
+ rte_free(data_buff);
+ rte_free(p_dtb_acl_entry);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ for (i = 0; i < etcam_table_depth; i++) {
+ p_dtb_acl_entry[i].handle = i;
+ p_dtb_acl_entry[i].key_data = data_buff + i * data_byte_size;
+ p_dtb_acl_entry[i].key_mask = mask_buff + i * data_byte_size;
+ p_dtb_acl_entry[i].p_as_rslt = eram_buff + i * as_len;
+ }
+
+ rc = zxdh_np_dtb_acl_table_dump(dev_id,
+ queue_id,
+ sdt_no,
+ start_index,
+ p_dtb_acl_entry,
+ &entry_num,
+ &next_start_index,
+ &finish_flag);
+ if (rc != ZXDH_OK) {
+ PMD_DRV_LOG(ERR, "acl sdt[%u] dump fail, rc:0x%x", sdt_no, rc);
+ rte_free(data_buff);
+ rte_free(mask_buff);
+ rte_free(eram_buff);
+ rte_free(p_dtb_acl_entry);
+ return rc;
+ }
+
+ for (i = 0; i < index_num; i++) {
+ p_dump_entry = ((ZXDH_DTB_ACL_ENTRY_INFO_T *)p_dump_data) + i;
+ p_dump_entry->handle = p_index_array[i];
+ p_temp_entry = p_dtb_acl_entry + p_index_array[i];
+ rte_memcpy(p_dump_entry->key_data, p_temp_entry->key_data, data_byte_size);
+ rte_memcpy(p_dump_entry->key_mask, p_temp_entry->key_mask, data_byte_size);
+ rte_memcpy(p_dump_entry->p_as_rslt, p_temp_entry->p_as_rslt, as_len);
+ }
+
+ rte_free(data_buff);
+ rte_free(mask_buff);
+ rte_free(eram_buff);
+ rte_free(p_dtb_acl_entry);
+
+ return rc;
+}
+
+uint32_t
+zxdh_np_dtb_acl_table_dump_by_vport(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t sdt_no,
+ uint32_t vport,
+ uint32_t *entry_num,
+ uint8_t *p_dump_data)
+{
+ uint32_t rc = ZXDH_OK;
+ uint32_t index_num = 0;
+ uint32_t eram_sdt_no = 0;
+ uint32_t *p_index_array = NULL;
+
+ ZXDH_SDT_TBL_ETCAM_T sdt_etcam_info = {0};
+ ZXDH_SDT_TBL_ERAM_T sdt_eram = {0};
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, sdt_no, &sdt_etcam_info);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+ if (sdt_etcam_info.table_type != ZXDH_SDT_TBLT_ETCAM) {
+ PMD_DRV_LOG(ERR, "SDT[%d] table_type[ %d ] is not etcam table!",
+ sdt_no, sdt_etcam_info.table_type);
+ return ZXDH_ERR;
+ }
+
+ eram_sdt_no = zxdh_np_apt_get_sdt_partner(dev_id, sdt_no);
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, eram_sdt_no, &sdt_eram);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+ if (sdt_eram.table_type != ZXDH_SDT_TBLT_ERAM) {
+ PMD_DRV_LOG(ERR, "SDT[%d] table_type[ %d ] is not eram table!",
+ eram_sdt_no, sdt_eram.table_type);
+ return ZXDH_ERR;
+ }
+
+ p_index_array = (uint32_t *)rte_zmalloc(NULL,
+ sizeof(uint32_t) * sdt_eram.eram_table_depth, 0);
+ if (p_index_array == NULL) {
+ PMD_DRV_LOG(ERR, "%s point null!", __func__);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ rc = zxdh_np_dtb_acl_index_parse(dev_id, queue_id,
+ eram_sdt_no, vport, &index_num, p_index_array);
+ if (rc != ZXDH_OK) {
+ rte_free(p_index_array);
+ PMD_DRV_LOG(ERR, "acl index parse failed");
+ return ZXDH_ERR;
+ }
+
+ if (!index_num) {
+ PMD_DRV_LOG(ERR, "SDT[%d] vport[0x%x] item num is zero!", sdt_no, vport);
+ rte_free(p_index_array);
+ return ZXDH_OK;
+ }
+
+ rc = zxdh_np_dtb_acl_data_get_by_handle(dev_id, queue_id, sdt_no,
+ index_num, p_index_array, p_dump_data);
+ if (rc != ZXDH_OK) {
+ rte_free(p_index_array);
+ PMD_DRV_LOG(ERR, "acl date by handle failed");
+ return ZXDH_ERR;
+ }
+
+ *entry_num = index_num;
+ rte_free(p_index_array);
+
+ return ZXDH_OK;
+}
+
+static uint32_t
+zxdh_np_dtb_acl_dma_insert_cycle(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t sdt_no,
+ uint32_t entry_num,
+ ZXDH_DTB_ACL_ENTRY_INFO_T *p_acl_entry_arr,
+ uint32_t *element_id)
+{
+ uint32_t rc = ZXDH_OK;
+ uint32_t as_eram_baddr = 0;
+ uint32_t as_enable = 0;
+ uint32_t etcam_table_id = 0;
+ uint32_t etcam_as_mode = 0;
+ uint32_t block_idx = 0;
+ uint32_t ram_addr = 0;
+ uint32_t etcam_wr_mode = 0;
+ uint32_t eram_wrt_mode = 0;
+ uint32_t eram_index = 0;
+
+ uint32_t item_cnt = 0;
+ uint32_t addr_offset_bk = 0;
+ uint32_t dtb_len = 0;
+ uint32_t as_addr_offset = 0;
+ uint32_t as_dtb_len = 0;
+
+ ZXDH_ACL_CFG_EX_T *p_acl_cfg = NULL;
+ ZXDH_ACL_TBL_CFG_T *p_tbl_cfg = NULL;
+ ZXDH_DTB_ACL_ENTRY_INFO_T *p_acl_entry = NULL;
+ uint32_t *p_as_eram_data = NULL;
+ uint8_t *table_data_buff = NULL;
+ ZXDH_ETCAM_ENTRY_T etcam_entry = {0};
+
+ uint8_t entry_data_buff[ZXDH_ETCAM_WIDTH_MAX / 8] = {0};
+ uint8_t entry_mask_buff[ZXDH_ETCAM_WIDTH_MAX / 8] = {0};
+ uint32_t as_eram_data_buff[4] = {0};
+ uint8_t entry_data_cmd_buff[ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8] = {0};
+ uint8_t entry_mask_cmd_buff[ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8] = {0};
+ uint8_t as_eram_cmd_buff[ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8] = {0};
+
+ ZXDH_SDT_TBL_ETCAM_T sdt_etcam_info = {0};
+
+ ZXDH_DTB_ENTRY_T entry_data = {0};
+ ZXDH_DTB_ENTRY_T entry_mask = {0};
+ ZXDH_DTB_ENTRY_T dtb_as_data_entry = {0};
+
+ entry_data.cmd = entry_data_cmd_buff;
+ entry_data.data = (uint8_t *)entry_data_buff;
+
+ entry_mask.cmd = entry_mask_cmd_buff;
+ entry_mask.data = (uint8_t *)entry_mask_buff;
+
+ dtb_as_data_entry.cmd = as_eram_cmd_buff;
+ dtb_as_data_entry.data = (uint8_t *)as_eram_data_buff;
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, sdt_no, &sdt_etcam_info);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+ etcam_as_mode = sdt_etcam_info.as_rsp_mode;
+ etcam_table_id = sdt_etcam_info.etcam_table_id;
+ as_enable = sdt_etcam_info.as_en;
+ as_eram_baddr = sdt_etcam_info.as_eram_baddr;
+
+ if (as_enable) {
+ switch (etcam_as_mode) {
+ case ZXDH_ERAM128_TBL_128b:
+ eram_wrt_mode = ZXDH_ERAM128_OPR_128b;
+ break;
+ case ZXDH_ERAM128_TBL_64b:
+ eram_wrt_mode = ZXDH_ERAM128_OPR_64b;
+ break;
+ case ZXDH_ERAM128_TBL_1b:
+ eram_wrt_mode = ZXDH_ERAM128_OPR_1b;
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "etcam_as_mode is invalid!");
+ return ZXDH_ERR;
+ }
+ }
+
+ zxdh_np_acl_cfg_get(dev_id, &p_acl_cfg);
+
+ p_tbl_cfg = p_acl_cfg->acl_tbls + etcam_table_id;
+
+ if (!p_tbl_cfg->is_used) {
+ PMD_DRV_LOG(ERR, "table[ %d ] is not init!", etcam_table_id);
+ RTE_ASSERT(0);
+ return ZXDH_ACL_RC_TBL_NOT_INIT;
+ }
+
+ table_data_buff = (uint8_t *)rte_zmalloc(NULL, ZXDH_DTB_TABLE_DATA_BUFF_SIZE, 0);
+ if (table_data_buff == NULL) {
+ PMD_DRV_LOG(ERR, "%s point null!", __func__);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ for (item_cnt = 0; item_cnt < entry_num; ++item_cnt) {
+ p_acl_entry = p_acl_entry_arr + item_cnt;
+
+ etcam_entry.mode = p_tbl_cfg->key_mode;
+ etcam_entry.p_data = p_acl_entry->key_data;
+ etcam_entry.p_mask = p_acl_entry->key_mask;
+
+ zxdh_np_acl_hdw_addr_get(p_tbl_cfg, p_acl_entry->handle,
+ &block_idx, &ram_addr, &etcam_wr_mode);
+
+ zxdh_np_dtb_etcam_entry_add(dev_id,
+ ram_addr,
+ block_idx,
+ etcam_wr_mode,
+ ZXDH_ETCAM_OPR_DM,
+ &etcam_entry,
+ &entry_data,
+ &entry_mask);
+
+ dtb_len += ZXDH_DTB_ETCAM_LEN_SIZE;
+ zxdh_np_dtb_data_write(table_data_buff, addr_offset_bk, &entry_data);
+
+ memset(entry_data_cmd_buff, 0, ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8);
+ memset(entry_data_buff, 0, ZXDH_ETCAM_WIDTH_MAX / 8);
+ addr_offset_bk = addr_offset_bk + ZXDH_DTB_ETCAM_LEN_SIZE * ZXDH_DTB_LEN_POS_SETP;
+
+ dtb_len += ZXDH_DTB_ETCAM_LEN_SIZE;
+ zxdh_np_dtb_data_write(table_data_buff, addr_offset_bk, &entry_mask);
+
+ memset(entry_mask_cmd_buff, 0, ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8);
+ memset(entry_mask_buff, 0, ZXDH_ETCAM_WIDTH_MAX / 8);
+ addr_offset_bk = addr_offset_bk + ZXDH_DTB_ETCAM_LEN_SIZE * ZXDH_DTB_LEN_POS_SETP;
+
+ if (as_enable) {
+ p_as_eram_data = (uint32_t *)(p_acl_entry->p_as_rslt);
+
+ zxdh_np_dtb_se_smmu0_ind_write(dev_id,
+ as_eram_baddr,
+ eram_index,
+ eram_wrt_mode,
+ p_as_eram_data,
+ &dtb_as_data_entry);
+
+ switch (eram_wrt_mode) {
+ case ZXDH_ERAM128_OPR_128b:
+ as_dtb_len = 2;
+ as_addr_offset = ZXDH_DTB_LEN_POS_SETP * 2;
+ break;
+ case ZXDH_ERAM128_OPR_64b:
+ as_dtb_len = 1;
+ as_addr_offset = ZXDH_DTB_LEN_POS_SETP;
+ break;
+ case ZXDH_ERAM128_OPR_1b:
+ as_dtb_len = 1;
+ as_addr_offset = ZXDH_DTB_LEN_POS_SETP;
+ break;
+ }
+
+ zxdh_np_dtb_data_write(table_data_buff,
+ addr_offset_bk, &dtb_as_data_entry);
+ addr_offset_bk = addr_offset_bk + as_addr_offset;
+ dtb_len += as_dtb_len;
+
+ memset(as_eram_cmd_buff, 0, ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8);
+ memset(as_eram_data_buff, 0, 4 * sizeof(uint32_t));
+ }
+ }
+
+ rc = zxdh_np_dtb_write_down_table_data(dev_id,
+ queue_id,
+ dtb_len * 16,
+ table_data_buff,
+ element_id);
+ rte_free(table_data_buff);
+
+ rc = zxdh_np_dtb_tab_down_success_status_check(dev_id, queue_id, *element_id);
+
+ return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_acl_dma_insert(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t sdt_no,
+ uint32_t entry_num,
+ ZXDH_DTB_ACL_ENTRY_INFO_T *p_acl_entry_arr,
+ uint32_t *element_id)
+{
+ uint32_t rc = ZXDH_OK;
+ uint32_t as_enable;
+ uint32_t etcam_as_mode;
+ uint32_t entry_num_max = 0;
+ uint32_t entry_cycle = 0;
+ uint32_t entry_remains = 0;
+ uint32_t i = 0;
+ ZXDH_DTB_ACL_ENTRY_INFO_T *p_entry = NULL;
+
+ ZXDH_SDT_TBL_ETCAM_T sdt_etcam_info = {0};
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, sdt_no, &sdt_etcam_info);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_data_write");
+
+ as_enable = sdt_etcam_info.as_en;
+ etcam_as_mode = sdt_etcam_info.as_rsp_mode;
+
+ if (!as_enable) {
+ entry_num_max = 0x55;
+ } else {
+ if (etcam_as_mode == ZXDH_ERAM128_TBL_128b)
+ entry_num_max = 0x49;
+ else
+ entry_num_max = 0x4e;
+ }
+
+ entry_cycle = entry_num / entry_num_max;
+ entry_remains = entry_num % entry_num_max;
+
+ for (i = 0; i < entry_cycle; ++i) {
+ p_entry = p_acl_entry_arr + entry_num_max * i;
+ rc = zxdh_np_dtb_acl_dma_insert_cycle(dev_id,
+ queue_id,
+ sdt_no,
+ entry_num_max,
+ p_entry,
+ element_id);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_acl_dma_insert_cycle");
+ }
+
+ if (entry_remains) {
+ p_entry = p_acl_entry_arr + entry_num_max * entry_cycle;
+ rc = zxdh_np_dtb_acl_dma_insert_cycle(dev_id,
+ queue_id,
+ sdt_no,
+ entry_remains,
+ p_entry,
+ element_id);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_acl_dma_insert_cycle");
+ }
+
+ return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_acl_data_clear(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t sdt_no,
+ uint32_t index_num,
+ uint32_t *p_index_array)
+{
+ uint32_t rc = ZXDH_OK;
+
+ uint32_t data_byte_size = 0;
+ uint32_t index = 0;
+ uint32_t etcam_key_mode = 0;
+ uint32_t as_enable = 0;
+ uint32_t element_id = 0;
+
+ ZXDH_SDT_TBL_ETCAM_T sdt_etcam_info = {0};
+ ZXDH_DTB_ACL_ENTRY_INFO_T *p_entry_arr = NULL;
+
+ uint8_t *data_buff = NULL;
+ uint8_t *mask_buff = NULL;
+ uint32_t *eram_buff = NULL;
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, sdt_no, &sdt_etcam_info);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+
+ etcam_key_mode = sdt_etcam_info.etcam_key_mode;
+ as_enable = sdt_etcam_info.as_en;
+ data_byte_size = ZXDH_ETCAM_ENTRY_SIZE_GET(etcam_key_mode);
+
+ p_entry_arr = (ZXDH_DTB_ACL_ENTRY_INFO_T *)rte_zmalloc(NULL, index_num *
+ sizeof(ZXDH_DTB_ACL_ENTRY_INFO_T), 0);
+ if (p_entry_arr == NULL) {
+ PMD_DRV_LOG(ERR, "%s point null!", __func__);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ data_buff = (uint8_t *)rte_zmalloc(NULL, data_byte_size, 0);
+ if (data_buff == NULL) {
+ PMD_DRV_LOG(ERR, "data_buff point null!");
+ rte_free(p_entry_arr);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ mask_buff = (uint8_t *)rte_zmalloc(NULL, data_byte_size, 0);
+ if (mask_buff == NULL) {
+ PMD_DRV_LOG(ERR, "mask_buff point null!");
+ rte_free(data_buff);
+ rte_free(p_entry_arr);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ if (as_enable) {
+ eram_buff = (uint32_t *)rte_zmalloc(NULL, 4 * sizeof(uint32_t), 0);
+ if (eram_buff == NULL) {
+ PMD_DRV_LOG(ERR, "eram_buff point null!");
+ rte_free(mask_buff);
+ rte_free(data_buff);
+ rte_free(p_entry_arr);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+ memset(eram_buff, 0, 4 * sizeof(uint32_t));
+ }
+
+ for (index = 0; index < index_num; index++) {
+ p_entry_arr[index].handle = p_index_array[index];
+ p_entry_arr[index].key_data = data_buff;
+ p_entry_arr[index].key_mask = mask_buff;
+
+ if (as_enable)
+ p_entry_arr[index].p_as_rslt = (uint8_t *)eram_buff;
+ }
+
+ rc = zxdh_np_dtb_acl_dma_insert(dev_id,
+ queue_id,
+ sdt_no,
+ index_num,
+ p_entry_arr,
+ &element_id);
+ rte_free(data_buff);
+ rte_free(mask_buff);
+ if (eram_buff)
+ rte_free(eram_buff);
+
+ rte_free(p_entry_arr);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_acl_dma_insert");
+
+ return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_acl_index_release_by_vport(uint32_t dev_id,
+ uint32_t sdt_no,
+ uint32_t vport)
+{
+ uint32_t rc = ZXDH_OK;
+ uint32_t eram_sdt_no = 0;
+ ZXDH_SPINLOCK_T *p_dtb_spinlock = NULL;
+ ZXDH_DEV_SPINLOCK_TYPE_E spinlock = ZXDH_DEV_SPINLOCK_T_DTB;
+ ZXDH_SDT_TBL_ETCAM_T sdt_acl = {0};
+ ZXDH_SDT_TBL_ERAM_T sdt_eram = {0};
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, sdt_no, &sdt_acl);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+ if (sdt_acl.table_type != ZXDH_SDT_TBLT_ETCAM) {
+ PMD_DRV_LOG(ERR, "SDT[%d] table_type[ %d ] is not etcam table!",
+ sdt_no, sdt_acl.table_type);
+ return ZXDH_ERR;
+ }
+
+ eram_sdt_no = zxdh_np_apt_get_sdt_partner(dev_id, sdt_no);
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, eram_sdt_no, &sdt_eram);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+ if (sdt_eram.table_type != ZXDH_SDT_TBLT_ERAM) {
+ PMD_DRV_LOG(ERR, "SDT[%d] table_type[ %d ] is not eram table!",
+ eram_sdt_no, sdt_eram.table_type);
+ return ZXDH_ERR;
+ }
+
+ rc = zxdh_np_dev_opr_spinlock_get(dev_id, (uint32_t)spinlock, &p_dtb_spinlock);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dev_opr_spinlock_get");
+
+ rte_spinlock_lock(&p_dtb_spinlock->spinlock);
+
+ rc = zxdh_np_agent_channel_acl_index_release(dev_id,
+ ZXDH_ACL_INDEX_VPORT_REL, sdt_no, vport, 0);
+ if (rc == ZXDH_ACL_RC_SRH_FAIL)
+ PMD_DRV_LOG(ERR, "ACL_INDEX_VPORT_REL[vport:0x%x] index is not exist.", vport);
+
+ rte_spinlock_unlock(&p_dtb_spinlock->spinlock);
+
+ return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_smmu0_data_write_cycle(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t smmu0_base_addr,
+ uint32_t smmu0_wr_mode,
+ uint32_t entry_num,
+ ZXDH_DTB_ERAM_ENTRY_INFO_T *p_entry_arr,
+ uint32_t *element_id)
+{
+ uint32_t rc = ZXDH_OK;
+
+ uint32_t item_cnt = 0;
+ uint32_t addr_offset = 0;
+ uint32_t dtb_len = 0;
+ uint32_t index = 0;
+
+ uint32_t *p_entry_data = NULL;
+ uint8_t *table_data_buff = NULL;
+ uint32_t entry_data_buff[4] = {0};
+ uint8_t cmd_buff[ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8] = {0};
+ ZXDH_DTB_ENTRY_T dtb_one_entry = {0};
+
+ table_data_buff = (uint8_t *)rte_zmalloc(NULL, ZXDH_DTB_TABLE_DATA_BUFF_SIZE, 0);
+ if (table_data_buff == NULL) {
+ PMD_DRV_LOG(ERR, "%s point null!", __func__);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ dtb_one_entry.cmd = cmd_buff;
+ dtb_one_entry.data = (uint8_t *)entry_data_buff;
+
+ for (item_cnt = 0; item_cnt < entry_num; ++item_cnt) {
+ p_entry_data = (uint32_t *)p_entry_arr[item_cnt].p_data;
+ index = p_entry_arr[item_cnt].index;
+
+ rc = zxdh_np_dtb_se_smmu0_ind_write(dev_id,
+ smmu0_base_addr,
+ index,
+ smmu0_wr_mode,
+ p_entry_data,
+ &dtb_one_entry);
+
+ switch (smmu0_wr_mode) {
+ case ZXDH_ERAM128_OPR_128b:
+ dtb_len += 2;
+ addr_offset = item_cnt * ZXDH_DTB_LEN_POS_SETP * 2;
+ break;
+ case ZXDH_ERAM128_OPR_64b:
+ dtb_len += 1;
+ addr_offset = item_cnt * ZXDH_DTB_LEN_POS_SETP;
+ break;
+ case ZXDH_ERAM128_OPR_1b:
+ dtb_len += 1;
+ addr_offset = item_cnt * ZXDH_DTB_LEN_POS_SETP;
+ break;
+ }
+
+ zxdh_np_dtb_data_write(table_data_buff, addr_offset, &dtb_one_entry);
+ memset(cmd_buff, 0, ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8);
+ memset(entry_data_buff, 0, 4 * sizeof(uint32_t));
+ }
+
+ rc = zxdh_np_dtb_write_down_table_data(dev_id,
+ queue_id,
+ dtb_len * 16,
+ table_data_buff,
+ element_id);
+ rte_free(table_data_buff);
+
+ rc = zxdh_np_dtb_tab_down_success_status_check(dev_id, queue_id, *element_id);
+
+ return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_smmu0_data_write(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t smmu0_base_addr,
+ uint32_t smmu0_wr_mode,
+ uint32_t entry_num,
+ ZXDH_DTB_ERAM_ENTRY_INFO_T *p_entry_arr,
+ uint32_t *element_id)
+{
+ uint32_t rc = ZXDH_OK;
+
+ uint32_t i = 0;
+ uint32_t entry_num_max = 0;
+ uint32_t entry_cycle = 0;
+ uint32_t entry_remains = 0;
+
+ ZXDH_DTB_ERAM_ENTRY_INFO_T *p_entry = NULL;
+
+ switch (smmu0_wr_mode) {
+ case ZXDH_ERAM128_OPR_128b:
+ entry_num_max = 0x1ff;
+ break;
+ case ZXDH_ERAM128_OPR_64b:
+ entry_num_max = 0x3ff;
+ break;
+ case ZXDH_ERAM128_OPR_1b:
+ entry_num_max = 0x3ff;
+ break;
+ }
+
+ entry_cycle = entry_num / entry_num_max;
+ entry_remains = entry_num % entry_num_max;
+
+ for (i = 0; i < entry_cycle; ++i) {
+ p_entry = p_entry_arr + entry_num_max * i;
+ rc = zxdh_np_dtb_smmu0_data_write_cycle(dev_id,
+ queue_id,
+ smmu0_base_addr,
+ smmu0_wr_mode,
+ entry_num_max,
+ p_entry,
+ element_id);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_smmu0_data_write_cycle");
+ }
+
+ if (entry_remains) {
+ p_entry = p_entry_arr + entry_num_max * entry_cycle;
+ rc = zxdh_np_dtb_smmu0_data_write_cycle(dev_id,
+ queue_id,
+ smmu0_base_addr,
+ smmu0_wr_mode,
+ entry_remains,
+ p_entry,
+ element_id);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_smmu0_data_write_cycle");
+ }
+
+ return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_eram_dma_write(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t sdt_no,
+ uint32_t entry_num,
+ ZXDH_DTB_ERAM_ENTRY_INFO_T *p_entry_arr,
+ uint32_t *element_id)
+{
+ uint32_t rc = ZXDH_OK;
+
+ uint32_t wrt_mode;
+ uint32_t base_addr;
+
+ ZXDH_SDT_TBL_ERAM_T sdt_eram_info = {0};
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, sdt_no, &sdt_eram_info);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+ base_addr = sdt_eram_info.eram_base_addr;
+ wrt_mode = sdt_eram_info.eram_mode;
+
+ switch (wrt_mode) {
+ case ZXDH_ERAM128_TBL_128b:
+ wrt_mode = ZXDH_ERAM128_OPR_128b;
+ break;
+ case ZXDH_ERAM128_TBL_64b:
+ wrt_mode = ZXDH_ERAM128_OPR_64b;
+ break;
+ case ZXDH_ERAM128_TBL_1b:
+ wrt_mode = ZXDH_ERAM128_OPR_1b;
+ break;
+ }
+
+ rc = zxdh_np_dtb_smmu0_data_write(dev_id,
+ queue_id,
+ base_addr,
+ wrt_mode,
+ entry_num,
+ p_entry_arr,
+ element_id);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_smmu0_data_write");
+
+ return ZXDH_OK;
+}
+
+static uint32_t
+zxdh_np_dtb_eram_data_clear(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t sdt_no,
+ uint32_t index_num,
+ uint32_t *p_index_array)
+{
+ uint32_t rc = ZXDH_OK;
+ uint32_t element_id = 0;
+ uint32_t i = 0;
+
+ ZXDH_DTB_ERAM_ENTRY_INFO_T *p_eram_data_arr = NULL;
+ uint8_t *data_buff = NULL;
+
+ p_eram_data_arr = (ZXDH_DTB_ERAM_ENTRY_INFO_T *)rte_zmalloc(NULL, index_num *
+ sizeof(ZXDH_DTB_ERAM_ENTRY_INFO_T), 0);
+ if (p_eram_data_arr == NULL) {
+ PMD_DRV_LOG(ERR, "%s point null!", __func__);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ data_buff = (uint8_t *)rte_zmalloc(NULL, 4 * sizeof(uint32_t), 0);
+ if (data_buff == NULL) {
+ PMD_DRV_LOG(ERR, "data_buff point null!");
+ rte_free(p_eram_data_arr);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ for (i = 0; i < index_num; i++) {
+ p_eram_data_arr[i].index = p_index_array[i];
+ p_eram_data_arr[i].p_data = (uint32_t *)data_buff;
+ }
+
+ rc = zxdh_np_dtb_eram_dma_write(dev_id, queue_id,
+ sdt_no, index_num, p_eram_data_arr, &element_id);
+ rte_free(data_buff);
+ rte_free(p_eram_data_arr);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_eram_dma_write");
+
+ return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_eram_stat_data_clear(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t counter_id,
+ ZXDH_STAT_CNT_MODE_E rd_mode,
+ uint32_t index_num,
+ uint32_t *p_index_array)
+{
+ uint32_t rc = ZXDH_OK;
+ uint32_t element_id = 0;
+ uint32_t i = 0;
+ uint32_t wrt_mode = 0;
+ uint32_t start_addr = 0;
+ uint32_t counter_id_128bit = 0;
+
+ ZXDH_PPU_STAT_CFG_T stat_cfg = {0};
+ ZXDH_DTB_ERAM_ENTRY_INFO_T *p_eram_data_arr = NULL;
+ uint8_t *data_buff = NULL;
+
+ zxdh_np_stat_cfg_soft_get(dev_id, &stat_cfg);
+
+ p_eram_data_arr = (ZXDH_DTB_ERAM_ENTRY_INFO_T *)rte_zmalloc(NULL, index_num *
+ sizeof(ZXDH_DTB_ERAM_ENTRY_INFO_T), 0);
+ if (p_eram_data_arr == NULL) {
+ PMD_DRV_LOG(ERR, "%s point null!", __func__);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ data_buff = (uint8_t *)rte_zmalloc(NULL, 4 * sizeof(uint32_t), 0);
+ if (data_buff == NULL) {
+ PMD_DRV_LOG(ERR, "data_buff point null!");
+ rte_free(p_eram_data_arr);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ for (i = 0; i < index_num; i++) {
+ p_eram_data_arr[i].index = p_index_array[i];
+ p_eram_data_arr[i].p_data = (uint32_t *)data_buff;
+ }
+
+ wrt_mode = (rd_mode == ZXDH_STAT_128_MODE) ? ZXDH_ERAM128_OPR_128b : ZXDH_ERAM128_OPR_64b;
+ counter_id_128bit = (rd_mode == ZXDH_STAT_128_MODE) ? counter_id : (counter_id >> 1);
+ start_addr = stat_cfg.eram_baddr + counter_id_128bit;
+ rc = zxdh_np_dtb_smmu0_data_write(dev_id,
+ queue_id,
+ start_addr,
+ wrt_mode,
+ index_num,
+ p_eram_data_arr,
+ &element_id);
+ rte_free(data_buff);
+ rte_free(p_eram_data_arr);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_smmu0_data_write");
+
+ return rc;
+}
+
+uint32_t
+zxdh_np_dtb_acl_offline_delete(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t sdt_no,
+ uint32_t vport,
+ uint32_t counter_id,
+ uint32_t rd_mode)
+{
+ uint32_t rc = ZXDH_OK;
+ uint32_t index_num = 0;
+ uint32_t eram_sdt_no = 0;
+ uint32_t *p_index_array = NULL;
+
+ ZXDH_SDT_TBL_ETCAM_T sdt_acl = {0};
+ ZXDH_SDT_TBL_ERAM_T sdt_eram = {0};
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, sdt_no, &sdt_acl);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+ if (sdt_acl.table_type != ZXDH_SDT_TBLT_ETCAM) {
+ PMD_DRV_LOG(ERR, "SDT[%d] table_type[ %d ] is not etcam table!",
+ sdt_no, sdt_acl.table_type);
+ return ZXDH_ERR;
+ }
+
+ eram_sdt_no = zxdh_np_apt_get_sdt_partner(dev_id, sdt_no);
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, eram_sdt_no, &sdt_eram);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+ if (sdt_eram.table_type != ZXDH_SDT_TBLT_ERAM) {
+ PMD_DRV_LOG(ERR, "SDT[%d] table_type[ %d ] is not eram table!",
+ eram_sdt_no, sdt_eram.table_type);
+ return ZXDH_ERR;
+ }
+
+ p_index_array = (uint32_t *)rte_zmalloc(NULL,
+ sizeof(uint32_t) * sdt_eram.eram_table_depth, 0);
+ if (p_index_array == NULL) {
+ PMD_DRV_LOG(ERR, "%s point null!", __func__);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ rc = zxdh_np_dtb_acl_index_parse(dev_id, queue_id,
+ eram_sdt_no, vport, &index_num, p_index_array);
+ if (rc != ZXDH_OK) {
+ rte_free(p_index_array);
+ PMD_DRV_LOG(ERR, "acl index parse failed");
+ return ZXDH_ERR;
+ }
+
+ if (!index_num) {
+ PMD_DRV_LOG(ERR, "SDT[%d] vport[0x%x] item num is zero!", sdt_no, vport);
+ rte_free(p_index_array);
+ return ZXDH_OK;
+ }
+
+ rc = zxdh_np_dtb_acl_data_clear(dev_id, queue_id, sdt_no, index_num, p_index_array);
+ rc = zxdh_np_dtb_eram_data_clear(dev_id, queue_id, eram_sdt_no, index_num, p_index_array);
+ rc = zxdh_np_dtb_eram_stat_data_clear(dev_id, queue_id,
+ counter_id, rd_mode, index_num, p_index_array);
+ rte_free(p_index_array);
+
+ rc = zxdh_np_dtb_acl_index_release_by_vport(dev_id, sdt_no, vport);
+
+ return rc;
+}
diff --git a/drivers/net/zxdh/zxdh_np.h b/drivers/net/zxdh/zxdh_np.h
index 1b8f17474d..a463a6bb49 100644
--- a/drivers/net/zxdh/zxdh_np.h
+++ b/drivers/net/zxdh/zxdh_np.h
@@ -1751,6 +1751,15 @@ typedef enum zxdh_profile_type {
CAR_MAX
} ZXDH_PROFILE_TYPE;
+typedef enum zxdh_msg_acl_index_oper_e {
+ ZXDH_ACL_INDEX_REQUEST = 0,
+ ZXDH_ACL_INDEX_RELEASE = 1,
+ ZXDH_ACL_INDEX_VPORT_REL = 2,
+ ZXDH_ACL_INDEX_ALL_REL = 3,
+ ZXDH_ACL_INDEX_STAT_CLR = 4,
+ ZXDH_ACL_INDEX_MAX
+} ZXDH_MSG_ACL_INDEX_OPER_E;
+
typedef struct __rte_aligned(2) zxdh_version_compatible_reg_t {
uint8_t version_compatible_item;
uint8_t major;
@@ -1915,6 +1924,18 @@ typedef struct zxdh_dtb_dump_index_t {
uint32_t index_type;
} ZXDH_DTB_DUMP_INDEX_T;
+typedef struct __rte_aligned(2) zxdh_agent_channel_acl_msg_t {
+ uint8_t dev_id;
+ uint8_t type;
+ uint8_t oper;
+ uint8_t rsv;
+ uint32_t sdt_no;
+ uint32_t vport;
+ uint32_t index;
+ uint32_t counter_id;
+ uint32_t rd_mode;
+} ZXDH_AGENT_CHANNEL_ACL_MSG_T;
+
int zxdh_np_host_init(uint32_t dev_id, ZXDH_DEV_INIT_CTRL_T *p_dev_init_ctrl);
int zxdh_np_online_uninit(uint32_t dev_id, char *port_name, uint32_t queue_id);
int zxdh_np_dtb_table_entry_write(uint32_t dev_id, uint32_t queue_id,
@@ -1958,5 +1979,24 @@ uint32_t zxdh_np_dtb_hash_offline_delete(uint32_t dev_id,
uint32_t queue_id,
uint32_t sdt_no,
__rte_unused uint32_t flush_mode);
-
+uint32_t zxdh_np_dtb_acl_index_request(uint32_t dev_id, uint32_t sdt_no,
+ uint32_t vport,
+ uint32_t *p_index);
+
+uint32_t zxdh_np_dtb_acl_index_release(uint32_t dev_id,
+ uint32_t sdt_no,
+ uint32_t vport,
+ uint32_t index);
+uint32_t zxdh_np_dtb_acl_table_dump_by_vport(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t sdt_no,
+ uint32_t vport,
+ uint32_t *entry_num,
+ uint8_t *p_dump_data);
+uint32_t zxdh_np_dtb_acl_offline_delete(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t sdt_no,
+ uint32_t vport,
+ uint32_t counter_id,
+ uint32_t rd_mode);
#endif /* ZXDH_NP_H */
--
2.27.0
[-- Attachment #1.1.2: Type: text/html , Size: 156374 bytes --]
^ permalink raw reply [flat|nested] 9+ messages in thread
* [PATCH v1 2/2] net/zxdh: add support flow director ops
2025-06-17 9:31 [PATCH v1 0/2] add support flow director ops Bingbin Chen
2025-06-17 9:32 ` [PATCH v1 1/2] net/zxdh: npsdk add flow director table ops Bingbin Chen
@ 2025-06-17 9:32 ` Bingbin Chen
2025-06-18 7:49 ` [PATCH v2 0/2] " Bingbin Chen
2 siblings, 0 replies; 9+ messages in thread
From: Bingbin Chen @ 2025-06-17 9:32 UTC (permalink / raw)
To: stephen, wang.junlong1, yang.yonggang; +Cc: dev, Bingbin Chen
[-- Attachment #1.1.1: Type: text/plain, Size: 101891 bytes --]
Provide support for ETH, VLAN, IPv4/IPv6, TCP/UDP, and VXLAN matching with masks,
and support multiple actions including drop/count/mark/queue/rss and VXLAN decap/encap.
Signed-off-by: Bingbin Chen <chen.bingbin@zte.com.cn>
---
doc/guides/nics/features/zxdh.ini | 16 +
doc/guides/nics/zxdh.rst | 1 +
drivers/net/zxdh/meson.build | 1 +
drivers/net/zxdh/zxdh_common.h | 1 +
drivers/net/zxdh/zxdh_ethdev.c | 27 +
drivers/net/zxdh/zxdh_ethdev.h | 13 +-
drivers/net/zxdh/zxdh_ethdev_ops.c | 2 +-
drivers/net/zxdh/zxdh_ethdev_ops.h | 1 +
drivers/net/zxdh/zxdh_flow.c | 2004 ++++++++++++++++++++++++++++
drivers/net/zxdh/zxdh_flow.h | 237 ++++
drivers/net/zxdh/zxdh_msg.c | 263 +++-
drivers/net/zxdh/zxdh_msg.h | 31 +-
drivers/net/zxdh/zxdh_tables.h | 10 +-
13 files changed, 2536 insertions(+), 71 deletions(-)
create mode 100644 drivers/net/zxdh/zxdh_flow.c
create mode 100644 drivers/net/zxdh/zxdh_flow.h
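[Editorial note, not part of the patch: a minimal rte_flow rule of the kind
this series advertises -- match IPv4/UDP on ingress and steer to a queue with
a counter. Only generic rte_flow API calls are used; the exact spec/mask
combinations the zxdh PMD accepts are an assumption here.]

    #include <rte_flow.h>
    #include <rte_ip.h>
    #include <rte_byteorder.h>

    static struct rte_flow *
    zxdh_fd_example_rule(uint16_t port_id, uint16_t queue_id,
                    struct rte_flow_error *error)
    {
            struct rte_flow_attr attr = { .ingress = 1 };
            struct rte_flow_item_ipv4 ip_spec = {
                    .hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
            };
            struct rte_flow_item_ipv4 ip_mask = {
                    .hdr.dst_addr = RTE_BE32(0xffffffff),
            };
            struct rte_flow_item pattern[] = {
                    { .type = RTE_FLOW_ITEM_TYPE_ETH },
                    { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                      .spec = &ip_spec, .mask = &ip_mask },
                    { .type = RTE_FLOW_ITEM_TYPE_UDP },
                    { .type = RTE_FLOW_ITEM_TYPE_END },
            };
            struct rte_flow_action_queue queue = { .index = queue_id };
            struct rte_flow_action actions[] = {
                    { .type = RTE_FLOW_ACTION_TYPE_COUNT },
                    { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                    { .type = RTE_FLOW_ACTION_TYPE_END },
            };

            /* Validate first, then program the flow director entry. */
            if (rte_flow_validate(port_id, &attr, pattern, actions, error) != 0)
                    return NULL;
            return rte_flow_create(port_id, &attr, pattern, actions, error);
    }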
diff --git a/doc/guides/nics/features/zxdh.ini b/doc/guides/nics/features/zxdh.ini
index 277e17a584..bd20838676 100644
--- a/doc/guides/nics/features/zxdh.ini
+++ b/doc/guides/nics/features/zxdh.ini
@@ -34,5 +34,21 @@ Extended stats = Y
FW version = Y
Module EEPROM dump = Y
+[rte_flow items]
+eth = Y
+ipv4 = Y
+ipv6 = Y
+sctp = Y
+tcp = Y
+udp = Y
+vlan = Y
+vxlan = Y
+
[rte_flow actions]
drop = Y
+count = Y
+mark = Y
+queue = Y
+rss = Y
+vxlan_decap = Y
+vxlan_encap = Y
diff --git a/doc/guides/nics/zxdh.rst b/doc/guides/nics/zxdh.rst
index 372cb5b44f..47dabde97e 100644
--- a/doc/guides/nics/zxdh.rst
+++ b/doc/guides/nics/zxdh.rst
@@ -41,6 +41,7 @@ Features of the ZXDH PMD are:
- Hardware TSO for generic IP or UDP tunnel, including VXLAN
- Extended statistics query
- Ingress meter support
+- Flow API
Driver compilation and testing
diff --git a/drivers/net/zxdh/meson.build b/drivers/net/zxdh/meson.build
index a48a0d43c2..120cac5879 100644
--- a/drivers/net/zxdh/meson.build
+++ b/drivers/net/zxdh/meson.build
@@ -24,4 +24,5 @@ sources = files(
'zxdh_rxtx.c',
'zxdh_ethdev_ops.c',
'zxdh_mtr.c',
+ 'zxdh_flow.c',
)
diff --git a/drivers/net/zxdh/zxdh_common.h b/drivers/net/zxdh/zxdh_common.h
index c151101bbc..6d78ae0273 100644
--- a/drivers/net/zxdh/zxdh_common.h
+++ b/drivers/net/zxdh/zxdh_common.h
@@ -14,6 +14,7 @@
#define ZXDH_VF_LOCK_REG 0x90
#define ZXDH_VF_LOCK_ENABLE_MASK 0x1
#define ZXDH_ACQUIRE_CHANNEL_NUM_MAX 10
+#define VF_IDX(pcie_id) ((pcie_id) & 0xff)
struct zxdh_res_para {
uint64_t virt_addr;
diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index 80053678cb..3b9cb6fa63 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -1228,6 +1228,11 @@ zxdh_dev_close(struct rte_eth_dev *dev)
return -1;
}
+ if (zxdh_shared_data != NULL) {
+ zxdh_mtr_release(dev);
+ zxdh_flow_release(dev);
+ }
+
zxdh_intr_release(dev);
zxdh_np_uninit(dev);
zxdh_pci_reset(hw);
@@ -1428,6 +1433,7 @@ static const struct eth_dev_ops zxdh_eth_dev_ops = {
.get_module_eeprom = zxdh_dev_get_module_eeprom,
.dev_supported_ptypes_get = zxdh_dev_supported_ptypes_get,
.mtr_ops_get = zxdh_meter_ops_get,
+ .flow_ops_get = zxdh_flow_ops_get,
};
static int32_t
@@ -1504,6 +1510,8 @@ zxdh_dtb_dump_res_init(struct zxdh_hw *hw, ZXDH_DEV_INIT_CTRL_T *dpp_ctrl)
{"sdt_mc_table1", 5 * 1024 * 1024, ZXDH_SDT_MC_TABLE1, NULL},
{"sdt_mc_table2", 5 * 1024 * 1024, ZXDH_SDT_MC_TABLE2, NULL},
{"sdt_mc_table3", 5 * 1024 * 1024, ZXDH_SDT_MC_TABLE3, NULL},
+ {"sdt_acl_index_mng", 4 * 1024 * 1024, 30, NULL},
+ {"sdt_fd_table", 4 * 1024 * 1024, ZXDH_SDT_FD_TABLE, NULL},
};
struct zxdh_dev_shared_data *dev_sd = hw->dev_sd;
@@ -1723,6 +1731,7 @@ zxdh_free_sh_res(void)
rte_spinlock_lock(&zxdh_shared_data_lock);
if (zxdh_shared_data != NULL && zxdh_shared_data->init_done &&
(--zxdh_shared_data->dev_refcnt == 0)) {
+ rte_mempool_free(zxdh_shared_data->flow_mp);
rte_mempool_free(zxdh_shared_data->mtr_mp);
rte_mempool_free(zxdh_shared_data->mtr_profile_mp);
rte_mempool_free(zxdh_shared_data->mtr_policy_mp);
@@ -1734,6 +1743,7 @@ zxdh_free_sh_res(void)
static int
zxdh_init_sh_res(struct zxdh_shared_data *sd)
{
+ const char *MZ_ZXDH_FLOW_MP = "zxdh_flow_mempool";
const char *MZ_ZXDH_MTR_MP = "zxdh_mtr_mempool";
const char *MZ_ZXDH_MTR_PROFILE_MP = "zxdh_mtr_profile_mempool";
const char *MZ_ZXDH_MTR_POLICY_MP = "zxdh_mtr_policy_mempool";
@@ -1743,6 +1753,13 @@ zxdh_init_sh_res(struct zxdh_shared_data *sd)
struct rte_mempool *mtr_policy_mp = NULL;
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ flow_mp = rte_mempool_create(MZ_ZXDH_FLOW_MP, ZXDH_MAX_FLOW_NUM,
+ sizeof(struct zxdh_flow), 64, 0,
+ NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
+ if (flow_mp == NULL) {
+ PMD_DRV_LOG(ERR, "Cannot allocate zxdh flow mempool");
+ goto error;
+ }
mtr_mp = rte_mempool_create(MZ_ZXDH_MTR_MP, ZXDH_MAX_MTR_NUM,
sizeof(struct zxdh_mtr_object), 64, 0,
NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
@@ -1765,6 +1782,7 @@ zxdh_init_sh_res(struct zxdh_shared_data *sd)
PMD_DRV_LOG(ERR, "Cannot allocate zxdh mtr profile mempool");
goto error;
}
+ sd->flow_mp = flow_mp;
sd->mtr_mp = mtr_mp;
sd->mtr_profile_mp = mtr_profile_mp;
sd->mtr_policy_mp = mtr_policy_mp;
@@ -1814,6 +1832,7 @@ zxdh_init_once(struct rte_eth_dev *eth_dev)
ret = zxdh_init_sh_res(sd);
if (ret != 0)
goto out;
+ zxdh_flow_global_init();
rte_spinlock_init(&g_mtr_res.hw_plcr_res_lock);
memset(&g_mtr_res, 0, sizeof(g_mtr_res));
sd->init_done = true;
@@ -1837,10 +1856,17 @@ zxdh_tbl_entry_offline_destroy(struct zxdh_hw *hw)
ret = zxdh_np_dtb_hash_offline_delete(hw->dev_id, dtb_data->queueid, sdt_no, 0);
if (ret)
PMD_DRV_LOG(ERR, "sdt_no %d delete failed. code:%d ", sdt_no, ret);
+
sdt_no = ZXDH_SDT_MC_TABLE0 + hw->hash_search_index;
ret = zxdh_np_dtb_hash_offline_delete(hw->dev_id, dtb_data->queueid, sdt_no, 0);
if (ret)
PMD_DRV_LOG(ERR, "sdt_no %d delete failed. code:%d ", sdt_no, ret);
+
+ ret = zxdh_np_dtb_acl_offline_delete(hw->dev_id, dtb_data->queueid,
+ ZXDH_SDT_FD_TABLE, hw->vport.vport,
+ ZXDH_FLOW_STATS_INGRESS_BASE, 1);
+ if (ret)
+ PMD_DRV_LOG(ERR, "flow offline delete failed. code:%d", ret);
}
return ret;
}
@@ -2064,6 +2090,7 @@ zxdh_eth_dev_init(struct rte_eth_dev *eth_dev)
if (ret)
goto err_zxdh_init;
+ zxdh_flow_init(eth_dev);
zxdh_queue_res_get(eth_dev);
zxdh_msg_cb_reg(hw);
if (zxdh_priv_res_init(hw) != 0)
diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h
index 169af209a2..8e465d66b6 100644
--- a/drivers/net/zxdh/zxdh_ethdev.h
+++ b/drivers/net/zxdh/zxdh_ethdev.h
@@ -11,6 +11,7 @@
#include <eal_interrupts.h>
#include "zxdh_mtr.h"
+#include "zxdh_flow.h"
/* ZXDH PCI vendor/device ID. */
#define ZXDH_PCI_VENDOR_ID 0x1cf2
@@ -54,6 +55,7 @@
#define ZXDH_SLOT_MAX 256
#define ZXDH_MAX_VF 256
#define ZXDH_HASHIDX_MAX 6
+#define ZXDH_RSS_HASH_KEY_LEN 40U
union zxdh_virport_num {
uint16_t vport;
@@ -129,7 +131,10 @@ struct zxdh_hw {
uint8_t is_pf : 1,
rsv : 1,
i_mtr_en : 1,
- e_mtr_en : 1;
+ e_mtr_en : 1,
+ i_flow_en : 1,
+ e_flow_en : 1,
+ vxlan_flow_en : 1;
uint8_t msg_chan_init;
uint8_t phyport;
uint8_t panel_id;
@@ -149,7 +154,10 @@ struct zxdh_hw {
uint16_t queue_pool_count;
uint16_t queue_pool_start;
uint8_t dl_net_hdr_len;
- uint8_t rsv1[3];
+ uint16_t vxlan_fd_num;
+ uint8_t rsv1[1];
+
+ struct dh_flow_list dh_flow_list;
};
struct zxdh_dtb_shared_data {
@@ -174,6 +182,7 @@ struct zxdh_shared_data {
int32_t np_init_done;
uint32_t dev_refcnt;
struct zxdh_dtb_shared_data *dtb_data;
+ struct rte_mempool *flow_mp;
struct rte_mempool *mtr_mp;
struct rte_mempool *mtr_profile_mp;
struct rte_mempool *mtr_policy_mp;
diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.c b/drivers/net/zxdh/zxdh_ethdev_ops.c
index f8e8d26c50..10a174938e 100644
--- a/drivers/net/zxdh/zxdh_ethdev_ops.c
+++ b/drivers/net/zxdh/zxdh_ethdev_ops.c
@@ -1056,7 +1056,7 @@ zxdh_dev_rss_reta_update(struct rte_eth_dev *dev,
return ret;
}
-static uint16_t
+uint16_t
zxdh_hw_qid_to_logic_qid(struct rte_eth_dev *dev, uint16_t qid)
{
struct zxdh_hw *priv = (struct zxdh_hw *)dev->data->dev_private;
diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.h b/drivers/net/zxdh/zxdh_ethdev_ops.h
index 97a1eb4532..a83b808934 100644
--- a/drivers/net/zxdh/zxdh_ethdev_ops.h
+++ b/drivers/net/zxdh/zxdh_ethdev_ops.h
@@ -141,5 +141,6 @@ int zxdh_dev_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw
int zxdh_dev_get_module_info(struct rte_eth_dev *dev, struct rte_eth_dev_module_info *modinfo);
int zxdh_dev_get_module_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *info);
int zxdh_meter_ops_get(struct rte_eth_dev *dev, void *arg);
+uint16_t zxdh_hw_qid_to_logic_qid(struct rte_eth_dev *dev, uint16_t qid);
#endif /* ZXDH_ETHDEV_OPS_H */
diff --git a/drivers/net/zxdh/zxdh_flow.c b/drivers/net/zxdh/zxdh_flow.c
new file mode 100644
index 0000000000..7feb2beb31
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_flow.c
@@ -0,0 +1,2004 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+#include <rte_flow.h>
+#include <rte_bitmap.h>
+
+#include "zxdh_ethdev.h"
+#include "zxdh_logs.h"
+#include "zxdh_flow.h"
+#include "zxdh_tables.h"
+#include "zxdh_ethdev_ops.h"
+#include "zxdh_np.h"
+#include "zxdh_msg.h"
+
+#define ZXDH_IPV6_FRAG_HEADER 44
+#define ZXDH_TENANT_ARRAY_NUM 3
+#define ZXDH_VLAN_TCI_MASK 0xFFFF
+#define ZXDH_VLAN_PRI_MASK 0xE000
+#define ZXDH_VLAN_CFI_MASK 0x1000
+#define ZXDH_VLAN_VID_MASK 0x0FFF
+#define MAX_STRING_LEN 8192
+#define FLOW_INGRESS 0
+#define FLOW_EGRESS 1
+#define MAX_ENCAP1_NUM (256)
+#define INVALID_HANDLEIDX 0xffff
+#define ACTION_VXLAN_ENCAP_ITEMS_NUM (6)
+static struct dh_engine_list flow_engine_list = TAILQ_HEAD_INITIALIZER(flow_engine_list);
+static struct count_res flow_count_ref[MAX_FLOW_COUNT_NUM];
+static rte_spinlock_t fd_hw_res_lock = RTE_SPINLOCK_INITIALIZER;
+static uint8_t fd_hwres_bitmap[ZXDH_MAX_FLOW_NUM] = {0};
+
+#define MKDUMPSTR(buf, buf_size, cur_len, ...) \
+do { \
+ if ((cur_len) >= (buf_size)) \
+ break; \
+ (cur_len) += snprintf((buf) + (cur_len), (buf_size) - (cur_len), __VA_ARGS__); \
+} while (0)
+
+static inline void
+print_ether_addr(const char *what, const struct rte_ether_addr *eth_addr,
+ char print_buf[], uint32_t buf_size, uint32_t *cur_len)
+{
+ char buf[RTE_ETHER_ADDR_FMT_SIZE];
+
+ rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
+ MKDUMPSTR(print_buf, buf_size, *cur_len, "%s%s", what, buf);
+}
+
+static inline void
+zxdh_fd_flow_free_dtbentry(ZXDH_DTB_USER_ENTRY_T *dtb_entry)
+{
+ rte_free(dtb_entry->p_entry_data);
+ dtb_entry->p_entry_data = NULL;
+ dtb_entry->sdt_no = 0;
+}
+
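+/*
+ * Bitwise-invert a buffer in place. It is used further down to invert
+ * the flow key mask before the entry is written to the ACL table
+ * (presumably the TCAM expects the mask in inverted form).
+ */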
+static void
+data_bitwise(void *data, int bytecnt)
+{
+ int i;
+ uint32_t *temp = (uint32_t *)data;
+ int remain = bytecnt % 4;
+ for (i = 0; i < (bytecnt >> 2); i++) {
+ *(temp) = ~*(temp);
+ temp++;
+ }
+
+ if (remain) {
+ for (i = 0; i < remain; i++) {
+ uint8_t *tmp = (uint8_t *)temp;
+ *(uint8_t *)tmp = ~*(uint8_t *)tmp;
+ tmp++;
+ }
+ }
+}
+
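+/*
+ * Repack a flow op reply: copy struct zxdh_flow first, then leave a
+ * 4-byte zeroed gap before the remaining reply fields, so that the
+ * local zxdh_flow_op_rsp layout matches the layout used on the
+ * PF/VF message channel (assumed padding of the reply structure).
+ */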
+static void
+zxdh_adjust_flow_op_rsp_memory_layout(void *old_data,
+ size_t old_size, void *new_data)
+{
+ rte_memcpy(new_data, old_data, sizeof(struct zxdh_flow));
+ memset((char *)new_data + sizeof(struct zxdh_flow), 0, 4);
+ rte_memcpy((char *)new_data + sizeof(struct zxdh_flow) + 4,
+ (char *)old_data + sizeof(struct zxdh_flow),
+ old_size - sizeof(struct zxdh_flow));
+}
+
+void zxdh_flow_global_init(void)
+{
+ int i;
+ for (i = 0; i < MAX_FLOW_COUNT_NUM; i++) {
+ rte_spinlock_init(&flow_count_ref[i].count_lock);
+ flow_count_ref[i].count_ref = 0;
+ }
+}
+
+static void
+__entry_dump(char *print_buf, uint32_t buf_size,
+ uint32_t *cur_len, struct fd_flow_key *key)
+{
+ print_ether_addr("\nL2\t dst=", &key->mac_dst, print_buf, buf_size, cur_len);
+ print_ether_addr(" - src=", &key->mac_src, print_buf, buf_size, cur_len);
+ MKDUMPSTR(print_buf, buf_size, *cur_len, " -eth type=0x%04x", key->ether_type);
+ MKDUMPSTR(print_buf, buf_size, *cur_len,
+ " -vlan_pri=0x%02x -vlan_vlanid=0x%04x -vlan_tci=0x%04x ",
+ key->cvlan_pri, key->cvlan_vlanid, key->vlan_tci);
+ MKDUMPSTR(print_buf, buf_size, *cur_len,
+ " -vni=0x%02x 0x%02x 0x%02x\n", key->vni[0], key->vni[1], key->vni[2]);
+ MKDUMPSTR(print_buf, buf_size, *cur_len,
+ "L3\t dstip=0x%08x 0x%08x 0x%08x 0x%08x("IPv6_BYTES_FMT")\n",
+ *(uint32_t *)key->dst_ip, *((uint32_t *)key->dst_ip + 1),
+ *((uint32_t *)key->dst_ip + 2),
+ *((uint32_t *)key->dst_ip + 3),
+ IPv6_BYTES(key->dst_ip));
+ MKDUMPSTR(print_buf, buf_size, *cur_len,
+ "\t srcip=0x%08x 0x%08x 0x%08x 0x%08x("IPv6_BYTES_FMT")\n",
+ *((uint32_t *)key->src_ip), *((uint32_t *)key->src_ip + 1),
+ *((uint32_t *)key->src_ip + 2),
+ *((uint32_t *)key->src_ip + 3),
+ IPv6_BYTES(key->src_ip));
+ MKDUMPSTR(print_buf, buf_size, *cur_len,
+ " \t tos=0x%02x -nw-proto=0x%02x -frag-flag %u\n",
+ key->tos, key->nw_proto, key->frag_flag);
+ MKDUMPSTR(print_buf, buf_size, *cur_len,
+ "L4\t dstport=0x%04x -srcport=0x%04x", key->tp_dst, key->tp_src);
+}
+
+static void
+__result_dump(char *print_buf, uint32_t buf_size,
+ uint32_t *cur_len, struct fd_flow_result *res)
+{
+ MKDUMPSTR(print_buf, buf_size, *cur_len, " -hit_flag = 0x%04x", res->hit_flag);
+ MKDUMPSTR(print_buf, buf_size, *cur_len, " -action_idx = 0x%02x", res->action_idx);
+ MKDUMPSTR(print_buf, buf_size, *cur_len, " -qid = 0x%04x", res->qid);
+ MKDUMPSTR(print_buf, buf_size, *cur_len, " -mark_id = 0x%08x", res->mark_fd_id);
+ MKDUMPSTR(print_buf, buf_size, *cur_len, " -count_id = 0x%02x", res->countid);
+}
+
+static void offlow_key_dump(struct fd_flow_key *key, struct fd_flow_key *key_mask, FILE *file)
+{
+ char print_buf[MAX_STRING_LEN];
+ uint32_t buf_size = MAX_STRING_LEN;
+ uint32_t cur_len = 0;
+
+ MKDUMPSTR(print_buf, buf_size, cur_len, "offload key:\n\t");
+ __entry_dump(print_buf, buf_size, &cur_len, key);
+
+ MKDUMPSTR(print_buf, buf_size, cur_len, "\noffload key_mask:\n\t");
+ __entry_dump(print_buf, buf_size, &cur_len, key_mask);
+
+ PMD_DRV_LOG(INFO, "%s", print_buf);
+ MKDUMPSTR(print_buf, buf_size, cur_len, "\n");
+ if (file)
+ fputs(print_buf, file);
+}
+
+static void offlow_result_dump(struct fd_flow_result *res, FILE *file)
+{
+ char print_buf[MAX_STRING_LEN];
+ uint32_t buf_size = MAX_STRING_LEN;
+ uint32_t cur_len = 0;
+
+ MKDUMPSTR(print_buf, buf_size, cur_len, "offload result:\n");
+ __result_dump(print_buf, buf_size, &cur_len, res);
+ PMD_DRV_LOG(INFO, "%s", print_buf);
+ PMD_DRV_LOG(INFO, "memdump : ===result ===");
+ MKDUMPSTR(print_buf, buf_size, cur_len, "\n");
+ if (file)
+ fputs(print_buf, file);
+}
+
+static int
+set_flow_enable(struct rte_eth_dev *dev, uint8_t dir,
+ bool enable, struct rte_flow_error *error)
+{
+ struct zxdh_hw *priv = dev->data->dev_private;
+ struct zxdh_port_attr_table port_attr = {0};
+ int ret = 0;
+
+ if (priv->is_pf) {
+ ret = zxdh_get_port_attr(priv, priv->vport.vport, &port_attr);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "get port_attr failed");
+ return -1;
+ }
+ port_attr.fd_enable = enable;
+
+ ret = zxdh_set_port_attr(priv, priv->vport.vport, &port_attr);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "write port_attr failed");
+ return -1;
+ }
+ } else {
+ struct zxdh_msg_info msg_info = {0};
+ struct zxdh_port_attr_set_msg *attr_msg = &msg_info.data.port_attr_msg;
+
+ attr_msg->mode = ZXDH_PORT_FD_EN_OFF_FLAG;
+ attr_msg->value = enable;
+ zxdh_msg_head_build(priv, ZXDH_PORT_ATTRS_SET, &msg_info);
+ ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0);
+ }
+ if (ret) {
+ PMD_DRV_LOG(ERR, "port %d flow enable failed", priv->port_id);
+ return -rte_flow_error_set(error, EEXIST,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Meter enable failed.");
+ }
+ if (dir == FLOW_INGRESS)
+ priv->i_flow_en = !!enable;
+ else
+ priv->e_flow_en = !!enable;
+
+ return ret;
+}
+
+static int
+set_vxlan_enable(struct rte_eth_dev *dev, bool enable, struct rte_flow_error *error)
+{
+ struct zxdh_hw *priv = dev->data->dev_private;
+ struct zxdh_port_attr_table port_attr = {0};
+ int ret = 0;
+
+ if (priv->vxlan_flow_en == !!enable)
+ return 0;
+ if (priv->is_pf) {
+ ret = zxdh_get_port_attr(priv, priv->vport.vport, &port_attr);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "get port_attr failed");
+ return -1;
+ }
+ port_attr.fd_enable = enable;
+
+ ret = zxdh_set_port_attr(priv, priv->vport.vport, &port_attr);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "write port_attr failed");
+ return -1;
+ }
+ } else {
+ struct zxdh_msg_info msg_info = {0};
+ struct zxdh_port_attr_set_msg *attr_msg = &msg_info.data.port_attr_msg;
+
+ attr_msg->mode = ZXDH_PORT_VXLAN_OFFLOAD_EN_OFF;
+ attr_msg->value = enable;
+
+ zxdh_msg_head_build(priv, ZXDH_PORT_ATTRS_SET, &msg_info);
+ ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0);
+ }
+ if (ret) {
+ PMD_DRV_LOG(ERR, "port %d vxlan flow enable failed", priv->port_id);
+ return -rte_flow_error_set(error, EEXIST,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "vxlan offload enable failed.");
+ }
+ priv->vxlan_flow_en = !!enable;
+ return ret;
+}
+
+void zxdh_register_flow_engine(struct dh_flow_engine *engine)
+{
+ TAILQ_INSERT_TAIL(&flow_engine_list, engine, node);
+}
+
+static void zxdh_flow_free(struct zxdh_flow *dh_flow)
+{
+ if (dh_flow)
+ rte_mempool_put(zxdh_shared_data->flow_mp, dh_flow);
+}
+
+static struct dh_flow_engine *zxdh_get_flow_engine(struct rte_eth_dev *dev __rte_unused)
+{
+ struct dh_flow_engine *engine = NULL;
+ void *temp;
+
+ RTE_TAILQ_FOREACH_SAFE(engine, &flow_engine_list, node, temp) {
+ if (engine->type == FLOW_TYPE_FD_TCAM)
+ break;
+ }
+ return engine;
+}
+
+static int
+zxdh_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ struct dh_flow_engine *flow_engine = NULL;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+ flow_engine = zxdh_get_flow_engine(dev);
+ if (flow_engine == NULL || flow_engine->parse_pattern_action == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot find valid flow engine.");
+ return -rte_errno;
+ }
+ if (flow_engine->parse_pattern_action(dev, attr, pattern, actions, error, NULL) != 0)
+ return -rte_errno;
+ return 0;
+}
+
+static struct zxdh_flow *flow_exist_check(struct rte_eth_dev *dev, struct zxdh_flow *dh_flow)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ struct rte_flow *entry;
+ struct zxdh_flow *entry_flow;
+
+ TAILQ_FOREACH(entry, &hw->dh_flow_list, next) {
+ entry_flow = (struct zxdh_flow *)entry->driver_flow;
+ if ((memcmp(&entry_flow->flowentry.fd_flow.key, &dh_flow->flowentry.fd_flow.key,
+ sizeof(struct fd_flow_key)) == 0) &&
+ (memcmp(&entry_flow->flowentry.fd_flow.key_mask,
+ &dh_flow->flowentry.fd_flow.key_mask,
+ sizeof(struct fd_flow_key)) == 0)) {
+ return entry_flow;
+ }
+ }
+ return NULL;
+}
+
+static struct rte_flow *
+zxdh_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ struct rte_flow *flow = NULL;
+ struct zxdh_flow *dh_flow = NULL;
+ int ret = 0;
+ struct dh_flow_engine *flow_engine = NULL;
+
+ flow_engine = zxdh_get_flow_engine(dev);
+
+ if (flow_engine == NULL ||
+ flow_engine->parse_pattern_action == NULL ||
+ flow_engine->apply == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot find valid flow engine.");
+ return NULL;
+ }
+
+ flow = rte_zmalloc("rte_flow", sizeof(struct rte_flow), 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "flow malloc failed");
+ return NULL;
+ }
+ ret = rte_mempool_get(zxdh_shared_data->flow_mp, (void **)&dh_flow);
+ if (ret) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory from flowmp");
+ goto free_flow;
+ }
+ memset(dh_flow, 0, sizeof(struct zxdh_flow));
+ if (flow_engine->parse_pattern_action(dev, attr, pattern, actions, error, dh_flow) != 0) {
+ PMD_DRV_LOG(ERR, "parse_pattern_action failed zxdh_created failed");
+ goto free_flow;
+ }
+
+ if (flow_exist_check(dev, dh_flow) != NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "flow repeat .no add again");
+ goto free_flow;
+ }
+
+ ret = flow_engine->apply(dev, dh_flow, error, hw->vport.vport, hw->pcie_id);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "apply failed flow created failed");
+ goto free_flow;
+ }
+ flow->driver_flow = dh_flow;
+ flow->port_id = dev->data->port_id;
+ flow->type = ZXDH_FLOW_GROUP_TCAM;
+ TAILQ_INSERT_TAIL(&hw->dh_flow_list, flow, next);
+
+ if (hw->i_flow_en == 0) {
+ ret = set_flow_enable(dev, FLOW_INGRESS, 1, error);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "set flow enable failed");
+ TAILQ_REMOVE(&hw->dh_flow_list, flow, next);
+ goto free_flow;
+ }
+ }
+ return flow;
+free_flow:
+ zxdh_flow_free(dh_flow);
+ rte_free(flow);
+ return NULL;
+}
+
+static int
+zxdh_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct zxdh_hw *priv = dev->data->dev_private;
+ struct zxdh_flow *dh_flow = NULL;
+ int ret = 0;
+ struct dh_flow_engine *flow_engine = NULL;
+
+ flow_engine = zxdh_get_flow_engine(dev);
+ if (flow_engine == NULL ||
+ flow_engine->destroy == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot find valid flow engine.");
+ return -rte_errno;
+ }
+ if (flow->driver_flow)
+ dh_flow = (struct zxdh_flow *)flow->driver_flow;
+
+ if (dh_flow == NULL) {
+ PMD_DRV_LOG(ERR, "invalid flow");
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "invalid flow");
+ return -1;
+ }
+ ret = flow_engine->destroy(dev, dh_flow, error, priv->vport.vport, priv->pcie_id);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to destroy flow.");
+ return -rte_errno;
+ }
+ TAILQ_REMOVE(&priv->dh_flow_list, flow, next);
+ zxdh_flow_free(dh_flow);
+ rte_free(flow);
+
+ if (TAILQ_EMPTY(&priv->dh_flow_list)) {
+ ret = set_flow_enable(dev, FLOW_INGRESS, 0, error);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "clear flow enable failed");
+ return -rte_errno;
+ }
+ }
+ return ret;
+}
+
+
+static int
+zxdh_flow_query(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_action *actions,
+ void *data, struct rte_flow_error *error)
+{
+ struct zxdh_flow *dh_flow;
+ int ret = 0;
+ struct dh_flow_engine *flow_engine = NULL;
+
+ flow_engine = zxdh_get_flow_engine(dev);
+
+ if (flow_engine == NULL ||
+ flow_engine->query_count == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot find valid flow engine.");
+ return -rte_errno;
+ }
+
+ dh_flow = (struct zxdh_flow *)flow->driver_flow;
+ if (dh_flow == NULL) {
+ PMD_DRV_LOG(ERR, "flow is not exist");
+ return -1;
+ }
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = flow_engine->query_count(dev, dh_flow,
+ (struct rte_flow_query_count *)data, error);
+ break;
+ default:
+ ret = rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ goto out;
+ }
+ }
+out:
+ if (ret)
+ PMD_DRV_LOG(ERR, "flow query failed");
+ return ret;
+}
+
+static int zxdh_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct rte_flow *flow;
+ struct zxdh_flow *dh_flow = NULL;
+ struct zxdh_hw *hw = dev->data->dev_private;
+ struct zxdh_dtb_shared_data *dtb_data = &hw->dev_sd->dtb_sd;
+ struct dh_flow_engine *flow_engine = NULL;
+ struct zxdh_msg_info msg_info = {0};
+ uint8_t zxdh_msg_reply_info[ZXDH_ST_SZ_BYTES(msg_reply_info)] = {0};
+ int ret = 0;
+
+ flow_engine = zxdh_get_flow_engine(dev);
+ if (flow_engine == NULL) {
+ PMD_DRV_LOG(ERR, "get flow engine failed");
+ return -1;
+ }
+ ret = set_flow_enable(dev, FLOW_INGRESS, 0, error);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "clear flow enable failed");
+ return ret;
+ }
+
+ ret = set_vxlan_enable(dev, 0, error);
+ if (ret)
+ PMD_DRV_LOG(ERR, "clear vxlan enable failed");
+ hw->vxlan_fd_num = 0;
+
+ if (hw->is_pf) {
+ ret = zxdh_np_dtb_acl_offline_delete(hw->dev_id, dtb_data->queueid,
+ ZXDH_SDT_FD_TABLE, hw->vport.vport,
+ ZXDH_FLOW_STATS_INGRESS_BASE, 1);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s flush failed. code:%d", dev->data->name, ret);
+ } else {
+ zxdh_msg_head_build(hw, ZXDH_FLOW_HW_FLUSH, &msg_info);
+ ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(struct zxdh_msg_info),
+ (void *)zxdh_msg_reply_info, ZXDH_ST_SZ_BYTES(msg_reply_info));
+ if (ret) {
+ PMD_DRV_LOG(ERR, "port %d flow op %d flush failed ret %d",
+ hw->port_id, ZXDH_FLOW_HW_FLUSH, ret);
+ return -1;
+ }
+ }
+
+ /* Remove all flows */
+ while ((flow = TAILQ_FIRST(&hw->dh_flow_list))) {
+ TAILQ_REMOVE(&hw->dh_flow_list, flow, next);
+ if (flow->driver_flow)
+ dh_flow = (struct zxdh_flow *)flow->driver_flow;
+ if (dh_flow == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid flow Failed to destroy flow.");
+ ret = rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "Invalid flow ,flush failed");
+ return ret;
+ }
+
+ zxdh_flow_free(dh_flow);
+ rte_free(flow);
+ }
+ return ret;
+}
+
+static void
+handle_res_dump(struct rte_eth_dev *dev)
+{
+ struct zxdh_hw *priv = dev->data->dev_private;
+ uint16_t hwres_base = priv->vport.pfid << 10;
+ uint16_t hwres_cnt = ZXDH_MAX_FLOW_NUM >> 1;
+ uint16_t i;
+
+ PMD_DRV_LOG(DEBUG, "hwres_base %d", hwres_base);
+ rte_spinlock_lock(&fd_hw_res_lock);
+ for (i = 0; i < hwres_cnt; i++) {
+ if (fd_hwres_bitmap[hwres_base + i] == 1)
+ PMD_DRV_LOG(DEBUG, "used idx %d", i + hwres_base);
+ }
+ rte_spinlock_unlock(&fd_hw_res_lock);
+}
+
+static int
+zxdh_flow_dev_dump(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ FILE *file,
+ struct rte_flow_error *error __rte_unused)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ struct rte_flow *entry;
+ struct zxdh_flow *entry_flow;
+ uint32_t dtb_qid = 0;
+ uint32_t entry_num = 0;
+ uint16_t ret = 0;
+ ZXDH_DTB_ACL_ENTRY_INFO_T *fd_entry = NULL;
+ uint8_t *key = NULL;
+ uint8_t *key_mask = NULL;
+ uint8_t *result = NULL;
+
+ if (flow) {
+ entry_flow = flow_exist_check(dev, (struct zxdh_flow *)flow->driver_flow);
+ if (entry_flow) {
+ PMD_DRV_LOG(DEBUG, "handle idx %d:", entry_flow->flowentry.hw_idx);
+ offlow_key_dump(&entry_flow->flowentry.fd_flow.key,
+ &entry_flow->flowentry.fd_flow.key_mask, file);
+ offlow_result_dump(&entry_flow->flowentry.fd_flow.result, file);
+ }
+ } else {
+ if (hw->is_pf) {
+ dtb_qid = hw->dev_sd->dtb_sd.queueid;
+ fd_entry = rte_malloc(NULL,
+ sizeof(ZXDH_DTB_ACL_ENTRY_INFO_T) * ZXDH_MAX_FLOW_NUM, 1);
+ key = rte_malloc(NULL, sizeof(struct fd_flow_key) * ZXDH_MAX_FLOW_NUM, 1);
+ key_mask = rte_malloc(NULL,
+ sizeof(struct fd_flow_key) * ZXDH_MAX_FLOW_NUM, 1);
+ result = rte_malloc(NULL,
+ sizeof(struct fd_flow_result) * ZXDH_MAX_FLOW_NUM, 1);
+ if (!fd_entry || !key || !key_mask || !result) {
+ PMD_DRV_LOG(ERR, "fd_entry malloc failed!");
+ goto end;
+ }
+
+ for (int i = 0; i < ZXDH_MAX_FLOW_NUM; i++) {
+ fd_entry[i].key_data = key + i * sizeof(struct fd_flow_key);
+ fd_entry[i].key_mask = key_mask + i * sizeof(struct fd_flow_key);
+ fd_entry[i].p_as_rslt = result + i * sizeof(struct fd_flow_result);
+ }
+ ret = zxdh_np_dtb_acl_table_dump_by_vport(hw->dev_id, dtb_qid,
+ ZXDH_SDT_FD_TABLE, hw->vport.vport, &entry_num,
+ (uint8_t *)fd_entry);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "dpp_dtb_acl_table_dump_by_vport failed!");
+ goto end;
+ }
+ for (uint32_t i = 0; i < entry_num; i++) {
+ offlow_key_dump((struct fd_flow_key *)fd_entry[i].key_data,
+ (struct fd_flow_key *)fd_entry[i].key_mask, file);
+ offlow_result_dump((struct fd_flow_result *)fd_entry[i].p_as_rslt,
+ file);
+ }
+ rte_free(result);
+ rte_free(key_mask);
+ rte_free(key);
+ rte_free(fd_entry);
+ } else {
+ TAILQ_FOREACH(entry, &hw->dh_flow_list, next) {
+ entry_flow = (struct zxdh_flow *)entry->driver_flow;
+ offlow_key_dump(&entry_flow->flowentry.fd_flow.key,
+ &entry_flow->flowentry.fd_flow.key_mask, file);
+ offlow_result_dump(&entry_flow->flowentry.fd_flow.result, file);
+ }
+ }
+ }
+ handle_res_dump(dev);
+
+ return 0;
+end:
+ rte_free(result);
+ rte_free(key_mask);
+ rte_free(key);
+ rte_free(fd_entry);
+ return -1;
+}
+
+static int32_t
+get_available_handle(struct zxdh_hw *hw, uint16_t vport)
+{
+ int ret = 0;
+ uint32_t handle_idx = 0;
+
+ ret = zxdh_np_dtb_acl_index_request(hw->dev_id, ZXDH_SDT_FD_TABLE, vport, &handle_idx);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for hw!");
+ return INVALID_HANDLEIDX;
+ }
+ return handle_idx;
+}
+
+static int free_handle(struct zxdh_hw *hw, uint16_t handle_idx, uint16_t vport)
+{
+ int ret = zxdh_np_dtb_acl_index_release(hw->dev_id, ZXDH_SDT_FD_TABLE, vport, handle_idx);
+
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to free handle_idx %d for hw!", handle_idx);
+ return -1;
+ }
+ return 0;
+}
+
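+/*
+ * VXLAN encap data is split across two eram tables: encap0 occupies two
+ * consecutive entries per flow (index * 2 for the base data, * 2 + 1 for
+ * the outer destination IP), while encap1 occupies four entries per flow
+ * with the slot selected by the outer ethtype (IPv4 vs IPv6).
+ */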
+static uint16_t
+zxdh_encap0_to_dtbentry(struct zxdh_hw *hw __rte_unused,
+ struct zxdh_flow *dh_flow,
+ ZXDH_DTB_USER_ENTRY_T *dtb_entry)
+{
+ ZXDH_DTB_ERAM_ENTRY_INFO_T *dtb_eram_entry;
+ dtb_eram_entry = rte_zmalloc(NULL, sizeof(ZXDH_DTB_ERAM_ENTRY_INFO_T), 0);
+
+ if (dtb_eram_entry == NULL)
+ return INVALID_HANDLEIDX;
+
+ dtb_eram_entry->index = dh_flow->flowentry.fd_flow.result.encap0_index * 2;
+ dtb_eram_entry->p_data = (uint32_t *)&dh_flow->encap0;
+
+ dtb_entry->sdt_no = ZXDH_SDT_TUNNEL_ENCAP0_TABLE;
+ dtb_entry->p_entry_data = dtb_eram_entry;
+ return 0;
+}
+
+static uint16_t
+zxdh_encap0_ip_to_dtbentry(struct zxdh_hw *hw __rte_unused,
+ struct zxdh_flow *dh_flow,
+ ZXDH_DTB_USER_ENTRY_T *dtb_entry)
+{
+ ZXDH_DTB_ERAM_ENTRY_INFO_T *dtb_eram_entry;
+ dtb_eram_entry = rte_zmalloc(NULL, sizeof(ZXDH_DTB_ERAM_ENTRY_INFO_T), 0);
+
+ if (dtb_eram_entry == NULL)
+ return INVALID_HANDLEIDX;
+
+ dtb_eram_entry->index = dh_flow->flowentry.fd_flow.result.encap0_index * 2 + 1;
+ dtb_eram_entry->p_data = (uint32_t *)&dh_flow->encap0.dip;
+ dtb_entry->sdt_no = ZXDH_SDT_TUNNEL_ENCAP0_TABLE;
+ dtb_entry->p_entry_data = dtb_eram_entry;
+ return 0;
+}
+
+static uint16_t zxdh_encap1_to_dtbentry(struct zxdh_hw *hw __rte_unused,
+ struct zxdh_flow *dh_flow,
+ ZXDH_DTB_USER_ENTRY_T *dtb_entry)
+{
+ ZXDH_DTB_ERAM_ENTRY_INFO_T *dtb_eram_entry;
+ dtb_eram_entry = rte_zmalloc(NULL, sizeof(ZXDH_DTB_ERAM_ENTRY_INFO_T), 0);
+
+ if (dtb_eram_entry == NULL)
+ return INVALID_HANDLEIDX;
+
+ if (dh_flow->encap0.ethtype == 0)
+ dtb_eram_entry->index = dh_flow->flowentry.fd_flow.result.encap1_index * 4;
+ else
+ dtb_eram_entry->index = dh_flow->flowentry.fd_flow.result.encap1_index * 4 + 1;
+
+ dtb_eram_entry->p_data = (uint32_t *)&dh_flow->encap1;
+
+ dtb_entry->sdt_no = ZXDH_SDT_TUNNEL_ENCAP1_TABLE;
+ dtb_entry->p_entry_data = dtb_eram_entry;
+ return 0;
+}
+
+static uint16_t
+zxdh_encap1_ip_to_dtbentry(struct zxdh_hw *hw __rte_unused,
+ struct zxdh_flow *dh_flow,
+ ZXDH_DTB_USER_ENTRY_T *dtb_entry)
+{
+ ZXDH_DTB_ERAM_ENTRY_INFO_T *dtb_eram_entry;
+ dtb_eram_entry = rte_zmalloc(NULL, sizeof(ZXDH_DTB_ERAM_ENTRY_INFO_T), 0);
+
+ if (dtb_eram_entry == NULL)
+ return INVALID_HANDLEIDX;
+ if (dh_flow->encap0.ethtype == 0)
+ dtb_eram_entry->index = dh_flow->flowentry.fd_flow.result.encap1_index * 4 + 2;
+ else
+ dtb_eram_entry->index = dh_flow->flowentry.fd_flow.result.encap1_index * 4 + 3;
+ dtb_eram_entry->p_data = (uint32_t *)&dh_flow->encap1.sip;
+ dtb_entry->sdt_no = ZXDH_SDT_TUNNEL_ENCAP1_TABLE;
+ dtb_entry->p_entry_data = dtb_eram_entry;
+ return 0;
+}
+
+static int zxdh_hw_encap_insert(struct rte_eth_dev *dev,
+ struct zxdh_flow *dh_flow,
+ struct rte_flow_error *error)
+{
+ uint32_t ret;
+ struct zxdh_hw *hw = dev->data->dev_private;
+ uint32_t dtb_qid = hw->dev_sd->dtb_sd.queueid;
+ ZXDH_DTB_USER_ENTRY_T dtb_entry = {0};
+
+ zxdh_encap0_to_dtbentry(hw, dh_flow, &dtb_entry);
+ ret = zxdh_np_dtb_table_entry_write(hw->dev_id, dtb_qid, 1, &dtb_entry);
+ zxdh_fd_flow_free_dtbentry(&dtb_entry);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "write to hw failed");
+ return -1;
+ }
+
+ zxdh_encap0_ip_to_dtbentry(hw, dh_flow, &dtb_entry);
+ ret = zxdh_np_dtb_table_entry_write(hw->dev_id, dtb_qid, 1, &dtb_entry);
+ zxdh_fd_flow_free_dtbentry(&dtb_entry);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "write to hw failed");
+ return -1;
+ }
+
+ zxdh_encap1_to_dtbentry(hw, dh_flow, &dtb_entry);
+ ret = zxdh_np_dtb_table_entry_write(hw->dev_id, dtb_qid, 1, &dtb_entry);
+ zxdh_fd_flow_free_dtbentry(&dtb_entry);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "write to hw failed");
+ return -1;
+ }
+
+ zxdh_encap1_ip_to_dtbentry(hw, dh_flow, &dtb_entry);
+ ret = zxdh_np_dtb_table_entry_write(hw->dev_id, dtb_qid, 1, &dtb_entry);
+ zxdh_fd_flow_free_dtbentry(&dtb_entry);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "write to hw failed");
+ return -1;
+ }
+ return 0;
+}
+
+static uint16_t
+zxdh_fd_flow_to_dtbentry(struct zxdh_hw *hw __rte_unused,
+ struct zxdh_flow_info *fdflow,
+ ZXDH_DTB_USER_ENTRY_T *dtb_entry)
+{
+ ZXDH_DTB_ACL_ENTRY_INFO_T *dtb_acl_entry;
+ uint16_t handle_idx = 0;
+ dtb_acl_entry = rte_zmalloc("fdflow_dtbentry", sizeof(ZXDH_DTB_ACL_ENTRY_INFO_T), 0);
+
+ if (dtb_acl_entry == NULL)
+ return INVALID_HANDLEIDX;
+
+ dtb_acl_entry->key_data = (uint8_t *)&fdflow->fd_flow.key;
+ dtb_acl_entry->key_mask = (uint8_t *)&fdflow->fd_flow.key_mask;
+ dtb_acl_entry->p_as_rslt = (uint8_t *)&fdflow->fd_flow.result;
+
+ handle_idx = fdflow->hw_idx;
+
+ if (handle_idx >= ZXDH_MAX_FLOW_NUM) {
+ rte_free(dtb_acl_entry);
+ return INVALID_HANDLEIDX;
+ }
+ dtb_acl_entry->handle = handle_idx;
+ dtb_entry->sdt_no = ZXDH_SDT_FD_TABLE;
+ dtb_entry->p_entry_data = dtb_acl_entry;
+ return handle_idx;
+}
+
+static int zxdh_hw_flow_insert(struct rte_eth_dev *dev,
+ struct zxdh_flow *dh_flow,
+ struct rte_flow_error *error,
+ uint16_t vport)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ uint32_t dtb_qid = hw->dev_sd->dtb_sd.queueid;
+ ZXDH_DTB_USER_ENTRY_T dtb_entry = {0};
+ uint32_t ret;
+ uint16_t handle_idx;
+
+ struct zxdh_flow_info *flow = &dh_flow->flowentry;
+ handle_idx = zxdh_fd_flow_to_dtbentry(hw, flow, &dtb_entry);
+ if (handle_idx == INVALID_HANDLEIDX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory for hw");
+ return -1;
+ }
+ ret = zxdh_np_dtb_table_entry_write(hw->dev_id, dtb_qid, 1, &dtb_entry);
+ zxdh_fd_flow_free_dtbentry(&dtb_entry);
+ if (ret) {
+ ret = free_handle(hw, handle_idx, vport);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "realease handle_idx to hw failed");
+ return -1;
+ }
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "write to hw failed");
+ return -1;
+ }
+ dh_flow->flowentry.hw_idx = handle_idx;
+ return 0;
+}
+
+static int
+hw_count_query(struct zxdh_hw *hw, uint32_t countid, bool clear,
+ struct flow_stats *fstats, struct rte_flow_error *error)
+{
+ uint32_t stats_id = 0;
+ int ret = 0;
+ stats_id = countid;
+ if (stats_id >= ZXDH_MAX_FLOW_NUM) {
+ PMD_DRV_LOG(DEBUG, "query count id %d invalid", stats_id);
+ ret = rte_flow_error_set(error, ENODEV,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "query count id invalid");
+ return -rte_errno;
+ }
+ PMD_DRV_LOG(DEBUG, "query count id %d,clear %d ", stats_id, clear);
+ if (!clear)
+ ret = zxdh_np_dtb_stats_get(hw->dev_id, hw->dev_sd->dtb_sd.queueid, 1,
+ stats_id + ZXDH_FLOW_STATS_INGRESS_BASE,
+ (uint32_t *)fstats);
+ else
+ ret = zxdh_np_stat_ppu_cnt_get_ex(hw->dev_id, 1,
+ stats_id + ZXDH_FLOW_STATS_INGRESS_BASE,
+ 1, (uint32_t *)fstats);
+ if (ret)
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "fail to get flow stats");
+ return ret;
+}
+
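+/*
+ * Flow counters are shared via a reference count. The counter is read
+ * with the clear flag both when the first reference is taken and when
+ * the last reference is dropped, so a (re)used counter starts from zero.
+ */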
+static int
+count_deref(struct zxdh_hw *hw, uint32_t countid,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+ struct count_res *count_res = &flow_count_ref[countid];
+ struct flow_stats fstats = {0};
+
+ rte_spinlock_lock(&count_res->count_lock);
+
+ if (count_res->count_ref >= 1) {
+ count_res->count_ref--;
+ } else {
+ rte_spinlock_unlock(&count_res->count_lock);
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL,
+ "count deref underflow");
+ }
+ if (count_res->count_ref == 0)
+ ret = hw_count_query(hw, countid, 1, &fstats, error);
+
+ rte_spinlock_unlock(&count_res->count_lock);
+ return ret;
+}
+
+static int
+count_ref(struct zxdh_hw *hw, uint32_t countid, struct rte_flow_error *error)
+{
+ int ret = 0;
+ struct count_res *count_res = &flow_count_ref[countid];
+ struct flow_stats fstats = {0};
+
+ rte_spinlock_lock(&count_res->count_lock);
+ if (count_res->count_ref < 255) {
+ count_res->count_ref++;
+ } else {
+ rte_spinlock_unlock(&count_res->count_lock);
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL,
+ "count ref overflow");
+ }
+
+ if (count_res->count_ref == 1)
+ ret = hw_count_query(hw, countid, 1, &fstats, error);
+
+ rte_spinlock_unlock(&count_res->count_lock);
+ return ret;
+}
+
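+/*
+ * The ACL handle index allocated from the NP doubles as the flow counter
+ * id and as the encap0 table index for this rule; the encap1 index is
+ * derived from the hash search index plus the VF number for VF ports.
+ */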
+int
+pf_fd_hw_apply(struct rte_eth_dev *dev, struct zxdh_flow *dh_flow,
+ struct rte_flow_error *error, uint16_t vport, uint16_t pcieid)
+{
+ int ret = 0;
+ struct zxdh_hw *hw = dev->data->dev_private;
+ uint8_t vf_index = 0;
+ uint8_t action_bits = dh_flow->flowentry.fd_flow.result.action_idx;
+ uint32_t countid = MAX_FLOW_COUNT_NUM;
+ uint32_t handle_idx = 0;
+ union zxdh_virport_num port = {0};
+
+ port.vport = vport;
+ handle_idx = get_available_handle(hw, vport);
+ if (handle_idx >= ZXDH_MAX_FLOW_NUM) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to allocate memory for hw");
+ return -1;
+ }
+ dh_flow->flowentry.hw_idx = handle_idx;
+ if ((action_bits & (1 << FD_ACTION_COUNT_BIT)) != 0) {
+ countid = handle_idx;
+ dh_flow->flowentry.fd_flow.result.countid = countid;
+ }
+
+ if ((action_bits & (1 << FD_ACTION_VXLAN_ENCAP)) != 0) {
+ dh_flow->flowentry.fd_flow.result.encap0_index = handle_idx;
+ if (!port.vf_flag) {
+ dh_flow->flowentry.fd_flow.result.encap1_index =
+ hw->hash_search_index * MAX_ENCAP1_NUM;
+ } else {
+ vf_index = VF_IDX(pcieid);
+ if (vf_index < (ZXDH_MAX_VF - 1)) {
+ dh_flow->flowentry.fd_flow.result.encap1_index =
+ hw->hash_search_index * MAX_ENCAP1_NUM + vf_index + 1;
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "encap1 vf_index is too big");
+ return -1;
+ }
+ }
+ PMD_DRV_LOG(DEBUG, "encap_index (%d)(%d)",
+ dh_flow->flowentry.fd_flow.result.encap0_index,
+ dh_flow->flowentry.fd_flow.result.encap1_index);
+ if (zxdh_hw_encap_insert(dev, dh_flow, error) != 0)
+ return -1;
+ }
+ ret = zxdh_hw_flow_insert(dev, dh_flow, error, vport);
+ if (!ret && countid < MAX_FLOW_COUNT_NUM)
+ ret = count_ref(hw, countid, error);
+
+ if (!ret) {
+ if (!port.vf_flag) {
+ if (((action_bits & (1 << FD_ACTION_VXLAN_ENCAP)) != 0) ||
+ ((action_bits & (1 << FD_ACTION_VXLAN_DECAP)) != 0)) {
+ hw->vxlan_fd_num++;
+ if (hw->vxlan_fd_num == 1)
+ set_vxlan_enable(dev, 1, error);
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int
+zxdh_hw_flow_del(struct rte_eth_dev *dev,
+ struct zxdh_flow *dh_flow,
+ struct rte_flow_error *error,
+ uint16_t vport)
+{
+ struct zxdh_flow_info *flow = &dh_flow->flowentry;
+ ZXDH_DTB_USER_ENTRY_T dtb_entry = {0};
+ struct zxdh_hw *hw = dev->data->dev_private;
+ uint32_t dtb_qid = hw->dev_sd->dtb_sd.queueid;
+ uint32_t ret;
+ uint16_t handle_idx;
+
+ handle_idx = zxdh_fd_flow_to_dtbentry(hw, flow, &dtb_entry);
+ if (handle_idx >= ZXDH_MAX_FLOW_NUM) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory for hw");
+ return -1;
+ }
+ ret = zxdh_np_dtb_table_entry_delete(hw->dev_id, dtb_qid, 1, &dtb_entry);
+ zxdh_fd_flow_free_dtbentry(&dtb_entry);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "delete to hw failed");
+ return -1;
+ }
+ ret = free_handle(hw, handle_idx, vport);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "realease handle_idx to hw failed");
+ return -1;
+ }
+ PMD_DRV_LOG(DEBUG, "realease handle_idx to hw succ! %d", handle_idx);
+ return ret;
+}
+
+int
+pf_fd_hw_destroy(struct rte_eth_dev *dev, struct zxdh_flow *dh_flow,
+ struct rte_flow_error *error, uint16_t vport,
+ uint16_t pcieid __rte_unused)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ union zxdh_virport_num port = {0};
+ int ret = 0;
+
+ port.vport = vport;
+ ret = zxdh_hw_flow_del(dev, dh_flow, error, vport);
+ PMD_DRV_LOG(DEBUG, "destroy handle id %d", dh_flow->flowentry.hw_idx);
+ if (!ret) {
+ uint8_t action_bits = dh_flow->flowentry.fd_flow.result.action_idx;
+ uint32_t countid;
+ countid = dh_flow->flowentry.hw_idx;
+ if ((action_bits & (1 << FD_ACTION_COUNT_BIT)) != 0)
+ ret = count_deref(hw, countid, error);
+ if (!port.vf_flag) {
+ if (((action_bits & (1 << FD_ACTION_VXLAN_ENCAP)) != 0) ||
+ ((action_bits & (1 << FD_ACTION_VXLAN_DECAP)) != 0)) {
+ hw->vxlan_fd_num--;
+ if (hw->vxlan_fd_num == 0)
+ set_vxlan_enable(dev, 0, error);
+ }
+ }
+ }
+ return ret;
+}
+
+static int
+zxdh_hw_flow_query(struct rte_eth_dev *dev, struct zxdh_flow *dh_flow,
+ struct rte_flow_error *error)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ int ret = 0;
+ struct zxdh_flow_info *flow = &dh_flow->flowentry;
+ ZXDH_DTB_USER_ENTRY_T dtb_entry;
+ uint16_t handle_idx;
+
+ handle_idx = zxdh_fd_flow_to_dtbentry(hw, flow, &dtb_entry);
+ if (handle_idx >= ZXDH_MAX_FLOW_NUM) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to build hw entry for query");
+ ret = -1;
+ goto free_res;
+ }
+ ret = zxdh_np_dtb_table_entry_get(hw->dev_id, hw->dev_sd->dtb_sd.queueid, &dtb_entry, 0);
+ if (ret != 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed query entry from hw ");
+ goto free_res;
+ }
+
+free_res:
+ zxdh_fd_flow_free_dtbentry(&dtb_entry);
+
+ return ret;
+}
+
+int
+pf_fd_hw_query_count(struct rte_eth_dev *dev,
+ struct zxdh_flow *flow,
+ struct rte_flow_query_count *count,
+ struct rte_flow_error *error)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ struct flow_stats fstats = {0};
+ int ret = 0;
+ uint32_t countid;
+
+ memset(&flow->flowentry.fd_flow.result, 0, sizeof(struct fd_flow_result));
+ ret = zxdh_hw_flow_query(dev, flow, error);
+ if (ret) {
+ ret = rte_flow_error_set(error, ENODEV,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "query failed");
+ return -rte_errno;
+ }
+ countid = flow->flowentry.hw_idx;
+ if (countid >= ZXDH_MAX_FLOW_NUM) {
+ ret = rte_flow_error_set(error, ENODEV,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "query count id invalid");
+ return -rte_errno;
+ }
+ ret = hw_count_query(hw, countid, 0, &fstats, error);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "fail to get flow stats");
+ return ret;
+ }
+ count->bytes = (uint64_t)(rte_le_to_cpu_32(fstats.hit_bytes_hi)) << 32 |
+ rte_le_to_cpu_32(fstats.hit_bytes_lo);
+ count->hits = (uint64_t)(rte_le_to_cpu_32(fstats.hit_pkts_hi)) << 32 |
+ rte_le_to_cpu_32(fstats.hit_pkts_lo);
+ return ret;
+}
+
+static int
+fd_flow_parse_attr(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error,
+ struct zxdh_flow *dh_flow)
+{
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->group >= MAX_GROUP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr, "Not support group.");
+ return -rte_errno;
+ }
+
+ if (dh_flow) {
+ dh_flow->group = attr->group;
+ dh_flow->direct = (attr->ingress == 1) ? 0 : 1;
+ dh_flow->pri = attr->priority;
+ }
+
+ return 0;
+}
+
+static int fd_flow_parse_pattern(struct rte_eth_dev *dev, const struct rte_flow_item *items,
+ struct rte_flow_error *error, struct zxdh_flow *dh_flow)
+{
+ struct zxdh_hw *priv = dev->data->dev_private;
+ struct zxdh_flow_info *flow = NULL;
+ const struct rte_flow_item *item;
+ const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec = NULL, *ipv6_mask = NULL;
+ const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec, *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+ const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
+ struct fd_flow_key *key, *key_mask;
+
+ if (dh_flow) {
+ flow = &dh_flow->flowentry;
+ } else {
+ flow = rte_zmalloc("dh_flow", sizeof(*flow), 0);
+ if (flow == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory ");
+ return -rte_errno;
+ }
+ }
+
+ key = &flow->fd_flow.key;
+ key_mask = &flow->fd_flow.key_mask;
+ key->vfid = rte_cpu_to_be_16(priv->vfid);
+ key_mask->vfid = 0xffff;
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ item = items;
+ if (items->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ items,
+ "Not support range");
+ return -rte_errno;
+ }
+
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+ if (eth_spec && eth_mask) {
+ key->mac_dst = eth_spec->dst;
+ key->mac_src = eth_spec->src;
+ key_mask->mac_dst = eth_mask->dst;
+ key_mask->mac_src = eth_mask->src;
+
+ if (eth_mask->type == 0xffff) {
+ key->ether_type = eth_spec->type;
+ key_mask->ether_type = eth_mask->type;
+ }
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
+ if (vlan_spec && vlan_mask) {
+ key->vlan_tci = vlan_spec->tci;
+ key_mask->vlan_tci = vlan_mask->tci;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ ipv4_spec = item->spec;
+ ipv4_mask = item->mask;
+
+ if (ipv4_spec && ipv4_mask) {
+ /* Check IPv4 mask and update input set */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.hdr_checksum ||
+ ipv4_mask->hdr.time_to_live) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 mask.");
+ return -rte_errno;
+ }
+ /* Get the filter info */
+ key->nw_proto =
+ ipv4_spec->hdr.next_proto_id;
+ key->tos =
+ ipv4_spec->hdr.type_of_service;
+ key_mask->nw_proto =
+ ipv4_mask->hdr.next_proto_id;
+ key_mask->tos =
+ ipv4_mask->hdr.type_of_service;
+ key->frag_flag = (ipv4_spec->hdr.fragment_offset != 0) ? 1 : 0;
+ key_mask->frag_flag = (ipv4_mask->hdr.fragment_offset != 0) ? 1 : 0;
+ rte_memcpy((uint32_t *)key->src_ip + 3,
+ &ipv4_spec->hdr.src_addr, 4);
+ rte_memcpy((uint32_t *)key->dst_ip + 3,
+ &ipv4_spec->hdr.dst_addr, 4);
+ rte_memcpy((uint32_t *)key_mask->src_ip + 3,
+ &ipv4_mask->hdr.src_addr, 4);
+ rte_memcpy((uint32_t *)key_mask->dst_ip + 3,
+ &ipv4_mask->hdr.dst_addr, 4);
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ ipv6_spec = item->spec;
+ ipv6_mask = item->mask;
+
+ if (ipv6_spec && ipv6_mask) {
+ /* Check IPv6 mask and update input set */
+ if (ipv6_mask->hdr.payload_len ||
+ ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 mask");
+ return -rte_errno;
+ }
+ key->tc =
+ (uint8_t)((ipv6_spec->hdr.vtc_flow &
+ RTE_IPV6_HDR_TC_MASK) >>
+ RTE_IPV6_HDR_TC_SHIFT);
+ key_mask->tc =
+ (uint8_t)((ipv6_mask->hdr.vtc_flow &
+ RTE_IPV6_HDR_TC_MASK) >>
+ RTE_IPV6_HDR_TC_SHIFT);
+
+ key->nw_proto = ipv6_spec->hdr.proto;
+ key_mask->nw_proto = ipv6_mask->hdr.proto;
+
+ rte_memcpy(key->src_ip,
+ &ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(key->dst_ip,
+ &ipv6_spec->hdr.dst_addr, 16);
+ rte_memcpy(key_mask->src_ip,
+ &ipv6_mask->hdr.src_addr, 16);
+ rte_memcpy(key_mask->dst_ip,
+ &ipv6_mask->hdr.dst_addr, 16);
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
+
+ if (tcp_spec && tcp_mask) {
+ /* Check TCP mask and update input set */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp ||
+ (tcp_mask->hdr.src_port &&
+ tcp_mask->hdr.src_port != UINT16_MAX) ||
+ (tcp_mask->hdr.dst_port &&
+ tcp_mask->hdr.dst_port != UINT16_MAX)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ key->tp_src = tcp_spec->hdr.src_port;
+ key_mask->tp_src = tcp_mask->hdr.src_port;
+
+ key->tp_dst = tcp_spec->hdr.dst_port;
+ key_mask->tp_dst = tcp_mask->hdr.dst_port;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ udp_spec = item->spec;
+ udp_mask = item->mask;
+
+ if (udp_spec && udp_mask) {
+ /* Check UDP mask and update input set*/
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum ||
+ (udp_mask->hdr.src_port &&
+ udp_mask->hdr.src_port != UINT16_MAX) ||
+ (udp_mask->hdr.dst_port &&
+ udp_mask->hdr.dst_port != UINT16_MAX)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ key->tp_src = udp_spec->hdr.src_port;
+ key_mask->tp_src = udp_mask->hdr.src_port;
+
+ key->tp_dst = udp_spec->hdr.dst_port;
+ key_mask->tp_dst = udp_mask->hdr.dst_port;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_SCTP:
+ sctp_spec = item->spec;
+ sctp_mask = item->mask;
+
+ if (!(sctp_spec && sctp_mask))
+ break;
+
+ /* Check SCTP mask and update input set */
+ if (sctp_mask->hdr.cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid sctp mask");
+ return -rte_errno;
+ }
+
+ /* Mask for SCTP src/dst ports not supported */
+ if (sctp_mask->hdr.src_port &&
+ sctp_mask->hdr.src_port != UINT16_MAX)
+ return -rte_errno;
+ if (sctp_mask->hdr.dst_port &&
+ sctp_mask->hdr.dst_port != UINT16_MAX)
+ return -rte_errno;
+
+ key->tp_src = sctp_spec->hdr.src_port;
+ key_mask->tp_src = sctp_mask->hdr.src_port;
+ key->tp_dst = sctp_spec->hdr.dst_port;
+ key_mask->tp_dst = sctp_mask->hdr.dst_port;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ {
+ vxlan_spec = item->spec;
+ vxlan_mask = item->mask;
+ static const struct rte_flow_item_vxlan flow_item_vxlan_mask = {
+ .vni = "\xff\xff\xff",
+ };
+ if (!(vxlan_spec && vxlan_mask))
+ break;
+ if (memcmp(vxlan_mask, &flow_item_vxlan_mask,
+ sizeof(struct rte_flow_item_vxlan))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vxlan mask");
+ return -rte_errno;
+ }
+ rte_memcpy(key->vni, vxlan_spec->vni, 3);
+ rte_memcpy(key_mask->vni, vxlan_mask->vni, 3);
+ break;
+ }
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "item not supported");
+ }
+ }
+
+ data_bitwise(key_mask, sizeof(*key_mask));
+ return 0;
+}
+
+static inline int
+validate_action_rss(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_rss *rss = action->conf;
+
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
+ rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->func,
+ "RSS hash function not supported");
+ if (rss->level > 1)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->level,
+ "tunnel RSS is not supported");
+ /* allow RSS key_len 0 in case of NULL (default) RSS key. */
+ if (rss->key_len == 0 && rss->key != NULL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->key_len,
+ "RSS hash key length 0");
+ if (rss->key_len > 0 && rss->key_len < ZXDH_RSS_HASH_KEY_LEN)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->key_len,
+ "RSS hash key too small");
+ if (rss->key_len > ZXDH_RSS_HASH_KEY_LEN)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->key_len,
+ "RSS hash key too large");
+ if (rss->queue_num > dev->data->nb_rx_queues)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->queue_num,
+ "number of queues too large");
+ if (!rss->queue_num)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "No queues configured");
+ return 0;
+}
+
+static int
+fd_flow_parse_vxlan_encap(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_item *item,
+ struct zxdh_flow *dh_flow)
+{
+ const struct rte_flow_item *items;
+ const struct rte_flow_item_eth *item_eth;
+ const struct rte_flow_item_vlan *item_vlan;
+ const struct rte_flow_item_ipv4 *item_ipv4;
+ const struct rte_flow_item_ipv6 *item_ipv6;
+ const struct rte_flow_item_udp *item_udp;
+ const struct rte_flow_item_vxlan *item_vxlan;
+ uint32_t i = 0;
+ rte_be32_t addr;
+
+ for (i = 0; i < ACTION_VXLAN_ENCAP_ITEMS_NUM; i++) {
+ items = &item[i];
+ switch (items->type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ item_eth = items->spec;
+ rte_memcpy(&dh_flow->encap0.dst_mac1, item_eth->dst.addr_bytes, 2);
+ rte_memcpy(&dh_flow->encap1.src_mac1, item_eth->src.addr_bytes, 2);
+ rte_memcpy(&dh_flow->encap0.dst_mac2, &item_eth->dst.addr_bytes[2], 4);
+ rte_memcpy(&dh_flow->encap1.src_mac2, &item_eth->src.addr_bytes[2], 4);
+ dh_flow->encap0.dst_mac1 = rte_bswap16(dh_flow->encap0.dst_mac1);
+ dh_flow->encap1.src_mac1 = rte_bswap16(dh_flow->encap1.src_mac1);
+ dh_flow->encap0.dst_mac2 = rte_bswap32(dh_flow->encap0.dst_mac2);
+ dh_flow->encap1.src_mac2 = rte_bswap32(dh_flow->encap1.src_mac2);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ item_vlan = items->spec;
+ dh_flow->encap1.vlan_tci = item_vlan->hdr.vlan_tci;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ item_ipv4 = items->spec;
+ dh_flow->encap0.ethtype = 0;
+ dh_flow->encap0.tos = item_ipv4->hdr.type_of_service;
+ dh_flow->encap0.ttl = item_ipv4->hdr.time_to_live;
+ addr = rte_bswap32(item_ipv4->hdr.src_addr);
+ rte_memcpy((uint32_t *)dh_flow->encap1.sip.ip_addr + 3, &addr, 4);
+ addr = rte_bswap32(item_ipv4->hdr.dst_addr);
+ rte_memcpy((uint32_t *)dh_flow->encap0.dip.ip_addr + 3, &addr, 4);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ item_ipv6 = items->spec;
+ dh_flow->encap0.ethtype = 1;
+ dh_flow->encap0.tos =
+ (item_ipv6->hdr.vtc_flow & RTE_IPV6_HDR_TC_MASK) >>
+ RTE_IPV6_HDR_TC_SHIFT;
+ dh_flow->encap0.ttl = item_ipv6->hdr.hop_limits;
+ rte_memcpy(dh_flow->encap1.sip.ip_addr, &item_ipv6->hdr.src_addr, 16);
+ dh_flow->encap1.sip.ip_addr[0] =
+ rte_bswap32(dh_flow->encap1.sip.ip_addr[0]);
+ dh_flow->encap1.sip.ip_addr[1] =
+ rte_bswap32(dh_flow->encap1.sip.ip_addr[1]);
+ dh_flow->encap1.sip.ip_addr[2] =
+ rte_bswap32(dh_flow->encap1.sip.ip_addr[2]);
+ dh_flow->encap1.sip.ip_addr[3] =
+ rte_bswap32(dh_flow->encap1.sip.ip_addr[3]);
+ rte_memcpy(dh_flow->encap0.dip.ip_addr, &item_ipv6->hdr.dst_addr, 16);
+ dh_flow->encap0.dip.ip_addr[0] =
+ rte_bswap32(dh_flow->encap0.dip.ip_addr[0]);
+ dh_flow->encap0.dip.ip_addr[1] =
+ rte_bswap32(dh_flow->encap0.dip.ip_addr[1]);
+ dh_flow->encap0.dip.ip_addr[2] =
+ rte_bswap32(dh_flow->encap0.dip.ip_addr[2]);
+ dh_flow->encap0.dip.ip_addr[3] =
+ rte_bswap32(dh_flow->encap0.dip.ip_addr[3]);
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ item_udp = items->spec;
+ dh_flow->encap0.tp_dst = item_udp->hdr.dst_port;
+ dh_flow->encap0.tp_dst = rte_bswap16(dh_flow->encap0.tp_dst);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ item_vxlan = items->spec;
+ dh_flow->encap0.vni = item_vxlan->vni[0] * 65536 +
+ item_vxlan->vni[1] * 256 + item_vxlan->vni[2];
+ break;
+ case RTE_FLOW_ITEM_TYPE_END:
+ default:
+ break;
+ }
+ }
+ dh_flow->encap0.hit_flag = 1;
+ dh_flow->encap1.hit_flag = 1;
+
+ return 0;
+}
+
+static int
+fd_flow_parse_action(struct rte_eth_dev *dev, const struct rte_flow_action *actions,
+ struct rte_flow_error *error, struct zxdh_flow *dh_flow)
+{
+ struct zxdh_flow_info *flow = NULL;
+ struct fd_flow_result *result = NULL;
+ const struct rte_flow_item *enc_item = NULL;
+ uint8_t action_bitmap = 0;
+ uint32_t dest_num = 0;
+ uint32_t mark_num = 0;
+ uint32_t counter_num = 0;
+ int ret;
+
+ rte_errno = 0;
+ if (dh_flow) {
+ flow = &dh_flow->flowentry;
+ } else {
+ flow = rte_zmalloc("dh_flow", sizeof(*flow), 0);
+ if (flow == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory ");
+ return -rte_errno;
+ }
+ }
+ result = &flow->fd_flow.result;
+ action_bitmap = result->action_idx;
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ {
+ dest_num++;
+ if (action_bitmap & (1 << FD_ACTION_RSS_BIT)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "multi rss action no support.");
+ goto free_flow;
+ }
+ ret = validate_action_rss(dev, actions, error);
+ if (ret)
+ goto free_flow;
+ action_bitmap |= (1 << FD_ACTION_RSS_BIT);
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ {
+ mark_num++;
+ if (action_bitmap & (1 << FD_ACTION_MARK_BIT)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "multi mark action no support.");
+ goto free_flow;
+ }
+ const struct rte_flow_action_mark *act_mark = actions->conf;
+ result->mark_fd_id = rte_cpu_to_le_32(act_mark->id);
+ action_bitmap |= (1 << FD_ACTION_MARK_BIT);
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ {
+ counter_num++;
+ if (action_bitmap & (1 << FD_ACTION_COUNT_BIT)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "multi count action no support.");
+ goto free_flow;
+ }
+ const struct rte_flow_action_count *act_count = actions->conf;
+ if (act_count->id >= MAX_FLOW_COUNT_NUM) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "count action id no support.");
+ goto free_flow;
+ }
+ result->countid = act_count->id;
+ action_bitmap |= (1 << FD_ACTION_COUNT_BIT);
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ {
+ dest_num++;
+ if (action_bitmap & (1 << FD_ACTION_QUEUE_BIT)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "multi queue action no support.");
+ goto free_flow;
+ }
+ const struct rte_flow_action_queue *act_q;
+ act_q = actions->conf;
+ if (act_q->index >= dev->data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Invalid queue ID");
+ goto free_flow;
+ }
+ ret = zxdh_hw_qid_to_logic_qid(dev, act_q->index << 1);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Invalid phy queue ID .");
+ goto free_flow;
+ }
+ result->qid = rte_cpu_to_le_16(ret);
+ action_bitmap |= (1 << FD_ACTION_QUEUE_BIT);
+
+ PMD_DRV_LOG(DEBUG, "QID RET 0x%x", result->qid);
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ {
+ dest_num++;
+ if (action_bitmap & (1 << FD_ACTION_DROP_BIT)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "multi drop action no support.");
+ goto free_flow;
+ }
+ action_bitmap |= (1 << FD_ACTION_DROP_BIT);
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+ {
+ dest_num++;
+ if (action_bitmap & (1 << FD_ACTION_VXLAN_DECAP)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "multi drop action no support.");
+ goto free_flow;
+ }
+ action_bitmap |= (1 << FD_ACTION_VXLAN_DECAP);
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ enc_item = ((const struct rte_flow_action_vxlan_encap *)
+ actions->conf)->definition;
+ if (dh_flow != NULL)
+ fd_flow_parse_vxlan_encap(dev, enc_item, dh_flow);
+ dest_num++;
+ if (action_bitmap & (1 << FD_ACTION_VXLAN_ENCAP)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "multi drop action no support.");
+ goto free_flow;
+ }
+ action_bitmap |= (1 << FD_ACTION_VXLAN_ENCAP);
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Invalid action.");
+ goto free_flow;
+ }
+ }
+
+ if (dest_num >= 2) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Unsupported action combination");
+ return -rte_errno;
+ }
+
+ if (mark_num >= 2) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Too many mark actions");
+ return -rte_errno;
+ }
+
+ if (counter_num >= 2) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Too many count actions");
+ return -rte_errno;
+ }
+
+ if (dest_num + mark_num + counter_num == 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Empty action");
+ return -rte_errno;
+ }
+
+ result->action_idx = action_bitmap;
+ return 0;
+
+free_flow:
+ if (!dh_flow)
+ rte_free(flow);
+ return -rte_errno;
+}
+
+static int
+fd_parse_pattern_action(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error, struct zxdh_flow *dh_flow)
+{
+ int ret = 0;
+ ret = fd_flow_parse_attr(dev, attr, error, dh_flow);
+ if (ret < 0)
+ return -rte_errno;
+ ret = fd_flow_parse_pattern(dev, pattern, error, dh_flow);
+ if (ret < 0)
+ return -rte_errno;
+
+ ret = fd_flow_parse_action(dev, actions, error, dh_flow);
+ if (ret < 0)
+ return -rte_errno;
+ return 0;
+}
+
+struct dh_flow_engine pf_fd_engine = {
+ .apply = pf_fd_hw_apply,
+ .destroy = pf_fd_hw_destroy,
+ .query_count = pf_fd_hw_query_count,
+ .parse_pattern_action = fd_parse_pattern_action,
+ .type = FLOW_TYPE_FD_TCAM,
+};
+
+
+static int
+vf_flow_msg_process(enum zxdh_msg_type msg_type, struct rte_eth_dev *dev,
+ struct zxdh_flow *dh_flow, struct rte_flow_error *error,
+ struct rte_flow_query_count *count)
+{
+ int ret = 0;
+ struct zxdh_hw *hw = dev->data->dev_private;
+ struct zxdh_msg_info msg_info = {0};
+ struct zxdh_flow_op_msg *flow_msg = &msg_info.data.flow_msg;
+
+ uint8_t zxdh_msg_reply_info[ZXDH_ST_SZ_BYTES(msg_reply_info)] = {0};
+ void *reply_body_addr = ZXDH_ADDR_OF(msg_reply_info, zxdh_msg_reply_info, reply_body);
+ void *flow_rsp_addr = ZXDH_ADDR_OF(msg_reply_body, reply_body_addr, flow_rsp);
+ uint8_t flow_op_rsp[sizeof(struct zxdh_flow_op_rsp)] = {0};
+ uint16_t len = sizeof(struct zxdh_flow_op_rsp) - 4;
+ zxdh_adjust_flow_op_rsp_memory_layout(flow_rsp_addr, len, flow_op_rsp);
+ struct zxdh_flow_op_rsp *flow_rsp = (struct zxdh_flow_op_rsp *)flow_op_rsp;
+
+ dh_flow->hash_search_index = hw->hash_search_index;
+ rte_memcpy(&flow_msg->dh_flow, dh_flow, sizeof(struct zxdh_flow));
+
+ zxdh_msg_head_build(hw, msg_type, &msg_info);
+ ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(struct zxdh_msg_info),
+ (void *)zxdh_msg_reply_info, ZXDH_ST_SZ_BYTES(msg_reply_info));
+ if (ret) {
+ PMD_DRV_LOG(ERR, "port %d flow op %d failed ret %d", hw->port_id, msg_type, ret);
+ if (ret == -2) {
+ PMD_DRV_LOG(ERR, "port %d flow %d failed: cause %s",
+ hw->port_id, msg_type, flow_rsp->error.reason);
+ rte_flow_error_set(error, EBUSY,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ flow_rsp->error.reason);
+ } else {
+ rte_flow_error_set(error, EBUSY,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "msg channel error");
+ }
+ return ret;
+ }
+
+ if (msg_type == ZXDH_FLOW_HW_ADD)
+ dh_flow->flowentry.hw_idx = flow_rsp->dh_flow.flowentry.hw_idx;
+ if (count)
+ rte_memcpy((void *)count, &flow_rsp->count, sizeof(flow_rsp->count));
+
+ return ret;
+}
+
+static int
+vf_fd_apply(struct rte_eth_dev *dev, struct zxdh_flow *dh_flow,
+ struct rte_flow_error *error, uint16_t vport __rte_unused,
+ uint16_t pcieid __rte_unused)
+{
+ int ret = 0;
+ struct zxdh_hw *hw = dev->data->dev_private;
+ ret = vf_flow_msg_process(ZXDH_FLOW_HW_ADD, dev, dh_flow, error, NULL);
+ if (!ret) {
+ uint8_t action_bits = dh_flow->flowentry.fd_flow.result.action_idx;
+ if (((action_bits & (1 << FD_ACTION_VXLAN_ENCAP)) != 0) ||
+ ((action_bits & (1 << FD_ACTION_VXLAN_DECAP)) != 0)) {
+ hw->vxlan_fd_num++;
+ if (hw->vxlan_fd_num == 1) {
+ set_vxlan_enable(dev, 1, error);
+ PMD_DRV_LOG(DEBUG, "vf set_vxlan_enable");
+ }
+ }
+ }
+ return ret;
+}
+
+static int
+vf_fd_destroy(struct rte_eth_dev *dev, struct zxdh_flow *dh_flow,
+ struct rte_flow_error *error, uint16_t vport __rte_unused,
+ uint16_t pcieid __rte_unused)
+{
+ int ret = 0;
+ struct zxdh_hw *hw = dev->data->dev_private;
+ ret = vf_flow_msg_process(ZXDH_FLOW_HW_DEL, dev, dh_flow, error, NULL);
+ if (!ret) {
+ uint8_t action_bits = dh_flow->flowentry.fd_flow.result.action_idx;
+ if (((action_bits & (1 << FD_ACTION_VXLAN_ENCAP)) != 0) ||
+ ((action_bits & (1 << FD_ACTION_VXLAN_DECAP)) != 0)) {
+ hw->vxlan_fd_num--;
+ if (hw->vxlan_fd_num == 0) {
+ set_vxlan_enable(dev, 0, error);
+ PMD_DRV_LOG(DEBUG, "vf set_vxlan_disable");
+ }
+ }
+ }
+ return ret;
+}
+
+static int
+vf_fd_query_count(struct rte_eth_dev *dev,
+ struct zxdh_flow *dh_flow,
+ struct rte_flow_query_count *count,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+ ret = vf_flow_msg_process(ZXDH_FLOW_HW_GET, dev, dh_flow, error, count);
+ return ret;
+}
+
+
+static struct dh_flow_engine vf_fd_engine = {
+ .apply = vf_fd_apply,
+ .destroy = vf_fd_destroy,
+ .parse_pattern_action = fd_parse_pattern_action,
+ .query_count = vf_fd_query_count,
+ .type = FLOW_TYPE_FD_TCAM,
+};
+
+void zxdh_flow_init(struct rte_eth_dev *dev)
+{
+ struct zxdh_hw *priv = dev->data->dev_private;
+ if (priv->is_pf)
+ zxdh_register_flow_engine(&pf_fd_engine);
+ else
+ zxdh_register_flow_engine(&vf_fd_engine);
+ TAILQ_INIT(&priv->dh_flow_list);
+}
+
+const struct rte_flow_ops zxdh_flow_ops = {
+ .validate = zxdh_flow_validate,
+ .create = zxdh_flow_create,
+ .destroy = zxdh_flow_destroy,
+ .flush = zxdh_flow_flush,
+ .query = zxdh_flow_query,
+ .dev_dump = zxdh_flow_dev_dump,
+};
+
+int
+zxdh_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_ops **ops)
+{
+ *ops = &zxdh_flow_ops;
+
+ return 0;
+}
+
+void
+zxdh_flow_release(struct rte_eth_dev *dev)
+{
+ struct rte_flow_error error = {0};
+ const struct rte_flow_ops *flow_ops = NULL;
+
+ if (dev->dev_ops && dev->dev_ops->flow_ops_get)
+ dev->dev_ops->flow_ops_get(dev, &flow_ops);
+ if (flow_ops && flow_ops->flush)
+ flow_ops->flush(dev, &error);
+}
diff --git a/drivers/net/zxdh/zxdh_flow.h b/drivers/net/zxdh/zxdh_flow.h
new file mode 100644
index 0000000000..cbcf71b3e1
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_flow.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef ZXDH_FLOW_H
+#define ZXDH_FLOW_H
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/queue.h>
+
+#include <rte_arp.h>
+#include <rte_common.h>
+#include <rte_ether.h>
+#include <rte_icmp.h>
+#include <rte_ip.h>
+#include <rte_sctp.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_byteorder.h>
+#include <rte_flow_driver.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_GROUP 1
+#define ZXDH_MAX_FLOW_NUM 2048
+#define MAX_FLOW_COUNT_NUM ZXDH_MAX_FLOW_NUM
+#define ZXDH_FLOW_GROUP_TCAM 1
+
+#ifndef IPv4_BYTES
+#define IPv4_BYTES_FMT "%" PRIu8 ".%" PRIu8 ".%" PRIu8 ".%" PRIu8
+#define IPv4_BYTES(addr) \
+ (uint8_t)(((addr) >> 24) & 0xFF),\
+ (uint8_t)(((addr) >> 16) & 0xFF),\
+ (uint8_t)(((addr) >> 8) & 0xFF),\
+ (uint8_t)((addr) & 0xFF)
+#endif
+
+#ifndef IPv6_BYTES
+#define IPv6_BYTES_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:" \
+ "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
+#define IPv6_BYTES(addr) \
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], addr[6], addr[7], \
+ addr[8], addr[9], addr[10], addr[11], addr[12], addr[13], addr[14], addr[15]
+#endif
+
+enum {
+ FD_ACTION_VXLAN_ENCAP = 0,
+ FD_ACTION_VXLAN_DECAP = 1,
+ FD_ACTION_RSS_BIT = 2,
+ FD_ACTION_COUNT_BIT = 3,
+ FD_ACTION_DROP_BIT = 4,
+ FD_ACTION_MARK_BIT = 5,
+ FD_ACTION_QUEUE_BIT = 6,
+};
+
+struct fd_flow_key {
+ struct rte_ether_addr mac_dst; /**< Destination MAC. */
+ struct rte_ether_addr mac_src; /**< Source MAC. */
+ rte_be16_t ether_type; /**< EtherType */
+ union {
+ struct {
+ rte_be16_t cvlan_pri:4; /**< CVLAN priority */
+ rte_be16_t cvlan_vlanid:12; /**< vlanid 0xfff is valid */
+ };
+ rte_be16_t vlan_tci;
+ };
+
+ uint8_t src_ip[16]; /** ip src */
+ uint8_t dst_ip[16]; /** ip dst */
+ uint8_t rsv0;
+ union {
+ uint8_t tos;
+ uint8_t tc;
+ };
+ uint8_t nw_proto;
+ uint8_t frag_flag; /* 1: fragmented, 0: not fragmented */
+ rte_be16_t tp_src;
+ rte_be16_t tp_dst;
+
+ uint8_t rsv1;/**/
+ uint8_t vni[3];/**/
+
+ rte_be16_t vfid;
+ uint8_t rsv2[18];
+};
+
+struct fd_flow_result {
+ rte_le16_t qid;
+ uint8_t rsv0;
+
+ uint8_t action_idx:7;
+ uint8_t hit_flag:1;
+
+ rte_le32_t mark_fd_id;
+ rte_le32_t countid:20;
+ rte_le32_t encap1_index:12;
+
+ rte_le16_t encap0_index:12;
+ rte_le16_t rsv1:4;
+ uint8_t rss_hash_factor;
+ uint8_t rss_hash_alg;
+};
+
+struct fd_flow_entry {
+ struct fd_flow_key key;
+ struct fd_flow_key key_mask;
+ struct fd_flow_result result;
+};
+
+struct flow_stats {
+ uint32_t hit_pkts_hi;
+ uint32_t hit_pkts_lo;
+ uint32_t hit_bytes_hi;
+ uint32_t hit_bytes_lo;
+};
+
+
+enum dh_flow_type {
+ FLOW_TYPE_FLOW = 0,
+ FLOW_TYPE_FD_TCAM,
+ FLOW_TYPE_FD_SW,
+};
+
+struct zxdh_flow_info {
+ enum dh_flow_type flowtype;
+ uint16_t hw_idx;
+ uint16_t rsv;
+ union {
+ struct fd_flow_entry fd_flow;
+ };
+};
+
+struct tunnel_encap_ip {
+ rte_be32_t ip_addr[4];
+};
+
+struct tunnel_encap0 {
+ uint8_t tos;
+ uint8_t rsv2[2];
+ uint8_t rsv1: 6;
+ uint8_t ethtype: 1;
+ uint8_t hit_flag: 1;
+ uint16_t dst_mac1;
+ uint16_t tp_dst;
+ uint32_t dst_mac2;
+ uint32_t ttl:8;
+ uint32_t vni:24;
+ struct tunnel_encap_ip dip;
+};
+
+struct tunnel_encap1 {
+ uint32_t rsv1: 31;
+ uint32_t hit_flag: 1;
+ uint16_t src_mac1;
+ uint16_t vlan_tci;
+ uint32_t src_mac2;
+ uint32_t rsv;
+ struct tunnel_encap_ip sip;
+};
+
+struct zxdh_flow {
+ uint8_t direct; /* 0 in 1 out */
+ uint8_t group; /* rule group id */
+ uint8_t pri; /* priority */
+ uint8_t hash_search_index; /* */
+ struct zxdh_flow_info flowentry;
+ struct tunnel_encap0 encap0;
+ struct tunnel_encap1 encap1;
+};
+TAILQ_HEAD(dh_flow_list, rte_flow);
+
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) next;
+ void *driver_flow;
+ uint32_t type;
+ uint16_t port_id;
+};
+
+struct count_res {
+ rte_spinlock_t count_lock;
+ uint8_t count_ref;
+ uint8_t rev[3];
+};
+
+/* Struct to store engine created. */
+struct dh_flow_engine {
+ TAILQ_ENTRY(dh_flow_engine) node;
+ enum dh_flow_type type;
+ int (*apply)
+ (struct rte_eth_dev *dev,
+ struct zxdh_flow *dh_flow,
+ struct rte_flow_error *error,
+ uint16_t vport, uint16_t pcieid);
+
+ int (*parse_pattern_action)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct zxdh_flow *dh_flow);
+
+ int (*destroy)
+ (struct rte_eth_dev *dev,
+ struct zxdh_flow *dh_flow,
+ struct rte_flow_error *error,
+ uint16_t vport, uint16_t pcieid);
+
+ int (*query_count)
+ (struct rte_eth_dev *dev,
+ struct zxdh_flow *dh_flow,
+ struct rte_flow_query_count *count,
+ struct rte_flow_error *error);
+};
+TAILQ_HEAD(dh_engine_list, dh_flow_engine);
+
+void zxdh_register_flow_engine(struct dh_flow_engine *engine);
+
+extern const struct rte_flow_ops zxdh_flow_ops;
+
+void zxdh_flow_global_init(void);
+void zxdh_flow_init(struct rte_eth_dev *dev);
+int pf_fd_hw_apply(struct rte_eth_dev *dev, struct zxdh_flow *dh_flow,
+ struct rte_flow_error *error, uint16_t vport, uint16_t pcieid);
+int pf_fd_hw_destroy(struct rte_eth_dev *dev, struct zxdh_flow *dh_flow,
+ struct rte_flow_error *error, uint16_t vport, uint16_t pcieid);
+int pf_fd_hw_query_count(struct rte_eth_dev *dev,
+ struct zxdh_flow *flow,
+ struct rte_flow_query_count *count,
+ struct rte_flow_error *error);
+int zxdh_flow_ops_get(struct rte_eth_dev *dev, const struct rte_flow_ops **ops);
+void zxdh_flow_release(struct rte_eth_dev *dev);
+
+#endif /* ZXDH_FLOW_H */
diff --git a/drivers/net/zxdh/zxdh_msg.c b/drivers/net/zxdh/zxdh_msg.c
index 02ecd93b12..7e73833bf4 100644
--- a/drivers/net/zxdh/zxdh_msg.c
+++ b/drivers/net/zxdh/zxdh_msg.c
@@ -19,6 +19,7 @@
#include "zxdh_tables.h"
#include "zxdh_np.h"
#include "zxdh_common.h"
+#include "zxdh_flow.h"
#define ZXDH_REPS_INFO_FLAG_USABLE 0x00
#define ZXDH_BAR_SEQID_NUM_MAX 256
@@ -1234,7 +1235,8 @@ zxdh_vf_promisc_uninit(struct zxdh_hw *hw, union zxdh_virport_num vport)
}
static int
-zxdh_vf_port_init(struct zxdh_hw *pf_hw, uint16_t vport, void *cfg_data,
+zxdh_vf_port_init(struct zxdh_hw *pf_hw, uint16_t vport,
+ uint16_t pcieid, void *cfg_data,
void *res_info, uint16_t *res_len)
{
struct zxdh_port_attr_table port_attr = {0};
@@ -1253,6 +1255,9 @@ zxdh_vf_port_init(struct zxdh_hw *pf_hw, uint16_t vport, void *cfg_data,
port_attr.hash_search_index = pf_hw->hash_search_index;
port_attr.port_base_qid = vf_init_msg->base_qid;
uint16_t vfid = zxdh_vport_to_vfid(port);
+ int vf_index = VF_IDX(pcieid);
+
+ pf_hw->vfinfo[vf_index].vport = vport;
ret = zxdh_set_port_attr(pf_hw, vfid, &port_attr);
if (ret) {
@@ -1265,6 +1270,12 @@ zxdh_vf_port_init(struct zxdh_hw *pf_hw, uint16_t vport, void *cfg_data,
PMD_DRV_LOG(ERR, "vf_promisc_table_init failed, code:%d", ret);
goto proc_end;
}
+
+ ret = zxdh_np_dtb_acl_offline_delete(pf_hw->dev_id, pf_hw->dev_sd->dtb_sd.queueid,
+ ZXDH_SDT_FD_TABLE, vport, ZXDH_FLOW_STATS_INGRESS_BASE, 1);
+ if (ret)
+ PMD_DRV_LOG(ERR, "flow table delete failed. code:%d", ret);
+
ZXDH_SET(msg_reply_body, res_info, flag, ZXDH_REPS_SUCC);
*res_len = sizeof(uint8_t);
@@ -1276,30 +1287,30 @@ zxdh_vf_port_init(struct zxdh_hw *pf_hw, uint16_t vport, void *cfg_data,
}
static int
-zxdh_mac_clear(struct zxdh_hw *hw, union zxdh_virport_num vport)
+zxdh_mac_clear(struct zxdh_hw *hw, union zxdh_virport_num vport, uint16_t pcieid)
{
- uint16_t vf_id = vport.vfid;
+ uint16_t vf_index = VF_IDX(pcieid);
int i;
int ret = 0;
for (i = 0; (i != ZXDH_MAX_MAC_ADDRS); ++i) {
- if (!rte_is_zero_ether_addr(&hw->vfinfo[vf_id].vf_mac[i])) {
+ if (!rte_is_zero_ether_addr(&hw->vfinfo[vf_index].vf_mac[i])) {
ret = zxdh_del_mac_table(hw, vport.vport,
- &hw->vfinfo[vf_id].vf_mac[i],
+ &hw->vfinfo[vf_index].vf_mac[i],
hw->hash_search_index, 0, 0);
if (ret) {
PMD_DRV_LOG(ERR, "vf_del_mac_failed. code:%d", ret);
return ret;
}
- memset(&hw->vfinfo[vf_id].vf_mac[i], 0, sizeof(struct rte_ether_addr));
+ memset(&hw->vfinfo[vf_index].vf_mac[i], 0, sizeof(struct rte_ether_addr));
}
}
return ret;
}
static int
-zxdh_vf_port_uninit(struct zxdh_hw *pf_hw,
- uint16_t vport, void *cfg_data __rte_unused,
+zxdh_vf_port_uninit(struct zxdh_hw *pf_hw, uint16_t vport,
+ uint16_t pcieid, void *cfg_data __rte_unused,
void *res_info, uint16_t *res_len)
{
char str[ZXDH_MSG_REPLY_BODY_MAX_LEN] = "uninit";
@@ -1317,7 +1328,7 @@ zxdh_vf_port_uninit(struct zxdh_hw *pf_hw,
goto proc_end;
}
- ret = zxdh_mac_clear(pf_hw, vport_num);
+ ret = zxdh_mac_clear(pf_hw, vport_num, pcieid);
if (ret) {
PMD_DRV_LOG(ERR, "zxdh_mac_clear failed, code:%d", ret);
goto proc_end;
@@ -1342,7 +1353,8 @@ zxdh_vf_port_uninit(struct zxdh_hw *pf_hw,
}
static int
-zxdh_add_vf_mac_table(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
+zxdh_add_vf_mac_table(struct zxdh_hw *hw, uint16_t vport,
+ uint16_t pcieid, void *cfg_data,
void *reply_body, uint16_t *reply_len)
{
char str[ZXDH_MSG_REPLY_BODY_MAX_LEN] = "add mac";
@@ -1350,13 +1362,13 @@ zxdh_add_vf_mac_table(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
struct zxdh_mac_filter *mac_filter = (struct zxdh_mac_filter *)cfg_data;
struct rte_ether_addr *addr = &mac_filter->mac;
int i = 0, ret = 0;
- uint16_t vf_id = port.vfid;
+ uint16_t vf_index = VF_IDX(pcieid);
port.vport = vport;
void *reply_data_addr = ZXDH_ADDR_OF(msg_reply_body, reply_body, reply_data);
void *mac_reply_msg_addr = ZXDH_ADDR_OF(msg_reply_body, reply_body, mac_reply_msg);
for (i = 0; i < ZXDH_MAX_MAC_ADDRS; i++)
- if (rte_is_same_ether_addr(&hw->vfinfo[vf_id].vf_mac[i], addr))
+ if (rte_is_same_ether_addr(&hw->vfinfo[vf_index].vf_mac[i], addr))
goto success;
ret = zxdh_add_mac_table(hw, vport, addr, hw->hash_search_index, 0, 0);
@@ -1372,8 +1384,8 @@ zxdh_add_vf_mac_table(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
goto failure;
}
for (i = 0; i < ZXDH_MAX_MAC_ADDRS; i++) {
- if (rte_is_zero_ether_addr(&hw->vfinfo[vf_id].vf_mac[i])) {
- memcpy(&hw->vfinfo[vf_id].vf_mac[i], addr, 6);
+ if (rte_is_zero_ether_addr(&hw->vfinfo[vf_index].vf_mac[i])) {
+ memcpy(&hw->vfinfo[vf_index].vf_mac[i], addr, 6);
break;
}
}
@@ -1393,14 +1405,15 @@ zxdh_add_vf_mac_table(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
}
static int
-zxdh_del_vf_mac_table(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
- void *res_info, uint16_t *res_len)
+zxdh_del_vf_mac_table(struct zxdh_hw *hw, uint16_t vport,
+ uint16_t pcieid, void *cfg_data,
+ void *res_info, uint16_t *res_len)
{
int ret, i = 0;
struct zxdh_mac_filter *mac_filter = (struct zxdh_mac_filter *)cfg_data;
union zxdh_virport_num port = (union zxdh_virport_num)vport;
char str[ZXDH_MSG_REPLY_BODY_MAX_LEN] = "del mac";
- uint16_t vf_id = port.vfid;
+ uint16_t vf_index = VF_IDX(pcieid);
void *reply_data_addr = ZXDH_ADDR_OF(msg_reply_body, res_info, reply_data);
PMD_DRV_LOG(DEBUG, "[PF GET MSG FROM VF]--vf mac to del.");
@@ -1415,8 +1428,8 @@ zxdh_del_vf_mac_table(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
}
for (i = 0; i < ZXDH_MAX_MAC_ADDRS; i++) {
- if (rte_is_same_ether_addr(&hw->vfinfo[vf_id].vf_mac[i], &mac_filter->mac))
- memset(&hw->vfinfo[vf_id].vf_mac[i], 0, sizeof(struct rte_ether_addr));
+ if (rte_is_same_ether_addr(&hw->vfinfo[vf_index].vf_mac[i], &mac_filter->mac))
+ memset(&hw->vfinfo[vf_index].vf_mac[i], 0, sizeof(struct rte_ether_addr));
}
sprintf(str, "vport 0x%x del mac ret 0x%x\n", port.vport, ret);
@@ -1432,7 +1445,8 @@ zxdh_del_vf_mac_table(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
}
static int
-zxdh_vf_promisc_set(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
+zxdh_vf_promisc_set(struct zxdh_hw *hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
void *reply, uint16_t *res_len)
{
struct zxdh_port_promisc_msg *promisc_msg = (struct zxdh_port_promisc_msg *)cfg_data;
@@ -1463,7 +1477,8 @@ zxdh_vf_promisc_set(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
}
static int
-zxdh_vf_vlan_filter_table_process(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
+zxdh_vf_vlan_filter_table_process(struct zxdh_hw *hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
void *res_info, uint16_t *res_len, uint8_t enable)
{
struct zxdh_vlan_filter *vlan_filter = cfg_data;
@@ -1488,21 +1503,24 @@ zxdh_vf_vlan_filter_table_process(struct zxdh_hw *hw, uint16_t vport, void *cfg_
}
static int
-zxdh_vf_vlan_filter_table_add(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
+zxdh_vf_vlan_filter_table_add(struct zxdh_hw *hw, uint16_t vport,
+ uint16_t pcieid, void *cfg_data,
void *res_info, uint16_t *res_len)
{
- return zxdh_vf_vlan_filter_table_process(hw, vport, cfg_data, res_info, res_len, 1);
+ return zxdh_vf_vlan_filter_table_process(hw, vport, pcieid, cfg_data, res_info, res_len, 1);
}
static int
-zxdh_vf_vlan_filter_table_del(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
+zxdh_vf_vlan_filter_table_del(struct zxdh_hw *hw, uint16_t vport,
+ uint16_t pcieid, void *cfg_data,
void *res_info, uint16_t *res_len)
{
- return zxdh_vf_vlan_filter_table_process(hw, vport, cfg_data, res_info, res_len, 0);
+ return zxdh_vf_vlan_filter_table_process(hw, vport, pcieid, cfg_data, res_info, res_len, 0);
}
static int
-zxdh_vf_set_vlan_filter(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
+zxdh_vf_set_vlan_filter(struct zxdh_hw *hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
void *reply, uint16_t *res_len)
{
struct zxdh_vlan_filter_set *vlan_filter = cfg_data;
@@ -1526,7 +1544,8 @@ zxdh_vf_set_vlan_filter(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
}
static int
-zxdh_vf_set_vlan_offload(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
+zxdh_vf_set_vlan_offload(struct zxdh_hw *hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
void *reply, uint16_t *res_len)
{
struct zxdh_vlan_offload *vlan_offload = cfg_data;
@@ -1553,8 +1572,9 @@ zxdh_vf_set_vlan_offload(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
}
static int
-zxdh_vf_rss_hf_get(struct zxdh_hw *hw, uint16_t vport, void *cfg_data __rte_unused,
- void *reply, uint16_t *res_len)
+zxdh_vf_rss_hf_get(struct zxdh_hw *hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data __rte_unused,
+ void *reply, uint16_t *res_len)
{
char str[ZXDH_MSG_REPLY_BODY_MAX_LEN] = "rss_hf";
struct zxdh_port_attr_table vport_att = {0};
@@ -1582,8 +1602,9 @@ zxdh_vf_rss_hf_get(struct zxdh_hw *hw, uint16_t vport, void *cfg_data __rte_unus
}
static int
-zxdh_vf_rss_hf_set(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
- void *reply, uint16_t *res_len)
+zxdh_vf_rss_hf_set(struct zxdh_hw *hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
+ void *reply, uint16_t *res_len)
{
char str[ZXDH_MSG_REPLY_BODY_MAX_LEN] = "rss_hf";
struct zxdh_rss_hf *rss_hf = cfg_data;
@@ -1618,8 +1639,9 @@ zxdh_vf_rss_hf_set(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
}
static int
-zxdh_vf_rss_enable(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
- void *reply, uint16_t *res_len)
+zxdh_vf_rss_enable(struct zxdh_hw *hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
+ void *reply, uint16_t *res_len)
{
char str[ZXDH_MSG_REPLY_BODY_MAX_LEN] = "rss_enable";
struct zxdh_rss_enable *rss_enable = cfg_data;
@@ -1654,7 +1676,8 @@ zxdh_vf_rss_enable(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
}
static int
-zxdh_vf_rss_table_set(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
+zxdh_vf_rss_table_set(struct zxdh_hw *hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
void *reply, uint16_t *res_len)
{
char str[ZXDH_MSG_REPLY_BODY_MAX_LEN] = "rss_table";
@@ -1676,7 +1699,8 @@ zxdh_vf_rss_table_set(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
}
static int
-zxdh_vf_rss_table_get(struct zxdh_hw *hw, uint16_t vport, void *cfg_data __rte_unused,
+zxdh_vf_rss_table_get(struct zxdh_hw *hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data __rte_unused,
void *reply, uint16_t *res_len)
{
char str[ZXDH_MSG_REPLY_BODY_MAX_LEN] = "rss_table";
@@ -1699,8 +1723,9 @@ zxdh_vf_rss_table_get(struct zxdh_hw *hw, uint16_t vport, void *cfg_data __rte_u
}
static int
-zxdh_vf_port_attr_set(struct zxdh_hw *pf_hw, uint16_t vport, void *cfg_data,
- void *res_info, uint16_t *res_len)
+zxdh_vf_port_attr_set(struct zxdh_hw *pf_hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
+ void *res_info, uint16_t *res_len)
{
RTE_ASSERT(!cfg_data || !pf_hw);
if (res_info)
@@ -1762,8 +1787,8 @@ zxdh_vf_port_attr_set(struct zxdh_hw *pf_hw, uint16_t vport, void *cfg_data,
static int
zxdh_vf_np_stats_update(struct zxdh_hw *pf_hw, uint16_t vport,
- void *cfg_data, void *res_info,
- uint16_t *res_len)
+ uint16_t pcieid __rte_unused, void *cfg_data,
+ void *res_info, uint16_t *res_len)
{
struct zxdh_np_stats_updata_msg *np_stats_query =
(struct zxdh_np_stats_updata_msg *)cfg_data;
@@ -1944,10 +1969,9 @@ zxdh_vf_np_stats_update(struct zxdh_hw *pf_hw, uint16_t vport,
}
static int
-zxdh_vf_mtr_hw_stats_get(struct zxdh_hw *pf_hw,
- uint16_t vport, void *cfg_data,
- void *res_info,
- uint16_t *res_len)
+zxdh_vf_mtr_hw_stats_get(struct zxdh_hw *pf_hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
+ void *res_info, uint16_t *res_len)
{
struct zxdh_mtr_stats_query *zxdh_mtr_stats_query =
(struct zxdh_mtr_stats_query *)cfg_data;
@@ -1977,11 +2001,9 @@ zxdh_vf_mtr_hw_stats_get(struct zxdh_hw *pf_hw,
}
static int
-zxdh_vf_mtr_hw_profile_add(struct zxdh_hw *pf_hw,
- uint16_t vport,
- void *cfg_data,
- void *res_info,
- uint16_t *res_len)
+zxdh_vf_mtr_hw_profile_add(struct zxdh_hw *pf_hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
+ void *res_info, uint16_t *res_len)
{
if (!cfg_data || !res_len || !res_info) {
PMD_DRV_LOG(ERR, " get profileid invalid inparams");
@@ -2017,11 +2039,9 @@ zxdh_vf_mtr_hw_profile_add(struct zxdh_hw *pf_hw,
}
static int
-zxdh_vf_mtr_hw_profile_del(struct zxdh_hw *pf_hw,
- uint16_t vport,
- void *cfg_data,
- void *res_info,
- uint16_t *res_len)
+zxdh_vf_mtr_hw_profile_del(struct zxdh_hw *pf_hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
+ void *res_info, uint16_t *res_len)
{
if (!cfg_data || !res_len || !res_info) {
PMD_DRV_LOG(ERR, " del profileid invalid inparams");
@@ -2059,11 +2079,9 @@ zxdh_vf_mtr_hw_profile_del(struct zxdh_hw *pf_hw,
}
static int
-zxdh_vf_mtr_hw_plcrflow_cfg(struct zxdh_hw *pf_hw,
- uint16_t vport,
- void *cfg_data,
- void *res_info,
- uint16_t *res_len)
+zxdh_vf_mtr_hw_plcrflow_cfg(struct zxdh_hw *pf_hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
+ void *res_info, uint16_t *res_len)
{
int ret = 0;
@@ -2098,11 +2116,9 @@ zxdh_vf_mtr_hw_plcrflow_cfg(struct zxdh_hw *pf_hw,
}
static int
-zxdh_vf_mtr_hw_profile_cfg(struct zxdh_hw *pf_hw __rte_unused,
- uint16_t vport,
- void *cfg_data,
- void *res_info,
- uint16_t *res_len)
+zxdh_vf_mtr_hw_profile_cfg(struct zxdh_hw *pf_hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
+ void *res_info, uint16_t *res_len)
{
int ret = 0;
@@ -2131,6 +2147,121 @@ zxdh_vf_mtr_hw_profile_cfg(struct zxdh_hw *pf_hw __rte_unused,
return 0;
}
+
+static int
+zxdh_vf_flow_hw_add(struct zxdh_hw *pf_hw, uint16_t vport,
+ uint16_t pcieid, void *cfg_data,
+ void *res_info, uint16_t *res_len)
+{
+ if (!cfg_data || !res_len || !res_info) {
+ PMD_DRV_LOG(ERR, "invalid inparams");
+ return -1;
+ }
+ struct rte_flow_error error = {0};
+ int ret = 0;
+ struct zxdh_flow_op_msg *flow_entry = (struct zxdh_flow_op_msg *)cfg_data;
+ struct zxdh_flow *dh_flow;
+ ZXDH_SET(msg_reply_body, res_info, flag, ZXDH_REPS_FAIL);
+ *res_len = sizeof(struct zxdh_flow_op_rsp) - 4;
+
+ ret = pf_fd_hw_apply(pf_hw->eth_dev, &flow_entry->dh_flow, &error, vport, pcieid);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "pf 0x%x for vf 0x%x flow add failed ret :%d",
+ pf_hw->vport.vport, vport, ret);
+ return -1;
+ }
+ void *flow_rsp_addr = ZXDH_ADDR_OF(msg_reply_body, res_info, flow_rsp);
+ dh_flow = flow_rsp_addr;
+ dh_flow->flowentry.hw_idx = flow_entry->dh_flow.flowentry.hw_idx;
+ ZXDH_SET(msg_reply_body, res_info, flag, ZXDH_REPS_SUCC);
+ return 0;
+}
+
+static int
+zxdh_vf_flow_hw_del(struct zxdh_hw *pf_hw, uint16_t vport,
+ uint16_t pcieid, void *cfg_data,
+ void *res_info, uint16_t *res_len)
+{
+ if (!cfg_data || !res_len || !res_info) {
+ PMD_DRV_LOG(ERR, "invalid inparams");
+ return -1;
+ }
+ struct rte_flow_error error = {0};
+ int ret = 0;
+ struct zxdh_flow_op_msg *flow_entry = (struct zxdh_flow_op_msg *)cfg_data;
+ ZXDH_SET(msg_reply_body, res_info, flag, ZXDH_REPS_FAIL);
+ *res_len = sizeof(struct zxdh_flow_op_rsp) - 4;
+
+ ret = pf_fd_hw_destroy(pf_hw->eth_dev, &flow_entry->dh_flow, &error, vport, pcieid);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "pf 0x%x for vf 0x%x flow del failed ret :%d",
+ pf_hw->vport.vport, vport, ret);
+ return -1;
+ }
+ ZXDH_SET(msg_reply_body, res_info, flag, ZXDH_REPS_SUCC);
+ return 0;
+}
+
+static int
+zxdh_vf_flow_hw_get(struct zxdh_hw *pf_hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
+ void *res_info, uint16_t *res_len)
+{
+ if (!cfg_data || !res_len || !res_info) {
+ PMD_DRV_LOG(ERR, "invalid inparams");
+ return -1;
+ }
+
+ void *flow_rsp_addr = ZXDH_ADDR_OF(msg_reply_body, res_info, flow_rsp);
+ void *count_addr = (uint8_t *)flow_rsp_addr + sizeof(struct zxdh_flow);
+ struct rte_flow_error error = {0};
+ int ret = 0;
+ struct zxdh_flow_op_msg *flow_entry = (struct zxdh_flow_op_msg *)cfg_data;
+ struct zxdh_flow *dh_flow;
+
+ ZXDH_SET(msg_reply_body, res_info, flag, ZXDH_REPS_FAIL);
+ *res_len = sizeof(struct zxdh_flow_op_rsp) - 4;
+
+ PMD_DRV_LOG(INFO, "handle %d", flow_entry->dh_flow.flowentry.hw_idx);
+ ret = pf_fd_hw_query_count(pf_hw->eth_dev, &flow_entry->dh_flow, count_addr, &error);
+ if (ret) {
+ PMD_DRV_LOG(DEBUG, "pf 0x%x for vf 0x%x flow get failed ret :%d",
+ pf_hw->vport.vport, vport, ret);
+ return -1;
+ }
+ PMD_DRV_LOG(INFO, " res len :%d", *res_len);
+ dh_flow = flow_rsp_addr;
+ rte_memcpy(&dh_flow->flowentry, &flow_entry->dh_flow.flowentry, sizeof(dh_flow->flowentry));
+ ZXDH_SET(msg_reply_body, res_info, flag, ZXDH_REPS_SUCC);
+ return 0;
+}
+
+static int
+zxdh_vf_flow_hw_flush(struct zxdh_hw *pf_hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
+ void *res_info, uint16_t *res_len)
+{
+ if (!cfg_data || !res_len || !res_info) {
+ PMD_DRV_LOG(ERR, "invalid inparams");
+ return -1;
+ }
+ int ret = 0;
+ uint16_t queue_id = pf_hw->dev_sd->dtb_sd.queueid;
+
+ ZXDH_SET(msg_reply_body, res_info, flag, ZXDH_REPS_FAIL);
+ *res_len = sizeof(struct zxdh_flow_op_rsp) - 4;
+
+ ret = zxdh_np_dtb_acl_offline_delete(pf_hw->dev_id, queue_id, ZXDH_SDT_FD_TABLE,
+ vport, ZXDH_FLOW_STATS_INGRESS_BASE, 1);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "flow flush failed. code:%d", ret);
+ return -1;
+ }
+
+ ZXDH_SET(msg_reply_body, res_info, flag, ZXDH_REPS_SUCC);
+ return 0;
+}
+
static const zxdh_msg_process_callback zxdh_proc_cb[] = {
[ZXDH_NULL] = NULL,
[ZXDH_VF_PORT_INIT] = zxdh_vf_port_init,
@@ -2154,6 +2285,10 @@ static const zxdh_msg_process_callback zxdh_proc_cb[] = {
[ZXDH_PLCR_CAR_PROFILE_ID_DELETE] = zxdh_vf_mtr_hw_profile_del,
[ZXDH_PLCR_CAR_QUEUE_CFG_SET] = zxdh_vf_mtr_hw_plcrflow_cfg,
[ZXDH_PLCR_CAR_PROFILE_CFG_SET] = zxdh_vf_mtr_hw_profile_cfg,
+ [ZXDH_FLOW_HW_ADD] = zxdh_vf_flow_hw_add,
+ [ZXDH_FLOW_HW_DEL] = zxdh_vf_flow_hw_del,
+ [ZXDH_FLOW_HW_GET] = zxdh_vf_flow_hw_get,
+ [ZXDH_FLOW_HW_FLUSH] = zxdh_vf_flow_hw_flush,
};
static inline int
@@ -2168,7 +2303,7 @@ zxdh_config_process_callback(struct zxdh_hw *hw, struct zxdh_msg_info *msg_info,
return -1;
}
if (zxdh_proc_cb[msghead->msg_type]) {
- ret = zxdh_proc_cb[msghead->msg_type](hw, msghead->vport,
+ ret = zxdh_proc_cb[msghead->msg_type](hw, msghead->vport, msghead->pcieid,
(void *)&msg_info->data, res, res_len);
if (!ret)
ZXDH_SET(msg_reply_body, res, flag, ZXDH_REPS_SUCC);
diff --git a/drivers/net/zxdh/zxdh_msg.h b/drivers/net/zxdh/zxdh_msg.h
index 7dad6f7335..c20bb98195 100644
--- a/drivers/net/zxdh/zxdh_msg.h
+++ b/drivers/net/zxdh/zxdh_msg.h
@@ -240,6 +240,11 @@ enum zxdh_msg_type {
ZXDH_PLCR_CAR_QUEUE_CFG_SET = 40,
ZXDH_PORT_METER_STAT_GET = 42,
+ ZXDH_FLOW_HW_ADD = 46,
+ ZXDH_FLOW_HW_DEL = 47,
+ ZXDH_FLOW_HW_GET = 48,
+ ZXDH_FLOW_HW_FLUSH = 49,
+
ZXDH_MSG_TYPE_END,
};
@@ -418,6 +423,21 @@ struct zxdh_ifc_mtr_profile_info_bits {
uint8_t profile_id[0x40];
};
+struct err_reason {
+ uint8_t err_type;
+ uint8_t rsv[3];
+ char reason[512];
+};
+
+struct zxdh_flow_op_rsp {
+ struct zxdh_flow dh_flow;
+ uint8_t rev[4];
+ union {
+ struct rte_flow_query_count count;
+ struct err_reason error;
+ };
+};
+
struct zxdh_ifc_msg_reply_body_bits {
uint8_t flag[0x8];
union {
@@ -432,6 +452,7 @@ struct zxdh_ifc_msg_reply_body_bits {
struct zxdh_ifc_agent_mac_module_eeprom_msg_bits module_eeprom_msg;
struct zxdh_ifc_mtr_profile_info_bits mtr_profile_info;
struct zxdh_ifc_mtr_stats_bits hw_mtr_stats;
+ struct zxdh_flow_op_rsp flow_rsp;
};
};
@@ -535,6 +556,10 @@ struct zxdh_plcr_profile_free {
uint16_t profile_id;
};
+struct zxdh_flow_op_msg {
+ struct zxdh_flow dh_flow;
+};
+
struct zxdh_msg_info {
union {
uint8_t head_len[ZXDH_MSG_HEAD_LEN];
@@ -561,13 +586,15 @@ struct zxdh_msg_info {
struct zxdh_plcr_profile_cfg zxdh_plcr_profile_cfg;
struct zxdh_plcr_flow_cfg zxdh_plcr_flow_cfg;
struct zxdh_mtr_stats_query zxdh_mtr_stats_query;
+ struct zxdh_flow_op_msg flow_msg;
} data;
};
typedef int (*zxdh_bar_chan_msg_recv_callback)(void *pay_load, uint16_t len,
void *reps_buffer, uint16_t *reps_len, void *dev);
-typedef int (*zxdh_msg_process_callback)(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
- void *res_info, uint16_t *res_len);
+typedef int (*zxdh_msg_process_callback)(struct zxdh_hw *hw, uint16_t vport,
+ uint16_t pcieid, void *cfg_data,
+ void *res_info, uint16_t *res_len);
typedef int (*zxdh_bar_chan_msg_recv_callback)(void *pay_load, uint16_t len,
void *reps_buffer, uint16_t *reps_len, void *dev);
diff --git a/drivers/net/zxdh/zxdh_tables.h b/drivers/net/zxdh/zxdh_tables.h
index cb34e38be8..a227e09962 100644
--- a/drivers/net/zxdh/zxdh_tables.h
+++ b/drivers/net/zxdh/zxdh_tables.h
@@ -7,6 +7,8 @@
#include <stdint.h>
+#include "zxdh_msg.h"
+
/* eram */
#define ZXDH_SDT_VPORT_ATT_TABLE 1
#define ZXDH_SDT_PANEL_ATT_TABLE 2
@@ -16,6 +18,8 @@
#define ZXDH_SDT_UNICAST_ATT_TABLE 10
#define ZXDH_SDT_MULTICAST_ATT_TABLE 11
#define ZXDH_SDT_PORT_VLAN_ATT_TABLE 16
+#define ZXDH_SDT_TUNNEL_ENCAP0_TABLE 28
+#define ZXDH_SDT_TUNNEL_ENCAP1_TABLE 29
/* hash */
#define ZXDH_SDT_L2_ENTRY_TABLE0 64
#define ZXDH_SDT_L2_ENTRY_TABLE1 65
@@ -27,12 +31,14 @@
#define ZXDH_SDT_MC_TABLE2 78
#define ZXDH_SDT_MC_TABLE3 79
+#define ZXDH_SDT_FD_TABLE 130
+
#define ZXDH_PORT_VHCA_FLAG 1
#define ZXDH_PORT_RSS_HASH_FACTOR_FLAG 3
#define ZXDH_PORT_HASH_ALG_FLAG 4
#define ZXDH_PORT_PHY_PORT_FLAG 5
#define ZXDH_PORT_LAG_ID_FLAG 6
-
+#define ZXDH_PORT_VXLAN_OFFLOAD_EN_OFF 7
#define ZXDH_PORT_PF_VQM_VFID_FLAG 8
#define ZXDH_PORT_MTU_FLAG 10
@@ -169,7 +175,7 @@ struct zxdh_port_attr_table {
uint8_t phy_port: 4;
uint16_t lag_id : 3;
- uint16_t rsv81 : 1;
+ uint16_t fd_vxlan_offload_en : 1;
uint16_t pf_vfid : 11;
uint16_t rsv82 : 1;
--
2.27.0
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH v1 1/2] net/zxdh: npsdk add flow director table ops
2025-06-17 9:32 ` [PATCH v1 1/2] net/zxdh: npsdk add flow director table ops Bingbin Chen
@ 2025-06-17 14:07 ` Stephen Hemminger
2025-06-17 14:08 ` Stephen Hemminger
1 sibling, 0 replies; 9+ messages in thread
From: Stephen Hemminger @ 2025-06-17 14:07 UTC (permalink / raw)
To: Bingbin Chen; +Cc: wang.junlong1, yang.yonggang, dev
On Tue, 17 Jun 2025 17:32:25 +0800
Bingbin Chen <chen.bingbin@zte.com.cn> wrote:
> +static uint32_t
> +zxdh_np_agent_channel_acl_index_request(uint32_t dev_id,
> + uint32_t sdt_no,
> + uint32_t vport,
> + uint32_t *p_index)
> +{
> + uint32_t rc = ZXDH_OK;
Odd indentation here. Should either align, be nested or use one less tab.
static uint32_t
zxdh_np_agent_channel_acl_index_request(uint32_t dev_id,
                                        uint32_t sdt_no,
                                        uint32_t vport,
                                        uint32_t *p_index)
{
OR
static uint32_t
zxdh_np_agent_channel_acl_index_request(uint32_t dev_id,
		uint32_t sdt_no,
		uint32_t vport,
		uint32_t *p_index)
{
OR
static uint32_t
zxdh_np_agent_channel_acl_index_request(uint32_t dev_id, uint32_t sdt_no,
	uint32_t vport, uint32_t *p_index)
{
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH v1 1/2] net/zxdh: npsdk add flow director table ops
2025-06-17 9:32 ` [PATCH v1 1/2] net/zxdh: npsdk add flow director table ops Bingbin Chen
2025-06-17 14:07 ` Stephen Hemminger
@ 2025-06-17 14:08 ` Stephen Hemminger
1 sibling, 0 replies; 9+ messages in thread
From: Stephen Hemminger @ 2025-06-17 14:08 UTC (permalink / raw)
To: Bingbin Chen; +Cc: wang.junlong1, yang.yonggang, dev
On Tue, 17 Jun 2025 17:32:25 +0800
Bingbin Chen <chen.bingbin@zte.com.cn> wrote:
> + uint32_t msg_result = 0;
> + uint32_t acl_index = 0;
> + ZXDH_AGENT_CHANNEL_ACL_MSG_T msgcfg = {
> + .dev_id = 0,
> + .type = ZXDH_ACL_MSG,
> + .oper = ZXDH_ACL_INDEX_REQUEST,
> + .vport = vport,
> + .sdt_no = sdt_no,
> + };
> + ZXDH_AGENT_CHANNEL_MSG_T agent_msg = {
> + .msg = (void *)&msgcfg,
> + .msg_len = sizeof(ZXDH_AGENT_CHANNEL_ACL_MSG_T),
> + };
> +
> + rc = zxdh_np_agent_channel_sync_send(dev_id, &agent_msg, rsp_buff, sizeof(rsp_buff));
> + if (rc != ZXDH_OK) {
> + PMD_DRV_LOG(ERR, "agent send msg failed");
> + return ZXDH_ERR;
> + }
> +
> + msg_result = rsp_buff[0];
> + acl_index = rsp_buff[1];
> +
> + PMD_DRV_LOG(DEBUG, "dev_id: %d, msg_result: %d", dev_id, msg_result);
> + PMD_DRV_LOG(DEBUG, "dev_id: %d, acl_index: %d", dev_id, acl_index);
> +
Some compiler versions will complain about printing an unsigned type
(msg_result) with a signed format (%d). This problem exists in many places
in DPDK, so there is no urgent need to address it.
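For illustration, a minimal stand-alone sketch of the mismatch (plain printf
here; the assumption is that PMD_DRV_LOG inherits the same printf-style
format checking from rte_log, as the DPDK driver log macros normally do):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t msg_result = 7;

	/* GCC with -Wformat-signedness (and some other toolchains) warns here:
	 * the '%d' conversion expects int but the argument is unsigned.
	 */
	printf("msg_result: %d\n", msg_result);

	/* Matching unsigned conversion, no warning. */
	printf("msg_result: %u\n", msg_result);

	return 0;
}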
^ permalink raw reply [flat|nested] 9+ messages in thread
* [PATCH v2 0/2] add support flow director ops
2025-06-17 9:31 [PATCH v1 0/2] add support flow director ops Bingbin Chen
2025-06-17 9:32 ` [PATCH v1 1/2] net/zxdh: npsdk add flow director table ops Bingbin Chen
2025-06-17 9:32 ` [PATCH v1 2/2] net/zxdh: add support flow director ops Bingbin Chen
@ 2025-06-18 7:49 ` Bingbin Chen
2025-06-18 7:49 ` [PATCH v2 1/2] net/zxdh: npsdk add flow director table ops Bingbin Chen
2025-06-18 7:49 ` [PATCH v2 2/2] net/zxdh: add support flow director ops Bingbin Chen
2 siblings, 2 replies; 9+ messages in thread
From: Bingbin Chen @ 2025-06-18 7:49 UTC (permalink / raw)
To: stephen, wang.junlong1, yang.yonggang; +Cc: dev, Bingbin Chen
[-- Attachment #1.1.1: Type: text/plain, Size: 1131 bytes --]
V2:
- resolve code style and GCC compilation issues.
V1:
- add support flow director ops.
Bingbin Chen (2):
net/zxdh: npsdk add flow director table ops
net/zxdh: add support flow director ops
doc/guides/nics/features/zxdh.ini | 16 +
doc/guides/nics/zxdh.rst | 1 +
drivers/net/zxdh/meson.build | 1 +
drivers/net/zxdh/zxdh_common.h | 1 +
drivers/net/zxdh/zxdh_ethdev.c | 27 +
drivers/net/zxdh/zxdh_ethdev.h | 13 +-
drivers/net/zxdh/zxdh_ethdev_ops.c | 2 +-
drivers/net/zxdh/zxdh_ethdev_ops.h | 1 +
drivers/net/zxdh/zxdh_flow.c | 2004 ++++++++++++++++++++++++++++
drivers/net/zxdh/zxdh_flow.h | 237 ++++
drivers/net/zxdh/zxdh_msg.c | 263 +++-
drivers/net/zxdh/zxdh_msg.h | 31 +-
drivers/net/zxdh/zxdh_np.c | 1638 +++++++++++++++++++++++
drivers/net/zxdh/zxdh_np.h | 31 +-
drivers/net/zxdh/zxdh_tables.h | 10 +-
15 files changed, 4204 insertions(+), 72 deletions(-)
create mode 100644 drivers/net/zxdh/zxdh_flow.c
create mode 100644 drivers/net/zxdh/zxdh_flow.h
--
2.27.0
^ permalink raw reply [flat|nested] 9+ messages in thread
* [PATCH v2 1/2] net/zxdh: npsdk add flow director table ops
2025-06-18 7:49 ` [PATCH v2 0/2] " Bingbin Chen
@ 2025-06-18 7:49 ` Bingbin Chen
2025-06-18 7:49 ` [PATCH v2 2/2] net/zxdh: add support flow director ops Bingbin Chen
1 sibling, 0 replies; 9+ messages in thread
From: Bingbin Chen @ 2025-06-18 7:49 UTC (permalink / raw)
To: stephen, wang.junlong1, yang.yonggang; +Cc: dev, Bingbin Chen
[-- Attachment #1.1.1: Type: text/plain, Size: 52915 bytes --]
Implement flow director table entry write, delete and get
operations through the DTB channel.
Signed-off-by: Bingbin Chen <chen.bingbin@zte.com.cn>
---
drivers/net/zxdh/zxdh_np.c | 1638 ++++++++++++++++++++++++++++++++++++
drivers/net/zxdh/zxdh_np.h | 31 +-
2 files changed, 1668 insertions(+), 1 deletion(-)
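A minimal usage sketch of the index helpers this patch adds (illustrative
only; dev_id, sdt_no and vport are assumed to come from the caller's device
and table context, and error handling is reduced to the bare minimum):

#include "zxdh_np.h"

/* Request a flow-director eTCAM index for a vport, then release it. */
static uint32_t
acl_index_round_trip(uint32_t dev_id, uint32_t sdt_no, uint32_t vport)
{
	uint32_t index = 0;
	uint32_t rc;

	rc = zxdh_np_dtb_acl_index_request(dev_id, sdt_no, vport, &index);
	if (rc != ZXDH_OK)
		return rc;

	/* ... write the eTCAM entry at 'index' through the DTB queue ... */

	return zxdh_np_dtb_acl_index_release(dev_id, sdt_no, vport, index);
}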
diff --git a/drivers/net/zxdh/zxdh_np.c b/drivers/net/zxdh/zxdh_np.c
index 66902e7e92..57978957f9 100644
--- a/drivers/net/zxdh/zxdh_np.c
+++ b/drivers/net/zxdh/zxdh_np.c
@@ -3055,6 +3055,77 @@ zxdh_np_agent_channel_se_res_get(uint32_t dev_id,
return msg_result;
}
+static uint32_t
+zxdh_np_agent_channel_acl_index_request(uint32_t dev_id, uint32_t sdt_no,
+ uint32_t vport, uint32_t *p_index)
+{
+ uint32_t rc = ZXDH_OK;
+
+ uint32_t rsp_buff[2] = {0};
+ uint32_t msg_result = 0;
+ uint32_t acl_index = 0;
+ ZXDH_AGENT_CHANNEL_ACL_MSG_T msgcfg = {
+ .dev_id = 0,
+ .type = ZXDH_ACL_MSG,
+ .oper = ZXDH_ACL_INDEX_REQUEST,
+ .vport = vport,
+ .sdt_no = sdt_no,
+ };
+ ZXDH_AGENT_CHANNEL_MSG_T agent_msg = {
+ .msg = (void *)&msgcfg,
+ .msg_len = sizeof(ZXDH_AGENT_CHANNEL_ACL_MSG_T),
+ };
+
+ rc = zxdh_np_agent_channel_sync_send(dev_id, &agent_msg, rsp_buff, sizeof(rsp_buff));
+ if (rc != ZXDH_OK) {
+ PMD_DRV_LOG(ERR, "agent send msg failed");
+ return ZXDH_ERR;
+ }
+
+ msg_result = rsp_buff[0];
+ acl_index = rsp_buff[1];
+
+ PMD_DRV_LOG(DEBUG, "dev_id: %u, msg_result: %u", dev_id, msg_result);
+ PMD_DRV_LOG(DEBUG, "dev_id: %u, acl_index: %u", dev_id, acl_index);
+
+ *p_index = acl_index;
+
+ return msg_result;
+}
+
+static uint32_t
+zxdh_np_agent_channel_acl_index_release(uint32_t dev_id, uint32_t rel_type,
+ uint32_t sdt_no, uint32_t vport, uint32_t index)
+{
+ uint32_t rc = ZXDH_OK;
+
+ uint32_t msg_result = 0;
+ uint32_t rsp_buff[2] = {0};
+ ZXDH_AGENT_CHANNEL_ACL_MSG_T msgcfg = {
+ .dev_id = 0,
+ .type = ZXDH_ACL_MSG,
+ .oper = rel_type,
+ .index = index,
+ .sdt_no = sdt_no,
+ .vport = vport,
+ };
+ ZXDH_AGENT_CHANNEL_MSG_T agent_msg = {
+ .msg = (void *)&msgcfg,
+ .msg_len = sizeof(ZXDH_AGENT_CHANNEL_ACL_MSG_T),
+ };
+
+ rc = zxdh_np_agent_channel_sync_send(dev_id, &agent_msg, rsp_buff, sizeof(rsp_buff));
+ if (rc != ZXDH_OK) {
+ PMD_DRV_LOG(ERR, "agent send msg failed");
+ return ZXDH_ERR;
+ }
+
+ msg_result = rsp_buff[0];
+ PMD_DRV_LOG(DEBUG, "msg_result: %u", msg_result);
+
+ return msg_result;
+}
+
static ZXDH_DTB_MGR_T *
zxdh_np_dtb_mgr_get(uint32_t dev_id)
{
@@ -6500,6 +6571,11 @@ zxdh_np_dtb_table_entry_delete(uint32_t dev_id,
if (rc == ZXDH_HASH_RC_DEL_SRHFAIL)
continue;
break;
+ case ZXDH_SDT_TBLT_ETCAM:
+ rc = zxdh_np_dtb_acl_one_entry(dev_id, sdt_no,
+ ZXDH_DTB_ITEM_DELETE, pentry->p_entry_data,
+ &dtb_len, p_data_buff);
+ continue;
default:
PMD_DRV_LOG(ERR, "SDT table_type[ %u ] is invalid!", tbl_type);
rte_free(p_data_buff);
@@ -11204,3 +11280,1565 @@ zxdh_np_stat_car_queue_cfg_set(uint32_t dev_id,
return rc;
}
+
+uint32_t
+zxdh_np_dtb_acl_index_request(uint32_t dev_id,
+ uint32_t sdt_no, uint32_t vport, uint32_t *p_index)
+{
+ uint32_t rc = ZXDH_OK;
+ uint32_t index = 0;
+ uint32_t eram_sdt_no = 0;
+ ZXDH_SPINLOCK_T *p_dtb_spinlock = NULL;
+ ZXDH_DEV_SPINLOCK_TYPE_E spinlock = ZXDH_DEV_SPINLOCK_T_DTB;
+ ZXDH_SDT_TBL_ETCAM_T sdt_acl = {0};
+ ZXDH_SDT_TBL_ERAM_T sdt_eram = {0};
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, sdt_no, &sdt_acl);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+ if (sdt_acl.table_type != ZXDH_SDT_TBLT_ETCAM) {
+ PMD_DRV_LOG(ERR, "SDT[%u] table_type[ %u ] is not etcam table!",
+ sdt_no, sdt_acl.table_type);
+ return ZXDH_ERR;
+ }
+
+ eram_sdt_no = zxdh_np_apt_get_sdt_partner(dev_id, sdt_no);
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, eram_sdt_no, &sdt_eram);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+ if (sdt_eram.table_type != ZXDH_SDT_TBLT_ERAM) {
+ PMD_DRV_LOG(ERR, "SDT[%u] table_type[ %u ] is not eram table!",
+ eram_sdt_no, sdt_eram.table_type);
+ return ZXDH_ERR;
+ }
+
+ rc = zxdh_np_dev_opr_spinlock_get(dev_id, (uint32_t)spinlock, &p_dtb_spinlock);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dev_opr_spinlock_get");
+
+ rte_spinlock_lock(&p_dtb_spinlock->spinlock);
+ rc = zxdh_np_agent_channel_acl_index_request(dev_id, sdt_no, vport, &index);
+ rte_spinlock_unlock(&p_dtb_spinlock->spinlock);
+
+ *p_index = index;
+
+ return rc;
+}
+
+uint32_t
+zxdh_np_dtb_acl_index_release(uint32_t dev_id,
+ uint32_t sdt_no, uint32_t vport, uint32_t index)
+{
+ uint32_t rc = ZXDH_OK;
+ uint32_t eram_sdt_no = 0;
+ ZXDH_SPINLOCK_T *p_dtb_spinlock = NULL;
+ ZXDH_DEV_SPINLOCK_TYPE_E spinlock = ZXDH_DEV_SPINLOCK_T_DTB;
+ ZXDH_SDT_TBL_ETCAM_T sdt_acl = {0};
+ ZXDH_SDT_TBL_ERAM_T sdt_eram = {0};
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, sdt_no, &sdt_acl);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+ if (sdt_acl.table_type != ZXDH_SDT_TBLT_ETCAM) {
+ PMD_DRV_LOG(ERR, "SDT[%u] table_type[ %u ] is not etcam table!",
+ sdt_no, sdt_acl.table_type);
+ return ZXDH_ERR;
+ }
+
+ eram_sdt_no = zxdh_np_apt_get_sdt_partner(dev_id, sdt_no);
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, eram_sdt_no, &sdt_eram);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+ if (sdt_eram.table_type != ZXDH_SDT_TBLT_ERAM) {
+ PMD_DRV_LOG(ERR, "SDT[%u] table_type[ %u ] is not eram table!",
+ eram_sdt_no, sdt_eram.table_type);
+ return ZXDH_ERR;
+ }
+
+ rc = zxdh_np_dev_opr_spinlock_get(dev_id, (uint32_t)spinlock, &p_dtb_spinlock);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dev_opr_spinlock_get");
+
+ rte_spinlock_lock(&p_dtb_spinlock->spinlock);
+
+ rc = zxdh_np_agent_channel_acl_index_release(dev_id,
+ ZXDH_ACL_INDEX_RELEASE, sdt_no, vport, index);
+
+ rte_spinlock_unlock(&p_dtb_spinlock->spinlock);
+
+ return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_sdt_eram_table_dump(uint32_t dev_id, uint32_t queue_id, uint32_t sdt_no,
+ uint32_t start_index, uint32_t depth, uint32_t *p_data, uint32_t *element_id)
+{
+ uint32_t rc = ZXDH_OK;
+ uint32_t eram_base_addr = 0;
+ uint32_t dump_addr_128bit = 0;
+ uint32_t dump_item_index = 0;
+ uint32_t dump_data_len = 0;
+ uint32_t dump_desc_len = 0;
+ uint64_t dump_sdt_phy_addr = 0;
+ uint64_t dump_sdt_vir_addr = 0;
+ uint32_t dump_addr_size = 0;
+ uint32_t dump_dst_phy_haddr = 0;
+ uint32_t dump_dst_phy_laddr = 0;
+ uint8_t form_buff[ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8] = {0};
+ ZXDH_SDT_TBL_ERAM_T sdt_eram = {0};
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, sdt_no, &sdt_eram);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+
+ eram_base_addr = sdt_eram.eram_base_addr;
+ dump_addr_128bit = eram_base_addr + start_index;
+
+ rc = zxdh_np_dtb_dump_sdt_addr_get(dev_id,
+ queue_id,
+ sdt_no,
+ &dump_sdt_phy_addr,
+ &dump_sdt_vir_addr,
+ &dump_addr_size);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_dump_sdt_addr_get");
+
+ memset((uint8_t *)dump_sdt_vir_addr, 0, dump_addr_size);
+ rc = zxdh_np_dtb_tab_up_free_item_get(dev_id, queue_id, &dump_item_index);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_tab_up_free_item_get");
+ PMD_DRV_LOG(DEBUG, "dump queue id %u, element_id is: %u.",
+ queue_id, dump_item_index);
+
+ *element_id = dump_item_index;
+
+ rc = zxdh_np_dtb_tab_up_item_user_addr_set(dev_id,
+ queue_id,
+ dump_item_index,
+ dump_sdt_phy_addr,
+ dump_sdt_vir_addr);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_tab_up_item_addr_set");
+
+ rc = zxdh_np_dtb_tab_up_item_addr_get(dev_id, queue_id, dump_item_index,
+ &dump_dst_phy_haddr, &dump_dst_phy_laddr);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_tab_up_item_addr_get");
+
+ rc = zxdh_np_dtb_smmu0_dump_info_write(dev_id,
+ dump_addr_128bit,
+ depth,
+ dump_dst_phy_haddr,
+ dump_dst_phy_laddr,
+ (uint32_t *)form_buff);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_smmu0_dump_info_write");
+
+ dump_data_len = depth * 128 / 32;
+ dump_desc_len = ZXDH_DTB_LEN_POS_SETP / 4;
+
+ if (dump_data_len * 4 > dump_addr_size) {
+ PMD_DRV_LOG(ERR, "eram dump size is too small!");
+ return ZXDH_RC_DTB_DUMP_SIZE_SMALL;
+ }
+
+ rc = zxdh_np_dtb_write_dump_desc_info(dev_id,
+ queue_id,
+ dump_item_index,
+ (uint32_t *)form_buff,
+ dump_data_len,
+ dump_desc_len,
+ p_data);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_write_dump_desc_info");
+
+ return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_eram_table_dump(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t sdt_no,
+ ZXDH_DTB_DUMP_INDEX_T start_index,
+ ZXDH_DTB_ERAM_ENTRY_INFO_T *p_dump_data_arr,
+ uint32_t *entry_num,
+ __rte_unused ZXDH_DTB_DUMP_INDEX_T *next_start_index,
+ uint32_t *finish_flag)
+{
+ uint32_t rc = ZXDH_OK;
+ uint32_t i = 0;
+ uint32_t dump_mode = 0;
+ uint32_t eram_table_depth = 0;
+ uint32_t start_index_128bit = 0;
+ uint32_t row_index = 0;
+ uint32_t col_index = 0;
+ uint32_t dump_depth_128bit = 0;
+ uint32_t dump_depth = 0;
+ uint32_t element_id = 0;
+ uint8_t *dump_data_buff = NULL;
+ uint8_t *temp_data = NULL;
+ uint32_t remain = 0;
+ uint32_t *buff = NULL;
+
+ ZXDH_DTB_ERAM_ENTRY_INFO_T *p_dump_user_data = NULL;
+ ZXDH_SDT_TBL_ERAM_T sdt_eram = {0};
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, sdt_no, &sdt_eram);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+
+ dump_mode = sdt_eram.eram_mode;
+ eram_table_depth = sdt_eram.eram_table_depth;
+
+ zxdh_np_eram_index_cal(dump_mode, eram_table_depth,
+ &dump_depth_128bit, &col_index);
+
+ zxdh_np_eram_index_cal(dump_mode, start_index.index,
+ &start_index_128bit, &col_index);
+
+ dump_depth = dump_depth_128bit - start_index_128bit;
+
+ dump_data_buff = (uint8_t *)rte_zmalloc(NULL, dump_depth * ZXDH_DTB_LEN_POS_SETP, 0);
+ if (dump_data_buff == NULL) {
+ PMD_DRV_LOG(ERR, "%s point null!", __func__);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ rc = zxdh_np_dtb_sdt_eram_table_dump(dev_id,
+ queue_id,
+ sdt_no,
+ start_index_128bit,
+ dump_depth,
+ (uint32_t *)dump_data_buff,
+ &element_id);
+
+ if (dump_mode == ZXDH_ERAM128_TBL_128b) {
+ for (i = 0; i < dump_depth; i++) {
+ p_dump_user_data = p_dump_data_arr + i;
+ temp_data = dump_data_buff + i * ZXDH_DTB_LEN_POS_SETP;
+ if (p_dump_user_data == NULL || p_dump_user_data->p_data == NULL) {
+ PMD_DRV_LOG(ERR, "data buff is NULL!");
+ rte_free(dump_data_buff);
+ return ZXDH_ERR;
+ }
+
+ p_dump_user_data->index = start_index.index + i;
+ rte_memcpy(p_dump_user_data->p_data, temp_data, (128 / 8));
+ }
+ } else if (dump_mode == ZXDH_ERAM128_TBL_64b) {
+ remain = start_index.index % 2;
+ for (i = 0; i < eram_table_depth - start_index.index; i++) {
+ zxdh_np_eram_index_cal(dump_mode, remain, &row_index, &col_index);
+ temp_data = dump_data_buff + row_index * ZXDH_DTB_LEN_POS_SETP;
+
+ buff = (uint32_t *)temp_data;
+ p_dump_user_data = p_dump_data_arr + i;
+
+ if (p_dump_user_data == NULL || p_dump_user_data->p_data == NULL) {
+ PMD_DRV_LOG(ERR, "data buff is NULL!");
+ rte_free(dump_data_buff);
+ return ZXDH_ERR;
+ }
+
+ p_dump_user_data->index = start_index.index + i;
+ rte_memcpy(p_dump_user_data->p_data,
+ buff + ((1 - col_index) << 1), (64 / 8));
+
+ remain++;
+ }
+ }
+
+ *entry_num = eram_table_depth - start_index.index;
+ *finish_flag = 1;
+ PMD_DRV_LOG(DEBUG, "dump entry num %u, finish flag %u", *entry_num, *finish_flag);
+
+ rte_free(dump_data_buff);
+
+ return ZXDH_OK;
+}
+
+static uint32_t
+zxdh_np_dtb_acl_index_parse(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t eram_sdt_no,
+ uint32_t vport,
+ uint32_t *index_num,
+ uint32_t *p_index_array)
+{
+ uint32_t rc = ZXDH_OK;
+ uint32_t eram_table_depth = 0;
+ uint32_t byte_num = 0;
+ uint32_t i = 0;
+ uint32_t entry_num = 0;
+ uint32_t valid_entry_num = 0;
+ uint32_t finish_flag = 0;
+ uint8_t valid = 0;
+ uint32_t temp_vport = 0;
+ ZXDH_SDT_TBL_ERAM_T sdt_eram = {0};
+ ZXDH_DTB_ERAM_ENTRY_INFO_T *p_dump_data_arr = NULL;
+ uint8_t *data_buff = NULL;
+ ZXDH_DTB_DUMP_INDEX_T start_index = {0};
+ ZXDH_DTB_DUMP_INDEX_T next_start_index = {0};
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, eram_sdt_no, &sdt_eram);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+
+ byte_num = (sdt_eram.eram_mode == ZXDH_ERAM128_TBL_64b) ? 8 : 16;
+ eram_table_depth = sdt_eram.eram_table_depth;
+ p_dump_data_arr = (ZXDH_DTB_ERAM_ENTRY_INFO_T *)rte_zmalloc(NULL, eram_table_depth *
+ sizeof(ZXDH_DTB_ERAM_ENTRY_INFO_T), 0);
+ if (p_dump_data_arr == NULL) {
+ PMD_DRV_LOG(ERR, "p_dump_data_arr point null!");
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ data_buff = (uint8_t *)rte_zmalloc(NULL, byte_num * eram_table_depth, 0);
+ if (data_buff == NULL) {
+ PMD_DRV_LOG(ERR, "data_buff point null!");
+ rte_free(p_dump_data_arr);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ for (i = 0; i < eram_table_depth; i++) {
+ p_dump_data_arr[i].index = i;
+ p_dump_data_arr[i].p_data = (uint32_t *)(data_buff + i * byte_num);
+ }
+
+ start_index.index = 0;
+ rc = zxdh_np_dtb_eram_table_dump(dev_id,
+ queue_id,
+ eram_sdt_no,
+ start_index,
+ p_dump_data_arr,
+ &entry_num,
+ &next_start_index,
+ &finish_flag);
+
+ for (i = 0; i < entry_num; i++) {
+ valid = (p_dump_data_arr[i].p_data[0] >> 31) & 0x1;
+ temp_vport = p_dump_data_arr[i].p_data[0] & 0x7fffffff;
+ if (valid && temp_vport == vport) {
+ p_index_array[valid_entry_num] = i;
+ valid_entry_num++;
+ }
+ }
+
+ *index_num = valid_entry_num;
+ rte_free(data_buff);
+ rte_free(p_dump_data_arr);
+
+ return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_etcam_ind_data_get(uint8_t *p_in_data, uint32_t rd_mode, uint8_t *p_out_data)
+{
+ uint32_t rc = ZXDH_OK;
+
+ uint32_t i = 0;
+ uint8_t *p_temp = NULL;
+ uint32_t offset = 0;
+ uint8_t buff[ZXDH_ETCAM_WIDTH_MAX / 8] = {0};
+
+ p_temp = p_out_data;
+ rte_memcpy(buff, p_in_data, ZXDH_ETCAM_WIDTH_MAX / 8);
+
+ zxdh_np_comm_swap(buff, ZXDH_ETCAM_WIDTH_MAX / 8);
+
+ for (i = 0; i < ZXDH_ETCAM_RAM_NUM; i++) {
+ offset = i * (ZXDH_ETCAM_WIDTH_MIN / 8);
+
+ if ((rd_mode >> (ZXDH_ETCAM_RAM_NUM - 1 - i)) & 0x1) {
+ rte_memcpy(p_temp, buff + offset, ZXDH_ETCAM_WIDTH_MIN / 8);
+ p_temp += ZXDH_ETCAM_WIDTH_MIN / 8;
+ }
+ }
+
+ return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_acl_table_dump(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t sdt_no,
+ __rte_unused ZXDH_DTB_DUMP_INDEX_T start_index,
+ ZXDH_DTB_ACL_ENTRY_INFO_T *p_dump_data_arr,
+ uint32_t *p_entry_num,
+ __rte_unused ZXDH_DTB_DUMP_INDEX_T *next_start_index,
+ uint32_t *p_finish_flag)
+{
+ uint32_t rc = ZXDH_OK;
+
+ uint32_t i = 0;
+ uint32_t handle = 0;
+
+ uint32_t dump_element_id = 0;
+
+ uint8_t *temp_dump_out_data = NULL;
+ uint8_t *dump_info_buff = NULL;
+ uint8_t *p_data_start = NULL;
+ uint8_t *p_data_640bit = NULL;
+ uint8_t *p_mask_start = NULL;
+ uint8_t *p_mask_640bit = NULL;
+ uint8_t *p_rst_start = NULL;
+ uint8_t *p_rst_128bit = NULL;
+ uint32_t *eram_buff = NULL;
+
+ uint32_t addr_640bit = 0;
+ uint32_t rd_mask = 0;
+ uint32_t dump_eram_depth_128bit = 0;
+ uint32_t eram_row_index = 0;
+ uint32_t eram_col_index = 0;
+
+ uint8_t cmd_buff[ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8] = {0};
+ uint8_t xy_data[ZXDH_ETCAM_WIDTH_MAX / 8] = {0};
+ uint8_t xy_mask[ZXDH_ETCAM_WIDTH_MAX / 8] = {0};
+ uint8_t dm_data[ZXDH_ETCAM_WIDTH_MAX / 8] = {0};
+ uint8_t dm_mask[ZXDH_ETCAM_WIDTH_MAX / 8] = {0};
+ ZXDH_ETCAM_ENTRY_T entry_xy = {0};
+ ZXDH_ETCAM_ENTRY_T entry_dm = {0};
+ ZXDH_DTB_ACL_ENTRY_INFO_T *p_dump_user_data = NULL;
+
+ uint32_t block_num = 0;
+ uint32_t etcam_key_mode = 0;
+ uint32_t etcam_table_id = 0;
+ uint32_t as_enable = 0;
+ uint32_t as_eram_baddr = 0;
+ uint32_t etcam_as_mode = 0;
+ uint32_t etcam_table_depth = 0;
+ uint32_t block_idx = 0;
+
+ uint32_t etcam_data_dst_phy_haddr = 0;
+ uint32_t etcam_data_dst_phy_laddr = 0;
+ uint32_t etcam_mask_dst_phy_haddr = 0;
+ uint32_t etcam_mask_dst_phy_laddr = 0;
+ uint32_t as_rst_dst_phy_haddr = 0;
+ uint32_t as_rst_dst_phy_laddr = 0;
+
+ uint32_t dtb_desc_addr_offset = 0;
+ uint32_t dump_data_len = 0;
+ uint32_t dtb_desc_len = 0;
+
+ uint32_t etcam_data_len_offset = 0;
+ uint32_t etcam_mask_len_offset = 0;
+ uint32_t data_byte_size = 0;
+
+ ZXDH_ACL_CFG_EX_T *p_acl_cfg = NULL;
+ ZXDH_ACL_TBL_CFG_T *p_tbl_cfg = NULL;
+
+ ZXDH_SDT_TBL_ETCAM_T sdt_etcam_info = {0};
+ ZXDH_ETCAM_DUMP_INFO_T etcam_dump_info = {0};
+ ZXDH_DTB_ENTRY_T dtb_dump_entry = {0};
+
+ uint32_t shift_amount = 0;
+ uint32_t mask_base = 0;
+ uint32_t offset = 0;
+
+ dtb_dump_entry.cmd = cmd_buff;
+ entry_xy.p_data = xy_data;
+ entry_xy.p_mask = xy_mask;
+ entry_dm.p_data = dm_data;
+ entry_dm.p_mask = dm_mask;
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, sdt_no, &sdt_etcam_info);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+ etcam_key_mode = sdt_etcam_info.etcam_key_mode;
+ etcam_as_mode = sdt_etcam_info.as_rsp_mode;
+ etcam_table_id = sdt_etcam_info.etcam_table_id;
+ as_enable = sdt_etcam_info.as_en;
+ as_eram_baddr = sdt_etcam_info.as_eram_baddr;
+ etcam_table_depth = sdt_etcam_info.etcam_table_depth;
+
+ zxdh_np_acl_cfg_get(dev_id, &p_acl_cfg);
+
+ p_tbl_cfg = p_acl_cfg->acl_tbls + etcam_table_id;
+
+ if (!p_tbl_cfg->is_used) {
+ PMD_DRV_LOG(ERR, "table[ %u ] is not init!", etcam_table_id);
+ RTE_ASSERT(0);
+ return ZXDH_ACL_RC_TBL_NOT_INIT;
+ }
+
+ data_byte_size = ZXDH_ETCAM_ENTRY_SIZE_GET(etcam_key_mode);
+ if (data_byte_size > ZXDH_ETCAM_RAM_WIDTH) {
+ PMD_DRV_LOG(ERR, "etcam date size is over 80B!");
+ return ZXDH_ACL_RC_INVALID_PARA;
+ }
+
+ block_num = p_tbl_cfg->block_num;
+
+ rc = zxdh_np_dtb_dump_addr_set(dev_id, queue_id, sdt_no, &dump_element_id);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_dump_addr_set");
+
+ dump_info_buff = (uint8_t *)rte_zmalloc(NULL, ZXDH_DTB_TABLE_DUMP_INFO_BUFF_SIZE, 0);
+ if (dump_info_buff == NULL) {
+ PMD_DRV_LOG(ERR, "%s point null!", __func__);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ for (i = 0; i < block_num; i++) {
+ block_idx = p_tbl_cfg->block_array[i];
+
+ PMD_DRV_LOG(DEBUG, "block_idx: %u", block_idx);
+
+ etcam_dump_info.block_sel = block_idx;
+ etcam_dump_info.addr = 0;
+ etcam_dump_info.tb_width = 3;
+ etcam_dump_info.rd_mode = 0xFF;
+ etcam_dump_info.tb_depth = ZXDH_ETCAM_RAM_DEPTH;
+ etcam_dump_info.data_or_mask = ZXDH_ETCAM_DTYPE_DATA;
+
+ zxdh_np_dtb_tab_up_item_offset_addr_get(dev_id,
+ queue_id,
+ dump_element_id,
+ dump_data_len,
+ &etcam_data_dst_phy_haddr,
+ &etcam_data_dst_phy_laddr);
+
+ zxdh_np_dtb_etcam_dump_entry(dev_id,
+ &etcam_dump_info,
+ etcam_data_dst_phy_haddr,
+ etcam_data_dst_phy_laddr,
+ &dtb_dump_entry);
+
+ zxdh_np_dtb_data_write(dump_info_buff, dtb_desc_addr_offset, &dtb_dump_entry);
+
+ memset(cmd_buff, 0, ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8);
+
+ dtb_desc_len += 1;
+ dtb_desc_addr_offset += ZXDH_DTB_LEN_POS_SETP;
+ dump_data_len += ZXDH_ETCAM_RAM_DEPTH * 640 / 8;
+ }
+
+ etcam_data_len_offset = dump_data_len;
+
+ for (i = 0; i < block_num; i++) {
+ block_idx = p_tbl_cfg->block_array[i];
+
+ PMD_DRV_LOG(DEBUG, "mask: block_idx: %u", block_idx);
+
+ etcam_dump_info.block_sel = block_idx;
+ etcam_dump_info.addr = 0;
+ etcam_dump_info.tb_width = 3;
+ etcam_dump_info.rd_mode = 0xFF;
+ etcam_dump_info.tb_depth = ZXDH_ETCAM_RAM_DEPTH;
+ etcam_dump_info.data_or_mask = ZXDH_ETCAM_DTYPE_MASK;
+
+ zxdh_np_dtb_tab_up_item_offset_addr_get(dev_id,
+ queue_id,
+ dump_element_id,
+ dump_data_len,
+ &etcam_mask_dst_phy_haddr,
+ &etcam_mask_dst_phy_laddr);
+
+ zxdh_np_dtb_etcam_dump_entry(dev_id,
+ &etcam_dump_info,
+ etcam_mask_dst_phy_haddr,
+ etcam_mask_dst_phy_laddr,
+ &dtb_dump_entry);
+
+ zxdh_np_dtb_data_write(dump_info_buff, dtb_desc_addr_offset, &dtb_dump_entry);
+
+ memset(cmd_buff, 0, ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8);
+
+ dtb_desc_len += 1;
+ dtb_desc_addr_offset += ZXDH_DTB_LEN_POS_SETP;
+ dump_data_len += ZXDH_ETCAM_RAM_DEPTH * 640 / 8;
+ }
+ etcam_mask_len_offset = dump_data_len;
+
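+ /* If associated data is enabled, also dump the AS result entries from ERAM */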
+ if (as_enable) {
+ zxdh_np_eram_index_cal(etcam_as_mode,
+ etcam_table_depth, &dump_eram_depth_128bit, &eram_col_index);
+
+ zxdh_np_dtb_tab_up_item_offset_addr_get(dev_id,
+ queue_id,
+ dump_element_id,
+ dump_data_len,
+ &as_rst_dst_phy_haddr,
+ &as_rst_dst_phy_laddr);
+
+ zxdh_np_dtb_smmu0_dump_entry(dev_id,
+ as_eram_baddr,
+ dump_eram_depth_128bit,
+ as_rst_dst_phy_haddr,
+ as_rst_dst_phy_laddr,
+ &dtb_dump_entry);
+
+ zxdh_np_dtb_data_write(dump_info_buff, dtb_desc_addr_offset, &dtb_dump_entry);
+
+ memset(cmd_buff, 0, ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8);
+ dtb_desc_len += 1;
+ dtb_desc_addr_offset += ZXDH_DTB_LEN_POS_SETP;
+ dump_data_len += dump_eram_depth_128bit * 128 / 8;
+ }
+
+ temp_dump_out_data = (uint8_t *)rte_zmalloc(NULL, dump_data_len * sizeof(uint8_t), 0);
+ if (temp_dump_out_data == NULL) {
+ PMD_DRV_LOG(ERR, "temp_dump_out_data point null!");
+ rte_free(dump_info_buff);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ rc = zxdh_np_dtb_write_dump_desc_info(dev_id,
+ queue_id,
+ dump_element_id,
+ (uint32_t *)dump_info_buff,
+ dump_data_len / 4,
+ dtb_desc_len * 4,
+ (uint32_t *)temp_dump_out_data);
+ rte_free(dump_info_buff);
+
+ p_data_start = temp_dump_out_data;
+ p_mask_start = temp_dump_out_data + etcam_data_len_offset;
+ if (as_enable)
+ p_rst_start = temp_dump_out_data + etcam_mask_len_offset;
+
+ for (handle = 0; handle < etcam_table_depth; handle++) {
+ p_dump_user_data = p_dump_data_arr + handle;
+
+ if (p_dump_user_data == NULL ||
+ p_dump_user_data->key_data == NULL ||
+ p_dump_user_data->key_mask == NULL) {
+ PMD_DRV_LOG(ERR, "etcam handle 0x%x data user buff is NULL!", handle);
+ rte_free(temp_dump_out_data);
+ return ZXDH_ERR;
+ }
+
+ if (as_enable) {
+ if (p_dump_user_data->p_as_rslt == NULL) {
+ PMD_DRV_LOG(ERR, "handle 0x%x data buff is NULL!", handle);
+ rte_free(temp_dump_out_data);
+ return ZXDH_ERR;
+ }
+ }
+
+ p_dump_user_data->handle = handle;
+
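+ /* Each 640-bit RAM line holds (1 << key_mode) entries; build the byte mask that selects this handle's slice */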
+ shift_amount = 8U >> etcam_key_mode;
+ mask_base = (1U << shift_amount) - 1;
+ offset = shift_amount * (handle % (1U << etcam_key_mode));
+ rd_mask = (mask_base << offset) & 0xFF;
+
+ addr_640bit = handle / (1U << etcam_key_mode);
+ p_data_640bit = p_data_start + addr_640bit * 640 / 8;
+ p_mask_640bit = p_mask_start + addr_640bit * 640 / 8;
+
+ zxdh_np_dtb_etcam_ind_data_get(p_data_640bit, rd_mask, entry_xy.p_data);
+ zxdh_np_dtb_etcam_ind_data_get(p_mask_640bit, rd_mask, entry_xy.p_mask);
+
+ zxdh_np_etcam_xy_to_dm(&entry_dm, &entry_xy, data_byte_size);
+
+ rte_memcpy(p_dump_user_data->key_data, entry_dm.p_data, data_byte_size);
+ rte_memcpy(p_dump_user_data->key_mask, entry_dm.p_mask, data_byte_size);
+
+ if (as_enable) {
+ zxdh_np_eram_index_cal(etcam_as_mode,
+ handle, &eram_row_index, &eram_col_index);
+
+ p_rst_128bit = p_rst_start + eram_row_index * ZXDH_DTB_LEN_POS_SETP;
+
+ eram_buff = (uint32_t *)p_rst_128bit;
+
+ if (etcam_as_mode == ZXDH_ERAM128_TBL_128b)
+ rte_memcpy(p_dump_user_data->p_as_rslt, eram_buff, (128 / 8));
+ else if (etcam_as_mode == ZXDH_ERAM128_TBL_64b)
+ rte_memcpy(p_dump_user_data->p_as_rslt,
+ eram_buff + ((1 - eram_col_index) << 1), (64 / 8));
+ }
+ }
+
+ *p_entry_num = etcam_table_depth;
+ *p_finish_flag = 1;
+
+ rte_free(temp_dump_out_data);
+
+ return ZXDH_OK;
+}
+
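+/* Result width in bytes for the given SMMU0/ERAM entry mode (defaults to 1 byte for other modes) */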
+static uint32_t
+zxdh_np_smmu0_tbl_size_get(uint32_t eram_mode)
+{
+ uint32_t size = 0;
+ if (eram_mode == ZXDH_ERAM128_TBL_128b)
+ size = 16;
+ else if (eram_mode == ZXDH_ERAM128_TBL_64b)
+ size = 8;
+ else if (eram_mode == ZXDH_ERAM128_TBL_32b)
+ size = 4;
+ else
+ size = 1;
+
+ return size;
+}
+
+static uint32_t
+zxdh_np_dtb_acl_data_get_by_handle(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t sdt_no,
+ uint32_t index_num,
+ uint32_t *p_index_array,
+ uint8_t *p_dump_data)
+{
+ uint32_t rc = ZXDH_OK;
+ uint32_t i = 0;
+ uint32_t etcam_key_mode = 0;
+ uint32_t etcam_table_depth = 0;
+ uint32_t as_len = 0;
+ uint32_t data_byte_size = 0;
+ uint32_t entry_num = 0;
+ uint32_t finish_flag = 0;
+ uint8_t *data_buff = NULL;
+ uint8_t *mask_buff = NULL;
+ uint8_t *eram_buff = NULL;
+
+ ZXDH_SDT_TBL_ETCAM_T sdt_etcam_info = {0};
+ ZXDH_DTB_DUMP_INDEX_T start_index = {0};
+ ZXDH_DTB_DUMP_INDEX_T next_start_index = {0};
+ ZXDH_DTB_ACL_ENTRY_INFO_T *p_dtb_acl_entry = NULL;
+ ZXDH_DTB_ACL_ENTRY_INFO_T *p_temp_entry = NULL;
+ ZXDH_DTB_ACL_ENTRY_INFO_T *p_dump_entry = NULL;
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, sdt_no, &sdt_etcam_info);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+ if (sdt_etcam_info.table_type != ZXDH_SDT_TBLT_ETCAM) {
+ PMD_DRV_LOG(ERR, "SDT[%u] table_type[ %u ] is not etcam table!",
+ sdt_no, sdt_etcam_info.table_type);
+ return ZXDH_ERR;
+ }
+
+ etcam_key_mode = sdt_etcam_info.etcam_key_mode;
+ etcam_table_depth = sdt_etcam_info.etcam_table_depth;
+ as_len = zxdh_np_smmu0_tbl_size_get(sdt_etcam_info.as_rsp_mode);
+ data_byte_size = ZXDH_ETCAM_ENTRY_SIZE_GET(etcam_key_mode);
+
+ p_dtb_acl_entry = (ZXDH_DTB_ACL_ENTRY_INFO_T *)rte_zmalloc(NULL, etcam_table_depth *
+ sizeof(ZXDH_DTB_ACL_ENTRY_INFO_T), 0);
+ if (p_dtb_acl_entry == NULL) {
+ PMD_DRV_LOG(ERR, "%s point null!", __func__);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ data_buff = (uint8_t *)rte_zmalloc(NULL, etcam_table_depth * data_byte_size, 0);
+ if (data_buff == NULL) {
+ PMD_DRV_LOG(ERR, "data_buff point null!");
+ rte_free(p_dtb_acl_entry);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ mask_buff = (uint8_t *)rte_zmalloc(NULL, etcam_table_depth * data_byte_size, 0);
+ if (mask_buff == NULL) {
+ PMD_DRV_LOG(ERR, "mask_buff point null!");
+ rte_free(data_buff);
+ rte_free(p_dtb_acl_entry);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ eram_buff = (uint8_t *)rte_zmalloc(NULL, etcam_table_depth * as_len, 0);
+ if (eram_buff == NULL) {
+ PMD_DRV_LOG(ERR, "eram_buff point null!");
+ rte_free(mask_buff);
+ rte_free(data_buff);
+ rte_free(p_dtb_acl_entry);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ for (i = 0; i < etcam_table_depth; i++) {
+ p_dtb_acl_entry[i].handle = i;
+ p_dtb_acl_entry[i].key_data = data_buff + i * data_byte_size;
+ p_dtb_acl_entry[i].key_mask = mask_buff + i * data_byte_size;
+ p_dtb_acl_entry[i].p_as_rslt = eram_buff + i * as_len;
+ }
+
+ rc = zxdh_np_dtb_acl_table_dump(dev_id,
+ queue_id,
+ sdt_no,
+ start_index,
+ p_dtb_acl_entry,
+ &entry_num,
+ &next_start_index,
+ &finish_flag);
+ if (rc != ZXDH_OK) {
+ PMD_DRV_LOG(ERR, "acl sdt[%u] dump fail, rc:0x%x", sdt_no, rc);
+ rte_free(data_buff);
+ rte_free(mask_buff);
+ rte_free(eram_buff);
+ rte_free(p_dtb_acl_entry);
+ return rc;
+ }
+
+ for (i = 0; i < index_num; i++) {
+ p_dump_entry = ((ZXDH_DTB_ACL_ENTRY_INFO_T *)p_dump_data) + i;
+ p_dump_entry->handle = p_index_array[i];
+ p_temp_entry = p_dtb_acl_entry + p_index_array[i];
+ rte_memcpy(p_dump_entry->key_data, p_temp_entry->key_data, data_byte_size);
+ rte_memcpy(p_dump_entry->key_mask, p_temp_entry->key_mask, data_byte_size);
+ rte_memcpy(p_dump_entry->p_as_rslt, p_temp_entry->p_as_rslt, as_len);
+ }
+
+ rte_free(data_buff);
+ rte_free(mask_buff);
+ rte_free(eram_buff);
+ rte_free(p_dtb_acl_entry);
+
+ return rc;
+}
+
+uint32_t
+zxdh_np_dtb_acl_table_dump_by_vport(uint32_t dev_id, uint32_t queue_id,
+ uint32_t sdt_no, uint32_t vport, uint32_t *entry_num, uint8_t *p_dump_data)
+{
+ uint32_t rc = ZXDH_OK;
+ uint32_t index_num = 0;
+ uint32_t eram_sdt_no = 0;
+ uint32_t *p_index_array = NULL;
+
+ ZXDH_SDT_TBL_ETCAM_T sdt_etcam_info = {0};
+ ZXDH_SDT_TBL_ERAM_T sdt_eram = {0};
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, sdt_no, &sdt_etcam_info);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+ if (sdt_etcam_info.table_type != ZXDH_SDT_TBLT_ETCAM) {
+ PMD_DRV_LOG(ERR, "SDT[%u] table_type[ %u ] is not etcam table!",
+ sdt_no, sdt_etcam_info.table_type);
+ return ZXDH_ERR;
+ }
+
+ eram_sdt_no = zxdh_np_apt_get_sdt_partner(dev_id, sdt_no);
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, eram_sdt_no, &sdt_eram);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+ if (sdt_eram.table_type != ZXDH_SDT_TBLT_ERAM) {
+ PMD_DRV_LOG(ERR, "SDT[%u] table_type[ %u ] is not eram table!",
+ eram_sdt_no, sdt_eram.table_type);
+ return ZXDH_ERR;
+ }
+
+ p_index_array = (uint32_t *)rte_zmalloc(NULL,
+ sizeof(uint32_t) * sdt_eram.eram_table_depth, 0);
+ if (p_index_array == NULL) {
+ PMD_DRV_LOG(ERR, "%s point null!", __func__);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ rc = zxdh_np_dtb_acl_index_parse(dev_id, queue_id,
+ eram_sdt_no, vport, &index_num, p_index_array);
+ if (rc != ZXDH_OK) {
+ rte_free(p_index_array);
+ PMD_DRV_LOG(ERR, "acl index parse failed");
+ return ZXDH_ERR;
+ }
+
+ if (!index_num) {
+ PMD_DRV_LOG(ERR, "SDT[%u] vport[0x%x] item num is zero!", sdt_no, vport);
+ rte_free(p_index_array);
+ return ZXDH_OK;
+ }
+
+ rc = zxdh_np_dtb_acl_data_get_by_handle(dev_id, queue_id, sdt_no,
+ index_num, p_index_array, p_dump_data);
+ if (rc != ZXDH_OK) {
+ rte_free(p_index_array);
+ PMD_DRV_LOG(ERR, "acl date by handle failed");
+ return ZXDH_ERR;
+ }
+
+ *entry_num = index_num;
+ rte_free(p_index_array);
+
+ return ZXDH_OK;
+}
+
+static uint32_t
+zxdh_np_dtb_acl_dma_insert_cycle(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t sdt_no,
+ uint32_t entry_num,
+ ZXDH_DTB_ACL_ENTRY_INFO_T *p_acl_entry_arr,
+ uint32_t *element_id)
+{
+ uint32_t rc = ZXDH_OK;
+ uint32_t as_eram_baddr = 0;
+ uint32_t as_enable = 0;
+ uint32_t etcam_table_id = 0;
+ uint32_t etcam_as_mode = 0;
+ uint32_t block_idx = 0;
+ uint32_t ram_addr = 0;
+ uint32_t etcam_wr_mode = 0;
+ uint32_t eram_wrt_mode = 0;
+ uint32_t eram_index = 0;
+
+ uint32_t item_cnt = 0;
+ uint32_t addr_offset_bk = 0;
+ uint32_t dtb_len = 0;
+ uint32_t as_addr_offset = 0;
+ uint32_t as_dtb_len = 0;
+
+ ZXDH_ACL_CFG_EX_T *p_acl_cfg = NULL;
+ ZXDH_ACL_TBL_CFG_T *p_tbl_cfg = NULL;
+ ZXDH_DTB_ACL_ENTRY_INFO_T *p_acl_entry = NULL;
+ uint32_t *p_as_eram_data = NULL;
+ uint8_t *table_data_buff = NULL;
+ ZXDH_ETCAM_ENTRY_T etcam_entry = {0};
+
+ uint8_t entry_data_buff[ZXDH_ETCAM_WIDTH_MAX / 8] = {0};
+ uint8_t entry_mask_buff[ZXDH_ETCAM_WIDTH_MAX / 8] = {0};
+ uint32_t as_eram_data_buff[4] = {0};
+ uint8_t entry_data_cmd_buff[ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8] = {0};
+ uint8_t entry_mask_cmd_buff[ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8] = {0};
+ uint8_t as_eram_cmd_buff[ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8] = {0};
+
+ ZXDH_SDT_TBL_ETCAM_T sdt_etcam_info = {0};
+
+ ZXDH_DTB_ENTRY_T entry_data = {0};
+ ZXDH_DTB_ENTRY_T entry_mask = {0};
+ ZXDH_DTB_ENTRY_T dtb_as_data_entry = {0};
+
+ entry_data.cmd = entry_data_cmd_buff;
+ entry_data.data = (uint8_t *)entry_data_buff;
+
+ entry_mask.cmd = entry_mask_cmd_buff;
+ entry_mask.data = (uint8_t *)entry_mask_buff;
+
+ dtb_as_data_entry.cmd = as_eram_cmd_buff;
+ dtb_as_data_entry.data = (uint8_t *)as_eram_data_buff;
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, sdt_no, &sdt_etcam_info);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+ etcam_as_mode = sdt_etcam_info.as_rsp_mode;
+ etcam_table_id = sdt_etcam_info.etcam_table_id;
+ as_enable = sdt_etcam_info.as_en;
+ as_eram_baddr = sdt_etcam_info.as_eram_baddr;
+
+ if (as_enable) {
+ switch (etcam_as_mode) {
+ case ZXDH_ERAM128_TBL_128b:
+ eram_wrt_mode = ZXDH_ERAM128_OPR_128b;
+ break;
+ case ZXDH_ERAM128_TBL_64b:
+ eram_wrt_mode = ZXDH_ERAM128_OPR_64b;
+ break;
+ case ZXDH_ERAM128_TBL_1b:
+ eram_wrt_mode = ZXDH_ERAM128_OPR_1b;
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "etcam_as_mode is invalid!");
+ return ZXDH_ERR;
+ }
+ }
+
+ zxdh_np_acl_cfg_get(dev_id, &p_acl_cfg);
+
+ p_tbl_cfg = p_acl_cfg->acl_tbls + etcam_table_id;
+
+ if (!p_tbl_cfg->is_used) {
+ PMD_DRV_LOG(ERR, "table[ %u ] is not init!", etcam_table_id);
+ RTE_ASSERT(0);
+ return ZXDH_ACL_RC_TBL_NOT_INIT;
+ }
+
+ table_data_buff = (uint8_t *)rte_zmalloc(NULL, ZXDH_DTB_TABLE_DATA_BUFF_SIZE, 0);
+ if (table_data_buff == NULL) {
+ PMD_DRV_LOG(ERR, "%s point null!", __func__);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ for (item_cnt = 0; item_cnt < entry_num; ++item_cnt) {
+ p_acl_entry = p_acl_entry_arr + item_cnt;
+
+ etcam_entry.mode = p_tbl_cfg->key_mode;
+ etcam_entry.p_data = p_acl_entry->key_data;
+ etcam_entry.p_mask = p_acl_entry->key_mask;
+
+ zxdh_np_acl_hdw_addr_get(p_tbl_cfg, p_acl_entry->handle,
+ &block_idx, &ram_addr, &etcam_wr_mode);
+
+ zxdh_np_dtb_etcam_entry_add(dev_id,
+ ram_addr,
+ block_idx,
+ etcam_wr_mode,
+ ZXDH_ETCAM_OPR_DM,
+ &etcam_entry,
+ &entry_data,
+ &entry_mask);
+
+ dtb_len += ZXDH_DTB_ETCAM_LEN_SIZE;
+ zxdh_np_dtb_data_write(table_data_buff, addr_offset_bk, &entry_data);
+
+ memset(entry_data_cmd_buff, 0, ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8);
+ memset(entry_data_buff, 0, ZXDH_ETCAM_WIDTH_MAX / 8);
+ addr_offset_bk = addr_offset_bk + ZXDH_DTB_ETCAM_LEN_SIZE * ZXDH_DTB_LEN_POS_SETP;
+
+ dtb_len += ZXDH_DTB_ETCAM_LEN_SIZE;
+ zxdh_np_dtb_data_write(table_data_buff, addr_offset_bk, &entry_mask);
+
+ memset(entry_mask_cmd_buff, 0, ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8);
+ memset(entry_mask_buff, 0, ZXDH_ETCAM_WIDTH_MAX / 8);
+ addr_offset_bk = addr_offset_bk + ZXDH_DTB_ETCAM_LEN_SIZE * ZXDH_DTB_LEN_POS_SETP;
+
+ if (as_enable) {
+ p_as_eram_data = (uint32_t *)(p_acl_entry->p_as_rslt);
+
+ zxdh_np_dtb_se_smmu0_ind_write(dev_id,
+ as_eram_baddr,
+ eram_index,
+ eram_wrt_mode,
+ p_as_eram_data,
+ &dtb_as_data_entry);
+
+ switch (eram_wrt_mode) {
+ case ZXDH_ERAM128_OPR_128b:
+ as_dtb_len = 2;
+ as_addr_offset = ZXDH_DTB_LEN_POS_SETP * 2;
+ break;
+ case ZXDH_ERAM128_OPR_64b:
+ as_dtb_len = 1;
+ as_addr_offset = ZXDH_DTB_LEN_POS_SETP;
+ break;
+ case ZXDH_ERAM128_OPR_1b:
+ as_dtb_len = 1;
+ as_addr_offset = ZXDH_DTB_LEN_POS_SETP;
+ break;
+ }
+
+ zxdh_np_dtb_data_write(table_data_buff,
+ addr_offset_bk, &dtb_as_data_entry);
+ addr_offset_bk = addr_offset_bk + as_addr_offset;
+ dtb_len += as_dtb_len;
+
+ memset(as_eram_cmd_buff, 0, ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8);
+ memset(as_eram_data_buff, 0, 4 * sizeof(uint32_t));
+ }
+ }
+
+ rc = zxdh_np_dtb_write_down_table_data(dev_id,
+ queue_id,
+ dtb_len * 16,
+ table_data_buff,
+ element_id);
+ rte_free(table_data_buff);
+
+ rc = zxdh_np_dtb_tab_down_success_status_check(dev_id, queue_id, *element_id);
+
+ return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_acl_dma_insert(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t sdt_no,
+ uint32_t entry_num,
+ ZXDH_DTB_ACL_ENTRY_INFO_T *p_acl_entry_arr,
+ uint32_t *element_id)
+{
+ uint32_t rc = ZXDH_OK;
+ uint32_t as_enable;
+ uint32_t etcam_as_mode;
+ uint32_t entry_num_max = 0;
+ uint32_t entry_cycle = 0;
+ uint32_t entry_remains = 0;
+ uint32_t i = 0;
+ ZXDH_DTB_ACL_ENTRY_INFO_T *p_entry = NULL;
+
+ ZXDH_SDT_TBL_ETCAM_T sdt_etcam_info = {0};
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, sdt_no, &sdt_etcam_info);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_data_write");
+
+ as_enable = sdt_etcam_info.as_en;
+ etcam_as_mode = sdt_etcam_info.as_rsp_mode;
+
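+ /* Limit entries per DMA cycle so one batch fits in the DTB data buffer; AS result data lowers the limit */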
+ if (!as_enable) {
+ entry_num_max = 0x55;
+ } else {
+ if (etcam_as_mode == ZXDH_ERAM128_TBL_128b)
+ entry_num_max = 0x49;
+ else
+ entry_num_max = 0x4e;
+ }
+
+ entry_cycle = entry_num / entry_num_max;
+ entry_remains = entry_num % entry_num_max;
+
+ for (i = 0; i < entry_cycle; ++i) {
+ p_entry = p_acl_entry_arr + entry_num_max * i;
+ rc = zxdh_np_dtb_acl_dma_insert_cycle(dev_id,
+ queue_id,
+ sdt_no,
+ entry_num_max,
+ p_entry,
+ element_id);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_acl_dma_insert_cycle");
+ }
+
+ if (entry_remains) {
+ p_entry = p_acl_entry_arr + entry_num_max * entry_cycle;
+ rc = zxdh_np_dtb_acl_dma_insert_cycle(dev_id,
+ queue_id,
+ sdt_no,
+ entry_remains,
+ p_entry,
+ element_id);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_acl_dma_insert_cycle");
+ }
+
+ return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_acl_data_clear(uint32_t dev_id, uint32_t queue_id,
+ uint32_t sdt_no, uint32_t index_num, uint32_t *p_index_array)
+{
+ uint32_t rc = ZXDH_OK;
+
+ uint32_t data_byte_size = 0;
+ uint32_t index = 0;
+ uint32_t etcam_key_mode = 0;
+ uint32_t as_enable = 0;
+ uint32_t element_id = 0;
+
+ ZXDH_SDT_TBL_ETCAM_T sdt_etcam_info = {0};
+ ZXDH_DTB_ACL_ENTRY_INFO_T *p_entry_arr = NULL;
+
+ uint8_t *data_buff = NULL;
+ uint8_t *mask_buff = NULL;
+ uint32_t *eram_buff = NULL;
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, sdt_no, &sdt_etcam_info);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+
+ etcam_key_mode = sdt_etcam_info.etcam_key_mode;
+ as_enable = sdt_etcam_info.as_en;
+ data_byte_size = ZXDH_ETCAM_ENTRY_SIZE_GET(etcam_key_mode);
+
+ p_entry_arr = (ZXDH_DTB_ACL_ENTRY_INFO_T *)rte_zmalloc(NULL, index_num *
+ sizeof(ZXDH_DTB_ACL_ENTRY_INFO_T), 0);
+ if (p_entry_arr == NULL) {
+ PMD_DRV_LOG(ERR, "%s point null!", __func__);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ data_buff = (uint8_t *)rte_zmalloc(NULL, data_byte_size, 0);
+ if (data_buff == NULL) {
+ PMD_DRV_LOG(ERR, "data_buff point null!");
+ rte_free(p_entry_arr);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ mask_buff = (uint8_t *)rte_zmalloc(NULL, data_byte_size, 0);
+ if (mask_buff == NULL) {
+ PMD_DRV_LOG(ERR, "mask_buff point null!");
+ rte_free(data_buff);
+ rte_free(p_entry_arr);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ if (as_enable) {
+ eram_buff = (uint32_t *)rte_zmalloc(NULL, 4 * sizeof(uint32_t), 0);
+ if (eram_buff == NULL) {
+ PMD_DRV_LOG(ERR, "eram_buff point null!");
+ rte_free(mask_buff);
+ rte_free(data_buff);
+ rte_free(p_entry_arr);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+ memset(eram_buff, 0, 4 * sizeof(uint32_t));
+ }
+
+ for (index = 0; index < index_num; index++) {
+ p_entry_arr[index].handle = p_index_array[index];
+ p_entry_arr[index].key_data = data_buff;
+ p_entry_arr[index].key_mask = mask_buff;
+
+ if (as_enable)
+ p_entry_arr[index].p_as_rslt = (uint8_t *)eram_buff;
+ }
+
+ rc = zxdh_np_dtb_acl_dma_insert(dev_id,
+ queue_id,
+ sdt_no,
+ index_num,
+ p_entry_arr,
+ &element_id);
+ rte_free(data_buff);
+ rte_free(mask_buff);
+ if (eram_buff)
+ rte_free(eram_buff);
+
+ rte_free(p_entry_arr);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_acl_dma_insert");
+
+ return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_acl_index_release_by_vport(uint32_t dev_id,
+ uint32_t sdt_no, uint32_t vport)
+{
+ uint32_t rc = ZXDH_OK;
+ uint32_t eram_sdt_no = 0;
+ ZXDH_SPINLOCK_T *p_dtb_spinlock = NULL;
+ ZXDH_DEV_SPINLOCK_TYPE_E spinlock = ZXDH_DEV_SPINLOCK_T_DTB;
+ ZXDH_SDT_TBL_ETCAM_T sdt_acl = {0};
+ ZXDH_SDT_TBL_ERAM_T sdt_eram = {0};
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, sdt_no, &sdt_acl);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+ if (sdt_acl.table_type != ZXDH_SDT_TBLT_ETCAM) {
+ PMD_DRV_LOG(ERR, "SDT[%u] table_type[ %u ] is not etcam table!",
+ sdt_no, sdt_acl.table_type);
+ return ZXDH_ERR;
+ }
+
+ eram_sdt_no = zxdh_np_apt_get_sdt_partner(dev_id, sdt_no);
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, eram_sdt_no, &sdt_eram);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+ if (sdt_eram.table_type != ZXDH_SDT_TBLT_ERAM) {
+ PMD_DRV_LOG(ERR, "SDT[%u] table_type[ %u ] is not eram table!",
+ eram_sdt_no, sdt_eram.table_type);
+ return ZXDH_ERR;
+ }
+
+ rc = zxdh_np_dev_opr_spinlock_get(dev_id, (uint32_t)spinlock, &p_dtb_spinlock);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dev_opr_spinlock_get");
+
+ rte_spinlock_lock(&p_dtb_spinlock->spinlock);
+
+ rc = zxdh_np_agent_channel_acl_index_release(dev_id,
+ ZXDH_ACL_INDEX_VPORT_REL, sdt_no, vport, 0);
+ if (rc == ZXDH_ACL_RC_SRH_FAIL)
+ PMD_DRV_LOG(ERR, "ACL_INDEX_VPORT_REL[vport:0x%x] index is not exist.", vport);
+
+ rte_spinlock_unlock(&p_dtb_spinlock->spinlock);
+
+ return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_smmu0_data_write_cycle(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t smmu0_base_addr,
+ uint32_t smmu0_wr_mode,
+ uint32_t entry_num,
+ ZXDH_DTB_ERAM_ENTRY_INFO_T *p_entry_arr,
+ uint32_t *element_id)
+{
+ uint32_t rc = ZXDH_OK;
+
+ uint32_t item_cnt = 0;
+ uint32_t addr_offset = 0;
+ uint32_t dtb_len = 0;
+ uint32_t index = 0;
+
+ uint32_t *p_entry_data = NULL;
+ uint8_t *table_data_buff = NULL;
+ uint32_t entry_data_buff[4] = {0};
+ uint8_t cmd_buff[ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8] = {0};
+ ZXDH_DTB_ENTRY_T dtb_one_entry = {0};
+
+ table_data_buff = (uint8_t *)rte_zmalloc(NULL, ZXDH_DTB_TABLE_DATA_BUFF_SIZE, 0);
+ if (table_data_buff == NULL) {
+ PMD_DRV_LOG(ERR, "%s point null!", __func__);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ dtb_one_entry.cmd = cmd_buff;
+ dtb_one_entry.data = (uint8_t *)entry_data_buff;
+
+ for (item_cnt = 0; item_cnt < entry_num; ++item_cnt) {
+ p_entry_data = (uint32_t *)p_entry_arr[item_cnt].p_data;
+ index = p_entry_arr[item_cnt].index;
+
+ rc = zxdh_np_dtb_se_smmu0_ind_write(dev_id,
+ smmu0_base_addr,
+ index,
+ smmu0_wr_mode,
+ p_entry_data,
+ &dtb_one_entry);
+
+ switch (smmu0_wr_mode) {
+ case ZXDH_ERAM128_OPR_128b:
+ dtb_len += 2;
+ addr_offset = item_cnt * ZXDH_DTB_LEN_POS_SETP * 2;
+ break;
+ case ZXDH_ERAM128_OPR_64b:
+ dtb_len += 1;
+ addr_offset = item_cnt * ZXDH_DTB_LEN_POS_SETP;
+ break;
+ case ZXDH_ERAM128_OPR_1b:
+ dtb_len += 1;
+ addr_offset = item_cnt * ZXDH_DTB_LEN_POS_SETP;
+ break;
+ }
+
+ zxdh_np_dtb_data_write(table_data_buff, addr_offset, &dtb_one_entry);
+ memset(cmd_buff, 0, ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8);
+ memset(entry_data_buff, 0, 4 * sizeof(uint32_t));
+ }
+
+ rc = zxdh_np_dtb_write_down_table_data(dev_id,
+ queue_id,
+ dtb_len * 16,
+ table_data_buff,
+ element_id);
+ rte_free(table_data_buff);
+
+ rc = zxdh_np_dtb_tab_down_success_status_check(dev_id, queue_id, *element_id);
+
+ return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_smmu0_data_write(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t smmu0_base_addr,
+ uint32_t smmu0_wr_mode,
+ uint32_t entry_num,
+ ZXDH_DTB_ERAM_ENTRY_INFO_T *p_entry_arr,
+ uint32_t *element_id)
+{
+ uint32_t rc = ZXDH_OK;
+
+ uint32_t i = 0;
+ uint32_t entry_num_max = 0;
+ uint32_t entry_cycle = 0;
+ uint32_t entry_remains = 0;
+
+ ZXDH_DTB_ERAM_ENTRY_INFO_T *p_entry = NULL;
+
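+ /* Per-cycle entry limit: a 128b write occupies two buffer positions per entry, 64b/1b writes occupy one */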
+ switch (smmu0_wr_mode) {
+ case ZXDH_ERAM128_OPR_128b:
+ entry_num_max = 0x1ff;
+ break;
+ case ZXDH_ERAM128_OPR_64b:
+ entry_num_max = 0x3ff;
+ break;
+ case ZXDH_ERAM128_OPR_1b:
+ entry_num_max = 0x3ff;
+ break;
+ }
+
+ entry_cycle = entry_num / entry_num_max;
+ entry_remains = entry_num % entry_num_max;
+
+ for (i = 0; i < entry_cycle; ++i) {
+ p_entry = p_entry_arr + entry_num_max * i;
+ rc = zxdh_np_dtb_smmu0_data_write_cycle(dev_id,
+ queue_id,
+ smmu0_base_addr,
+ smmu0_wr_mode,
+ entry_num_max,
+ p_entry,
+ element_id);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_smmu0_data_write_cycle");
+ }
+
+ if (entry_remains) {
+ p_entry = p_entry_arr + entry_num_max * entry_cycle;
+ rc = zxdh_np_dtb_smmu0_data_write_cycle(dev_id,
+ queue_id,
+ smmu0_base_addr,
+ smmu0_wr_mode,
+ entry_remains,
+ p_entry,
+ element_id);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_smmu0_data_write_cycle");
+ }
+
+ return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_eram_dma_write(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t sdt_no,
+ uint32_t entry_num,
+ ZXDH_DTB_ERAM_ENTRY_INFO_T *p_entry_arr,
+ uint32_t *element_id)
+{
+ uint32_t rc = ZXDH_OK;
+
+ uint32_t wrt_mode;
+ uint32_t base_addr;
+
+ ZXDH_SDT_TBL_ERAM_T sdt_eram_info = {0};
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, sdt_no, &sdt_eram_info);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+ base_addr = sdt_eram_info.eram_base_addr;
+ wrt_mode = sdt_eram_info.eram_mode;
+
+ switch (wrt_mode) {
+ case ZXDH_ERAM128_TBL_128b:
+ wrt_mode = ZXDH_ERAM128_OPR_128b;
+ break;
+ case ZXDH_ERAM128_TBL_64b:
+ wrt_mode = ZXDH_ERAM128_OPR_64b;
+ break;
+ case ZXDH_ERAM128_TBL_1b:
+ wrt_mode = ZXDH_ERAM128_OPR_1b;
+ break;
+ }
+
+ rc = zxdh_np_dtb_smmu0_data_write(dev_id,
+ queue_id,
+ base_addr,
+ wrt_mode,
+ entry_num,
+ p_entry_arr,
+ element_id);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_smmu0_data_write");
+
+ return ZXDH_OK;
+}
+
+static uint32_t
+zxdh_np_dtb_eram_data_clear(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t sdt_no,
+ uint32_t index_num,
+ uint32_t *p_index_array)
+{
+ uint32_t rc = ZXDH_OK;
+ uint32_t element_id = 0;
+ uint32_t i = 0;
+
+ ZXDH_DTB_ERAM_ENTRY_INFO_T *p_eram_data_arr = NULL;
+ uint8_t *data_buff = NULL;
+
+ p_eram_data_arr = (ZXDH_DTB_ERAM_ENTRY_INFO_T *)rte_zmalloc(NULL, index_num *
+ sizeof(ZXDH_DTB_ERAM_ENTRY_INFO_T), 0);
+ if (p_eram_data_arr == NULL) {
+ PMD_DRV_LOG(ERR, "%s point null!", __func__);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ data_buff = (uint8_t *)rte_zmalloc(NULL, 4 * sizeof(uint32_t), 0);
+ if (data_buff == NULL) {
+ PMD_DRV_LOG(ERR, "data_buff point null!");
+ rte_free(p_eram_data_arr);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ for (i = 0; i < index_num; i++) {
+ p_eram_data_arr[i].index = p_index_array[i];
+ p_eram_data_arr[i].p_data = (uint32_t *)data_buff;
+ }
+
+ rc = zxdh_np_dtb_eram_dma_write(dev_id, queue_id,
+ sdt_no, index_num, p_eram_data_arr, &element_id);
+ rte_free(data_buff);
+ rte_free(p_eram_data_arr);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_eram_dma_write");
+
+ return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_eram_stat_data_clear(uint32_t dev_id,
+ uint32_t queue_id,
+ uint32_t counter_id,
+ ZXDH_STAT_CNT_MODE_E rd_mode,
+ uint32_t index_num,
+ uint32_t *p_index_array)
+{
+ uint32_t rc = ZXDH_OK;
+ uint32_t element_id = 0;
+ uint32_t i = 0;
+ uint32_t wrt_mode = 0;
+ uint32_t start_addr = 0;
+ uint32_t counter_id_128bit = 0;
+
+ ZXDH_PPU_STAT_CFG_T stat_cfg = {0};
+ ZXDH_DTB_ERAM_ENTRY_INFO_T *p_eram_data_arr = NULL;
+ uint8_t *data_buff = NULL;
+
+ zxdh_np_stat_cfg_soft_get(dev_id, &stat_cfg);
+
+ p_eram_data_arr = (ZXDH_DTB_ERAM_ENTRY_INFO_T *)rte_zmalloc(NULL, index_num *
+ sizeof(ZXDH_DTB_ERAM_ENTRY_INFO_T), 0);
+ if (p_eram_data_arr == NULL) {
+ PMD_DRV_LOG(ERR, "%s point null!", __func__);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ data_buff = (uint8_t *)rte_zmalloc(NULL, 4 * sizeof(uint32_t), 0);
+ if (data_buff == NULL) {
+ PMD_DRV_LOG(ERR, "data_buff point null!");
+ rte_free(p_eram_data_arr);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ for (i = 0; i < index_num; i++) {
+ p_eram_data_arr[i].index = p_index_array[i];
+ p_eram_data_arr[i].p_data = (uint32_t *)data_buff;
+ }
+
+ wrt_mode = (rd_mode == ZXDH_STAT_128_MODE) ? ZXDH_ERAM128_OPR_128b : ZXDH_ERAM128_OPR_64b;
+ counter_id_128bit = (rd_mode == ZXDH_STAT_128_MODE) ? counter_id : (counter_id >> 1);
+ start_addr = stat_cfg.eram_baddr + counter_id_128bit;
+ rc = zxdh_np_dtb_smmu0_data_write(dev_id,
+ queue_id,
+ start_addr,
+ wrt_mode,
+ index_num,
+ p_eram_data_arr,
+ &element_id);
+ rte_free(data_buff);
+ rte_free(p_eram_data_arr);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_dtb_smmu0_data_write");
+
+ return rc;
+}
+
+uint32_t
+zxdh_np_dtb_acl_offline_delete(uint32_t dev_id, uint32_t queue_id,
+ uint32_t sdt_no, uint32_t vport, uint32_t counter_id, uint32_t rd_mode)
+{
+ uint32_t rc = ZXDH_OK;
+ uint32_t index_num = 0;
+ uint32_t eram_sdt_no = 0;
+ uint32_t *p_index_array = NULL;
+
+ ZXDH_SDT_TBL_ETCAM_T sdt_acl = {0};
+ ZXDH_SDT_TBL_ERAM_T sdt_eram = {0};
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, sdt_no, &sdt_acl);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+ if (sdt_acl.table_type != ZXDH_SDT_TBLT_ETCAM) {
+ PMD_DRV_LOG(ERR, "SDT[%u] table_type[ %u ] is not etcam table!",
+ sdt_no, sdt_acl.table_type);
+ return ZXDH_ERR;
+ }
+
+ eram_sdt_no = zxdh_np_apt_get_sdt_partner(dev_id, sdt_no);
+
+ rc = zxdh_np_soft_sdt_tbl_get(dev_id, eram_sdt_no, &sdt_eram);
+ ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, "zxdh_np_soft_sdt_tbl_get");
+ if (sdt_eram.table_type != ZXDH_SDT_TBLT_ERAM) {
+ PMD_DRV_LOG(ERR, "SDT[%u] table_type[ %u ] is not eram table!",
+ eram_sdt_no, sdt_eram.table_type);
+ return ZXDH_ERR;
+ }
+
+ p_index_array = (uint32_t *)rte_zmalloc(NULL,
+ sizeof(uint32_t) * sdt_eram.eram_table_depth, 0);
+ if (p_index_array == NULL) {
+ PMD_DRV_LOG(ERR, "%s point null!", __func__);
+ return ZXDH_PAR_CHK_POINT_NULL;
+ }
+
+ rc = zxdh_np_dtb_acl_index_parse(dev_id, queue_id,
+ eram_sdt_no, vport, &index_num, p_index_array);
+ if (rc != ZXDH_OK) {
+ rte_free(p_index_array);
+ PMD_DRV_LOG(ERR, "acl index parse failed");
+ return ZXDH_ERR;
+ }
+
+ if (!index_num) {
+ PMD_DRV_LOG(ERR, "SDT[%u] vport[0x%x] item num is zero!", sdt_no, vport);
+ rte_free(p_index_array);
+ return ZXDH_OK;
+ }
+
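+ /* Clear ACL key entries, their ERAM results and statistics counters before releasing the vport's indexes */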
+ rc = zxdh_np_dtb_acl_data_clear(dev_id, queue_id, sdt_no, index_num, p_index_array);
+ rc = zxdh_np_dtb_eram_data_clear(dev_id, queue_id, eram_sdt_no, index_num, p_index_array);
+ rc = zxdh_np_dtb_eram_stat_data_clear(dev_id, queue_id,
+ counter_id, rd_mode, index_num, p_index_array);
+ rte_free(p_index_array);
+
+ rc = zxdh_np_dtb_acl_index_release_by_vport(dev_id, sdt_no, vport);
+
+ return rc;
+}
diff --git a/drivers/net/zxdh/zxdh_np.h b/drivers/net/zxdh/zxdh_np.h
index 1b8f17474d..e3457a8c90 100644
--- a/drivers/net/zxdh/zxdh_np.h
+++ b/drivers/net/zxdh/zxdh_np.h
@@ -1751,6 +1751,15 @@ typedef enum zxdh_profile_type {
CAR_MAX
} ZXDH_PROFILE_TYPE;
+typedef enum zxdh_msg_acl_index_oper_e {
+ ZXDH_ACL_INDEX_REQUEST = 0,
+ ZXDH_ACL_INDEX_RELEASE = 1,
+ ZXDH_ACL_INDEX_VPORT_REL = 2,
+ ZXDH_ACL_INDEX_ALL_REL = 3,
+ ZXDH_ACL_INDEX_STAT_CLR = 4,
+ ZXDH_ACL_INDEX_MAX
+} ZXDH_MSG_ACL_INDEX_OPER_E;
+
typedef struct __rte_aligned(2) zxdh_version_compatible_reg_t {
uint8_t version_compatible_item;
uint8_t major;
@@ -1915,6 +1924,18 @@ typedef struct zxdh_dtb_dump_index_t {
uint32_t index_type;
} ZXDH_DTB_DUMP_INDEX_T;
+typedef struct __rte_aligned(2) zxdh_agent_channel_acl_msg_t {
+ uint8_t dev_id;
+ uint8_t type;
+ uint8_t oper;
+ uint8_t rsv;
+ uint32_t sdt_no;
+ uint32_t vport;
+ uint32_t index;
+ uint32_t counter_id;
+ uint32_t rd_mode;
+} ZXDH_AGENT_CHANNEL_ACL_MSG_T;
+
int zxdh_np_host_init(uint32_t dev_id, ZXDH_DEV_INIT_CTRL_T *p_dev_init_ctrl);
int zxdh_np_online_uninit(uint32_t dev_id, char *port_name, uint32_t queue_id);
int zxdh_np_dtb_table_entry_write(uint32_t dev_id, uint32_t queue_id,
@@ -1958,5 +1979,13 @@ uint32_t zxdh_np_dtb_hash_offline_delete(uint32_t dev_id,
uint32_t queue_id,
uint32_t sdt_no,
__rte_unused uint32_t flush_mode);
-
+uint32_t zxdh_np_dtb_acl_index_request(uint32_t dev_id,
+ uint32_t sdt_no, uint32_t vport, uint32_t *p_index);
+
+uint32_t zxdh_np_dtb_acl_index_release(uint32_t dev_id,
+ uint32_t sdt_no, uint32_t vport, uint32_t index);
+uint32_t zxdh_np_dtb_acl_table_dump_by_vport(uint32_t dev_id, uint32_t queue_id,
+ uint32_t sdt_no, uint32_t vport, uint32_t *entry_num, uint8_t *p_dump_data);
+uint32_t zxdh_np_dtb_acl_offline_delete(uint32_t dev_id, uint32_t queue_id,
+ uint32_t sdt_no, uint32_t vport, uint32_t counter_id, uint32_t rd_mode);
#endif /* ZXDH_NP_H */
--
2.27.0
* [PATCH v2 2/2] net/zxdh: add support flow director ops
2025-06-18 7:49 ` [PATCH v2 0/2] " Bingbin Chen
2025-06-18 7:49 ` [PATCH v2 1/2] net/zxdh: npsdk add flow director table ops Bingbin Chen
@ 2025-06-18 7:49 ` Bingbin Chen
2025-06-30 16:56 ` Stephen Hemminger
1 sibling, 1 reply; 9+ messages in thread
From: Bingbin Chen @ 2025-06-18 7:49 UTC (permalink / raw)
To: stephen, wang.junlong1, yang.yonggang; +Cc: dev, Bingbin Chen
Provide support for matching on ETH, VLAN, IPv4/IPv6, TCP/UDP and VXLAN
items, including masks, and support multiple actions: drop, count, mark,
queue, rss, and vxlan decap/encap.
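
For illustration only (the port, address and queue index below are
arbitrary example values, not driver defaults), such a rule can be
exercised from testpmd with:

  flow create 0 ingress pattern eth / ipv4 dst is 192.168.0.1 / udp dst is 4789 / end actions queue index 3 / count / end
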
Signed-off-by: Bingbin Chen <chen.bingbin@zte.com.cn>
---
doc/guides/nics/features/zxdh.ini | 16 +
doc/guides/nics/zxdh.rst | 1 +
drivers/net/zxdh/meson.build | 1 +
drivers/net/zxdh/zxdh_common.h | 1 +
drivers/net/zxdh/zxdh_ethdev.c | 27 +
drivers/net/zxdh/zxdh_ethdev.h | 13 +-
drivers/net/zxdh/zxdh_ethdev_ops.c | 2 +-
drivers/net/zxdh/zxdh_ethdev_ops.h | 1 +
drivers/net/zxdh/zxdh_flow.c | 2004 ++++++++++++++++++++++++++++
drivers/net/zxdh/zxdh_flow.h | 237 ++++
drivers/net/zxdh/zxdh_msg.c | 263 +++-
drivers/net/zxdh/zxdh_msg.h | 31 +-
drivers/net/zxdh/zxdh_tables.h | 10 +-
13 files changed, 2536 insertions(+), 71 deletions(-)
create mode 100644 drivers/net/zxdh/zxdh_flow.c
create mode 100644 drivers/net/zxdh/zxdh_flow.h
diff --git a/doc/guides/nics/features/zxdh.ini b/doc/guides/nics/features/zxdh.ini
index 277e17a584..bd20838676 100644
--- a/doc/guides/nics/features/zxdh.ini
+++ b/doc/guides/nics/features/zxdh.ini
@@ -34,5 +34,21 @@ Extended stats = Y
FW version = Y
Module EEPROM dump = Y
+[rte_flow items]
+eth = Y
+ipv4 = Y
+ipv6 = Y
+sctp = Y
+tcp = Y
+udp = Y
+vlan = Y
+vxlan = Y
+
[rte_flow actions]
drop = Y
+count = Y
+mark = Y
+queue = Y
+rss = Y
+vxlan_decap = Y
+vxlan_encap = Y
diff --git a/doc/guides/nics/zxdh.rst b/doc/guides/nics/zxdh.rst
index 372cb5b44f..47dabde97e 100644
--- a/doc/guides/nics/zxdh.rst
+++ b/doc/guides/nics/zxdh.rst
@@ -41,6 +41,7 @@ Features of the ZXDH PMD are:
- Hardware TSO for generic IP or UDP tunnel, including VXLAN
- Extended statistics query
- Ingress meter support
+- Flow API
Driver compilation and testing
diff --git a/drivers/net/zxdh/meson.build b/drivers/net/zxdh/meson.build
index a48a0d43c2..120cac5879 100644
--- a/drivers/net/zxdh/meson.build
+++ b/drivers/net/zxdh/meson.build
@@ -24,4 +24,5 @@ sources = files(
'zxdh_rxtx.c',
'zxdh_ethdev_ops.c',
'zxdh_mtr.c',
+ 'zxdh_flow.c',
)
diff --git a/drivers/net/zxdh/zxdh_common.h b/drivers/net/zxdh/zxdh_common.h
index c151101bbc..6d78ae0273 100644
--- a/drivers/net/zxdh/zxdh_common.h
+++ b/drivers/net/zxdh/zxdh_common.h
@@ -14,6 +14,7 @@
#define ZXDH_VF_LOCK_REG 0x90
#define ZXDH_VF_LOCK_ENABLE_MASK 0x1
#define ZXDH_ACQUIRE_CHANNEL_NUM_MAX 10
+#define VF_IDX(pcie_id) ((pcie_id) & 0xff)
struct zxdh_res_para {
uint64_t virt_addr;
diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index 80053678cb..3b9cb6fa63 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -1228,6 +1228,11 @@ zxdh_dev_close(struct rte_eth_dev *dev)
return -1;
}
+ if (zxdh_shared_data != NULL) {
+ zxdh_mtr_release(dev);
+ zxdh_flow_release(dev);
+ }
+
zxdh_intr_release(dev);
zxdh_np_uninit(dev);
zxdh_pci_reset(hw);
@@ -1428,6 +1433,7 @@ static const struct eth_dev_ops zxdh_eth_dev_ops = {
.get_module_eeprom = zxdh_dev_get_module_eeprom,
.dev_supported_ptypes_get = zxdh_dev_supported_ptypes_get,
.mtr_ops_get = zxdh_meter_ops_get,
+ .flow_ops_get = zxdh_flow_ops_get,
};
static int32_t
@@ -1504,6 +1510,8 @@ zxdh_dtb_dump_res_init(struct zxdh_hw *hw, ZXDH_DEV_INIT_CTRL_T *dpp_ctrl)
{"sdt_mc_table1", 5 * 1024 * 1024, ZXDH_SDT_MC_TABLE1, NULL},
{"sdt_mc_table2", 5 * 1024 * 1024, ZXDH_SDT_MC_TABLE2, NULL},
{"sdt_mc_table3", 5 * 1024 * 1024, ZXDH_SDT_MC_TABLE3, NULL},
+ {"sdt_acl_index_mng", 4 * 1024 * 1024, 30, NULL},
+ {"sdt_fd_table", 4 * 1024 * 1024, ZXDH_SDT_FD_TABLE, NULL},
};
struct zxdh_dev_shared_data *dev_sd = hw->dev_sd;
@@ -1723,6 +1731,7 @@ zxdh_free_sh_res(void)
rte_spinlock_lock(&zxdh_shared_data_lock);
if (zxdh_shared_data != NULL && zxdh_shared_data->init_done &&
(--zxdh_shared_data->dev_refcnt == 0)) {
+ rte_mempool_free(zxdh_shared_data->flow_mp);
rte_mempool_free(zxdh_shared_data->mtr_mp);
rte_mempool_free(zxdh_shared_data->mtr_profile_mp);
rte_mempool_free(zxdh_shared_data->mtr_policy_mp);
@@ -1734,6 +1743,7 @@ zxdh_free_sh_res(void)
static int
zxdh_init_sh_res(struct zxdh_shared_data *sd)
{
+ const char *MZ_ZXDH_FLOW_MP = "zxdh_flow_mempool";
const char *MZ_ZXDH_MTR_MP = "zxdh_mtr_mempool";
const char *MZ_ZXDH_MTR_PROFILE_MP = "zxdh_mtr_profile_mempool";
const char *MZ_ZXDH_MTR_POLICY_MP = "zxdh_mtr_policy_mempool";
@@ -1743,6 +1753,13 @@ zxdh_init_sh_res(struct zxdh_shared_data *sd)
struct rte_mempool *mtr_policy_mp = NULL;
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ flow_mp = rte_mempool_create(MZ_ZXDH_FLOW_MP, ZXDH_MAX_FLOW_NUM,
+ sizeof(struct zxdh_flow), 64, 0,
+ NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
+ if (flow_mp == NULL) {
+ PMD_DRV_LOG(ERR, "Cannot allocate zxdh flow mempool");
+ goto error;
+ }
mtr_mp = rte_mempool_create(MZ_ZXDH_MTR_MP, ZXDH_MAX_MTR_NUM,
sizeof(struct zxdh_mtr_object), 64, 0,
NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
@@ -1765,6 +1782,7 @@ zxdh_init_sh_res(struct zxdh_shared_data *sd)
PMD_DRV_LOG(ERR, "Cannot allocate zxdh mtr profile mempool");
goto error;
}
+ sd->flow_mp = flow_mp;
sd->mtr_mp = mtr_mp;
sd->mtr_profile_mp = mtr_profile_mp;
sd->mtr_policy_mp = mtr_policy_mp;
@@ -1814,6 +1832,7 @@ zxdh_init_once(struct rte_eth_dev *eth_dev)
ret = zxdh_init_sh_res(sd);
if (ret != 0)
goto out;
+ zxdh_flow_global_init();
rte_spinlock_init(&g_mtr_res.hw_plcr_res_lock);
memset(&g_mtr_res, 0, sizeof(g_mtr_res));
sd->init_done = true;
@@ -1837,10 +1856,17 @@ zxdh_tbl_entry_offline_destroy(struct zxdh_hw *hw)
ret = zxdh_np_dtb_hash_offline_delete(hw->dev_id, dtb_data->queueid, sdt_no, 0);
if (ret)
PMD_DRV_LOG(ERR, "sdt_no %d delete failed. code:%d ", sdt_no, ret);
+
sdt_no = ZXDH_SDT_MC_TABLE0 + hw->hash_search_index;
ret = zxdh_np_dtb_hash_offline_delete(hw->dev_id, dtb_data->queueid, sdt_no, 0);
if (ret)
PMD_DRV_LOG(ERR, "sdt_no %d delete failed. code:%d ", sdt_no, ret);
+
+ ret = zxdh_np_dtb_acl_offline_delete(hw->dev_id, dtb_data->queueid,
+ ZXDH_SDT_FD_TABLE, hw->vport.vport,
+ ZXDH_FLOW_STATS_INGRESS_BASE, 1);
+ if (ret)
+ PMD_DRV_LOG(ERR, "flow offline delete failed. code:%d", ret);
}
return ret;
}
@@ -2064,6 +2090,7 @@ zxdh_eth_dev_init(struct rte_eth_dev *eth_dev)
if (ret)
goto err_zxdh_init;
+ zxdh_flow_init(eth_dev);
zxdh_queue_res_get(eth_dev);
zxdh_msg_cb_reg(hw);
if (zxdh_priv_res_init(hw) != 0)
diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h
index 169af209a2..8e465d66b6 100644
--- a/drivers/net/zxdh/zxdh_ethdev.h
+++ b/drivers/net/zxdh/zxdh_ethdev.h
@@ -11,6 +11,7 @@
#include <eal_interrupts.h>
#include "zxdh_mtr.h"
+#include "zxdh_flow.h"
/* ZXDH PCI vendor/device ID. */
#define ZXDH_PCI_VENDOR_ID 0x1cf2
@@ -54,6 +55,7 @@
#define ZXDH_SLOT_MAX 256
#define ZXDH_MAX_VF 256
#define ZXDH_HASHIDX_MAX 6
+#define ZXDH_RSS_HASH_KEY_LEN 40U
union zxdh_virport_num {
uint16_t vport;
@@ -129,7 +131,10 @@ struct zxdh_hw {
uint8_t is_pf : 1,
rsv : 1,
i_mtr_en : 1,
- e_mtr_en : 1;
+ e_mtr_en : 1,
+ i_flow_en : 1,
+ e_flow_en : 1,
+ vxlan_flow_en : 1;
uint8_t msg_chan_init;
uint8_t phyport;
uint8_t panel_id;
@@ -149,7 +154,10 @@ struct zxdh_hw {
uint16_t queue_pool_count;
uint16_t queue_pool_start;
uint8_t dl_net_hdr_len;
- uint8_t rsv1[3];
+ uint16_t vxlan_fd_num;
+ uint8_t rsv1[1];
+
+ struct dh_flow_list dh_flow_list;
};
struct zxdh_dtb_shared_data {
@@ -174,6 +182,7 @@ struct zxdh_shared_data {
int32_t np_init_done;
uint32_t dev_refcnt;
struct zxdh_dtb_shared_data *dtb_data;
+ struct rte_mempool *flow_mp;
struct rte_mempool *mtr_mp;
struct rte_mempool *mtr_profile_mp;
struct rte_mempool *mtr_policy_mp;
diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.c b/drivers/net/zxdh/zxdh_ethdev_ops.c
index f8e8d26c50..10a174938e 100644
--- a/drivers/net/zxdh/zxdh_ethdev_ops.c
+++ b/drivers/net/zxdh/zxdh_ethdev_ops.c
@@ -1056,7 +1056,7 @@ zxdh_dev_rss_reta_update(struct rte_eth_dev *dev,
return ret;
}
-static uint16_t
+uint16_t
zxdh_hw_qid_to_logic_qid(struct rte_eth_dev *dev, uint16_t qid)
{
struct zxdh_hw *priv = (struct zxdh_hw *)dev->data->dev_private;
diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.h b/drivers/net/zxdh/zxdh_ethdev_ops.h
index 97a1eb4532..a83b808934 100644
--- a/drivers/net/zxdh/zxdh_ethdev_ops.h
+++ b/drivers/net/zxdh/zxdh_ethdev_ops.h
@@ -141,5 +141,6 @@ int zxdh_dev_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw
int zxdh_dev_get_module_info(struct rte_eth_dev *dev, struct rte_eth_dev_module_info *modinfo);
int zxdh_dev_get_module_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *info);
int zxdh_meter_ops_get(struct rte_eth_dev *dev, void *arg);
+uint16_t zxdh_hw_qid_to_logic_qid(struct rte_eth_dev *dev, uint16_t qid);
#endif /* ZXDH_ETHDEV_OPS_H */
diff --git a/drivers/net/zxdh/zxdh_flow.c b/drivers/net/zxdh/zxdh_flow.c
new file mode 100644
index 0000000000..5164d3a5f0
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_flow.c
@@ -0,0 +1,2004 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+#include <rte_flow.h>
+#include <rte_bitmap.h>
+
+#include "zxdh_ethdev.h"
+#include "zxdh_logs.h"
+#include "zxdh_flow.h"
+#include "zxdh_tables.h"
+#include "zxdh_ethdev_ops.h"
+#include "zxdh_np.h"
+#include "zxdh_msg.h"
+
+#define ZXDH_IPV6_FRAG_HEADER 44
+#define ZXDH_TENANT_ARRAY_NUM 3
+#define ZXDH_VLAN_TCI_MASK 0xFFFF
+#define ZXDH_VLAN_PRI_MASK 0xE000
+#define ZXDH_VLAN_CFI_MASK 0x1000
+#define ZXDH_VLAN_VID_MASK 0x0FFF
+#define MAX_STRING_LEN 8192
+#define FLOW_INGRESS 0
+#define FLOW_EGRESS 1
+#define MAX_ENCAP1_NUM (256)
+#define INVALID_HANDLEIDX 0xffff
+#define ACTION_VXLAN_ENCAP_ITEMS_NUM (6)
+static struct dh_engine_list flow_engine_list = TAILQ_HEAD_INITIALIZER(flow_engine_list);
+static struct count_res flow_count_ref[MAX_FLOW_COUNT_NUM];
+static rte_spinlock_t fd_hw_res_lock = RTE_SPINLOCK_INITIALIZER;
+static uint8_t fd_hwres_bitmap[ZXDH_MAX_FLOW_NUM] = {0};
+
+#define MKDUMPSTR(buf, buf_size, cur_len, ...) \
+do { \
+ if ((cur_len) >= (buf_size)) \
+ break; \
+ (cur_len) += snprintf((buf) + (cur_len), (buf_size) - (cur_len), __VA_ARGS__); \
+} while (0)
+
+static inline void
+print_ether_addr(const char *what, const struct rte_ether_addr *eth_addr,
+ char print_buf[], uint32_t buf_size, uint32_t *cur_len)
+{
+ char buf[RTE_ETHER_ADDR_FMT_SIZE];
+
+ rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
+ MKDUMPSTR(print_buf, buf_size, *cur_len, "%s%s", what, buf);
+}
+
+static inline void
+zxdh_fd_flow_free_dtbentry(ZXDH_DTB_USER_ENTRY_T *dtb_entry)
+{
+ rte_free(dtb_entry->p_entry_data);
+ dtb_entry->p_entry_data = NULL;
+ dtb_entry->sdt_no = 0;
+}
+
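+/* Invert every bit of a buffer in place; the tail that is not 4-byte aligned is inverted byte by byte */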
+static void
+data_bitwise(void *data, int bytecnt)
+{
+ int i;
+ uint32_t *temp = (uint32_t *)data;
+ int remain = bytecnt % 4;
+ for (i = 0; i < (bytecnt >> 2); i++) {
+ *(temp) = ~*(temp);
+ temp++;
+ }
+
+ if (remain) {
+ for (i = 0; i < remain; i++) {
+ uint8_t *tmp = (uint8_t *)temp;
+ *(uint8_t *)tmp = ~*(uint8_t *)tmp;
+ tmp++;
+ }
+ }
+}
+
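+/* Repack a flow-op reply: keep the zxdh_flow body, insert 4 bytes of padding, then copy the remaining payload */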
+static void
+zxdh_adjust_flow_op_rsp_memory_layout(void *old_data,
+ size_t old_size, void *new_data)
+{
+ rte_memcpy(new_data, old_data, sizeof(struct zxdh_flow));
+ memset((char *)new_data + sizeof(struct zxdh_flow), 0, 4);
+ rte_memcpy((char *)new_data + sizeof(struct zxdh_flow) + 4,
+ (char *)old_data + sizeof(struct zxdh_flow),
+ old_size - sizeof(struct zxdh_flow));
+}
+
+void zxdh_flow_global_init(void)
+{
+ int i;
+ for (i = 0; i < MAX_FLOW_COUNT_NUM; i++) {
+ rte_spinlock_init(&flow_count_ref[i].count_lock);
+ flow_count_ref[i].count_ref = 0;
+ }
+}
+
+static void
+__entry_dump(char *print_buf, uint32_t buf_size,
+ uint32_t *cur_len, struct fd_flow_key *key)
+{
+ print_ether_addr("\nL2\t dst=", &key->mac_dst, print_buf, buf_size, cur_len);
+ print_ether_addr(" - src=", &key->mac_src, print_buf, buf_size, cur_len);
+ MKDUMPSTR(print_buf, buf_size, *cur_len, " -eth type=0x%04x", key->ether_type);
+ MKDUMPSTR(print_buf, buf_size, *cur_len,
+ " -vlan_pri=0x%02x -vlan_vlanid=0x%04x -vlan_tci=0x%04x ",
+ key->cvlan_pri, key->cvlan_vlanid, key->vlan_tci);
+ MKDUMPSTR(print_buf, buf_size, *cur_len,
+ " -vni=0x%02x 0x%02x 0x%02x\n", key->vni[0], key->vni[1], key->vni[2]);
+ MKDUMPSTR(print_buf, buf_size, *cur_len,
+ "L3\t dstip=0x%08x 0x%08x 0x%08x 0x%08x("IPv6_BYTES_FMT")\n",
+ *(uint32_t *)key->dst_ip, *((uint32_t *)key->dst_ip + 1),
+ *((uint32_t *)key->dst_ip + 2),
+ *((uint32_t *)key->dst_ip + 3),
+ IPv6_BYTES(key->dst_ip));
+ MKDUMPSTR(print_buf, buf_size, *cur_len,
+ "\t srcip=0x%08x 0x%08x 0x%08x 0x%08x("IPv6_BYTES_FMT")\n",
+ *((uint32_t *)key->src_ip), *((uint32_t *)key->src_ip + 1),
+ *((uint32_t *)key->src_ip + 2),
+ *((uint32_t *)key->src_ip + 3),
+ IPv6_BYTES(key->src_ip));
+ MKDUMPSTR(print_buf, buf_size, *cur_len,
+ " \t tos=0x%02x -nw-proto=0x%02x -frag-flag %u\n",
+ key->tos, key->nw_proto, key->frag_flag);
+ MKDUMPSTR(print_buf, buf_size, *cur_len,
+ "L4\t dstport=0x%04x -srcport=0x%04x", key->tp_dst, key->tp_src);
+}
+
+static void
+__result_dump(char *print_buf, uint32_t buf_size,
+ uint32_t *cur_len, struct fd_flow_result *res)
+{
+ MKDUMPSTR(print_buf, buf_size, *cur_len, " -hit_flag = 0x%04x", res->hit_flag);
+ MKDUMPSTR(print_buf, buf_size, *cur_len, " -action_idx = 0x%02x", res->action_idx);
+ MKDUMPSTR(print_buf, buf_size, *cur_len, " -qid = 0x%04x", res->qid);
+ MKDUMPSTR(print_buf, buf_size, *cur_len, " -mark_id = 0x%08x", res->mark_fd_id);
+ MKDUMPSTR(print_buf, buf_size, *cur_len, " -count_id = 0x%02x", res->countid);
+}
+
+static void offlow_key_dump(struct fd_flow_key *key, struct fd_flow_key *key_mask, FILE *file)
+{
+ char print_buf[MAX_STRING_LEN];
+ uint32_t buf_size = MAX_STRING_LEN;
+ uint32_t cur_len = 0;
+
+ MKDUMPSTR(print_buf, buf_size, cur_len, "offload key:\n\t");
+ __entry_dump(print_buf, buf_size, &cur_len, key);
+
+ MKDUMPSTR(print_buf, buf_size, cur_len, "\noffload key_mask:\n\t");
+ __entry_dump(print_buf, buf_size, &cur_len, key_mask);
+
+ PMD_DRV_LOG(INFO, "%s", print_buf);
+ MKDUMPSTR(print_buf, buf_size, cur_len, "\n");
+ if (file)
+ fputs(print_buf, file);
+}
+
+static void offlow_result_dump(struct fd_flow_result *res, FILE *file)
+{
+ char print_buf[MAX_STRING_LEN];
+ uint32_t buf_size = MAX_STRING_LEN;
+ uint32_t cur_len = 0;
+
+ MKDUMPSTR(print_buf, buf_size, cur_len, "offload result:\n");
+ __result_dump(print_buf, buf_size, &cur_len, res);
+ PMD_DRV_LOG(INFO, "%s", print_buf);
+ PMD_DRV_LOG(INFO, "memdump : ===result ===");
+ MKDUMPSTR(print_buf, buf_size, cur_len, "\n");
+ if (file)
+ fputs(print_buf, file);
+}
+
+static int
+set_flow_enable(struct rte_eth_dev *dev, uint8_t dir,
+ bool enable, struct rte_flow_error *error)
+{
+ struct zxdh_hw *priv = dev->data->dev_private;
+ struct zxdh_port_attr_table port_attr = {0};
+ int ret = 0;
+
+ if (priv->is_pf) {
+ ret = zxdh_get_port_attr(priv, priv->vport.vport, &port_attr);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "get port_attr failed");
+ return -1;
+ }
+ port_attr.fd_enable = enable;
+
+ ret = zxdh_set_port_attr(priv, priv->vport.vport, &port_attr);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "write port_attr failed");
+ return -1;
+ }
+ } else {
+ struct zxdh_msg_info msg_info = {0};
+ struct zxdh_port_attr_set_msg *attr_msg = &msg_info.data.port_attr_msg;
+
+ attr_msg->mode = ZXDH_PORT_FD_EN_OFF_FLAG;
+ attr_msg->value = enable;
+ zxdh_msg_head_build(priv, ZXDH_PORT_ATTRS_SET, &msg_info);
+ ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0);
+ }
+ if (ret) {
+ PMD_DRV_LOG(ERR, "port %d flow enable failed", priv->port_id);
+ return -rte_flow_error_set(error, EEXIST,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Meter enable failed.");
+ }
+ if (dir == FLOW_INGRESS)
+ priv->i_flow_en = !!enable;
+ else
+ priv->e_flow_en = !!enable;
+
+ return ret;
+}
+
+static int
+set_vxlan_enable(struct rte_eth_dev *dev, bool enable, struct rte_flow_error *error)
+{
+ struct zxdh_hw *priv = dev->data->dev_private;
+ struct zxdh_port_attr_table port_attr = {0};
+ int ret = 0;
+
+ if (priv->vxlan_flow_en == !!enable)
+ return 0;
+ if (priv->is_pf) {
+ ret = zxdh_get_port_attr(priv, priv->vport.vport, &port_attr);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "get port_attr failed");
+ return -1;
+ }
+ port_attr.fd_enable = enable;
+
+ ret = zxdh_set_port_attr(priv, priv->vport.vport, &port_attr);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "write port_attr failed");
+ return -1;
+ }
+ } else {
+ struct zxdh_msg_info msg_info = {0};
+ struct zxdh_port_attr_set_msg *attr_msg = &msg_info.data.port_attr_msg;
+
+ attr_msg->mode = ZXDH_PORT_VXLAN_OFFLOAD_EN_OFF;
+ attr_msg->value = enable;
+
+ zxdh_msg_head_build(priv, ZXDH_PORT_ATTRS_SET, &msg_info);
+ ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(msg_info), NULL, 0);
+ }
+ if (ret) {
+ PMD_DRV_LOG(ERR, "port %d vxlan flow enable failed", priv->port_id);
+ return -rte_flow_error_set(error, EEXIST,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "vxlan offload enable failed.");
+ }
+ priv->vxlan_flow_en = !!enable;
+ return ret;
+}
+
+void zxdh_register_flow_engine(struct dh_flow_engine *engine)
+{
+ TAILQ_INSERT_TAIL(&flow_engine_list, engine, node);
+}
+
+static void zxdh_flow_free(struct zxdh_flow *dh_flow)
+{
+ if (dh_flow)
+ rte_mempool_put(zxdh_shared_data->flow_mp, dh_flow);
+}
+
+static struct dh_flow_engine *zxdh_get_flow_engine(struct rte_eth_dev *dev __rte_unused)
+{
+ struct dh_flow_engine *engine = NULL;
+ void *temp;
+
+ RTE_TAILQ_FOREACH_SAFE(engine, &flow_engine_list, node, temp) {
+ if (engine->type == FLOW_TYPE_FD_TCAM)
+ break;
+ }
+ return engine;
+}
+
+static int
+zxdh_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ struct dh_flow_engine *flow_engine = NULL;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+ flow_engine = zxdh_get_flow_engine(dev);
+ if (flow_engine == NULL || flow_engine->parse_pattern_action == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot find valid flow engine.");
+ return -rte_errno;
+ }
+ if (flow_engine->parse_pattern_action(dev, attr, pattern, actions, error, NULL) != 0)
+ return -rte_errno;
+ return 0;
+}
+
+static struct zxdh_flow *flow_exist_check(struct rte_eth_dev *dev, struct zxdh_flow *dh_flow)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ struct rte_flow *entry;
+ struct zxdh_flow *entry_flow;
+
+ TAILQ_FOREACH(entry, &hw->dh_flow_list, next) {
+ entry_flow = (struct zxdh_flow *)entry->driver_flow;
+ if ((memcmp(&entry_flow->flowentry.fd_flow.key, &dh_flow->flowentry.fd_flow.key,
+ sizeof(struct fd_flow_key)) == 0) &&
+ (memcmp(&entry_flow->flowentry.fd_flow.key_mask,
+ &dh_flow->flowentry.fd_flow.key_mask,
+ sizeof(struct fd_flow_key)) == 0)) {
+ return entry_flow;
+ }
+ }
+ return NULL;
+}
+
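+/* Create a flow: parse pattern/actions, reject duplicates, program the HW entry and enable FD on the first rule */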
+static struct rte_flow *
+zxdh_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ struct rte_flow *flow = NULL;
+ struct zxdh_flow *dh_flow = NULL;
+ int ret = 0;
+ struct dh_flow_engine *flow_engine = NULL;
+
+ flow_engine = zxdh_get_flow_engine(dev);
+
+ if (flow_engine == NULL ||
+ flow_engine->parse_pattern_action == NULL ||
+ flow_engine->apply == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot find valid flow engine.");
+ return NULL;
+ }
+
+ flow = rte_zmalloc("rte_flow", sizeof(struct rte_flow), 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "flow malloc failed");
+ return NULL;
+ }
+ ret = rte_mempool_get(zxdh_shared_data->flow_mp, (void **)&dh_flow);
+ if (ret) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory from flowmp");
+ goto free_flow;
+ }
+ memset(dh_flow, 0, sizeof(struct zxdh_flow));
+ if (flow_engine->parse_pattern_action(dev, attr, pattern, actions, error, dh_flow) != 0) {
+ PMD_DRV_LOG(ERR, "parse_pattern_action failed zxdh_created failed");
+ goto free_flow;
+ }
+
+ if (flow_exist_check(dev, dh_flow) != NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "flow repeat .no add again");
+ goto free_flow;
+ }
+
+ ret = flow_engine->apply(dev, dh_flow, error, hw->vport.vport, hw->pcie_id);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "apply failed flow created failed");
+ goto free_flow;
+ }
+ flow->driver_flow = dh_flow;
+ flow->port_id = dev->data->port_id;
+ flow->type = ZXDH_FLOW_GROUP_TCAM;
+ TAILQ_INSERT_TAIL(&hw->dh_flow_list, flow, next);
+
+ if (hw->i_flow_en == 0) {
+ ret = set_flow_enable(dev, FLOW_INGRESS, 1, error);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "set flow enable failed");
+ goto free_flow;
+ }
+ }
+ return flow;
+free_flow:
+ zxdh_flow_free(dh_flow);
+ rte_free(flow);
+ return NULL;
+}
+
+static int
+zxdh_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct zxdh_hw *priv = dev->data->dev_private;
+ struct zxdh_flow *dh_flow = NULL;
+ int ret = 0;
+ struct dh_flow_engine *flow_engine = NULL;
+
+ flow_engine = zxdh_get_flow_engine(dev);
+ if (flow_engine == NULL ||
+ flow_engine->destroy == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot find valid flow engine.");
+ return -rte_errno;
+ }
+ if (flow->driver_flow)
+ dh_flow = (struct zxdh_flow *)flow->driver_flow;
+
+ if (dh_flow == NULL) {
+ PMD_DRV_LOG(ERR, "invalid flow");
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "invalid flow");
+ return -1;
+ }
+ ret = flow_engine->destroy(dev, dh_flow, error, priv->vport.vport, priv->pcie_id);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to destroy flow.");
+ return -rte_errno;
+ }
+ TAILQ_REMOVE(&priv->dh_flow_list, flow, next);
+ zxdh_flow_free(dh_flow);
+ rte_free(flow);
+
+ if (TAILQ_EMPTY(&priv->dh_flow_list)) {
+ ret = set_flow_enable(dev, FLOW_INGRESS, 0, error);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "clear flow enable failed");
+ return -rte_errno;
+ }
+ }
+ return ret;
+}
+
+
+static int
+zxdh_flow_query(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_action *actions,
+ void *data, struct rte_flow_error *error)
+{
+ struct zxdh_flow *dh_flow;
+ int ret = 0;
+ struct dh_flow_engine *flow_engine = NULL;
+
+ flow_engine = zxdh_get_flow_engine(dev);
+
+ if (flow_engine == NULL ||
+ flow_engine->query_count == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot find valid flow engine.");
+ return -rte_errno;
+ }
+
dh_flow = (struct zxdh_flow *)flow->driver_flow;
+ if (dh_flow == NULL) {
+ PMD_DRV_LOG(ERR, "flow is not exist");
+ return -1;
+ }
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = flow_engine->query_count(dev, dh_flow,
+ (struct rte_flow_query_count *)data, error);
+ break;
+ default:
+ ret = rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ goto out;
+ }
+ }
+out:
+ if (ret)
+ PMD_DRV_LOG(ERR, "flow query failed");
+ return ret;
+}
+
+static int zxdh_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct rte_flow *flow;
+ struct zxdh_flow *dh_flow = NULL;
+ struct zxdh_hw *hw = dev->data->dev_private;
+ struct zxdh_dtb_shared_data *dtb_data = &hw->dev_sd->dtb_sd;
+ struct dh_flow_engine *flow_engine = NULL;
+ struct zxdh_msg_info msg_info = {0};
+ uint8_t zxdh_msg_reply_info[ZXDH_ST_SZ_BYTES(msg_reply_info)] = {0};
+ int ret = 0;
+
+ flow_engine = zxdh_get_flow_engine(dev);
+ if (flow_engine == NULL) {
+ PMD_DRV_LOG(ERR, "get flow engine failed");
+ return -1;
+ }
+ ret = set_flow_enable(dev, FLOW_INGRESS, 0, error);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "clear flow enable failed");
+ return ret;
+ }
+
+ ret = set_vxlan_enable(dev, 0, error);
+ if (ret)
+ PMD_DRV_LOG(ERR, "clear vxlan enable failed");
+ hw->vxlan_fd_num = 0;
+
+ if (hw->is_pf) {
+ ret = zxdh_np_dtb_acl_offline_delete(hw->dev_id, dtb_data->queueid,
+ ZXDH_SDT_FD_TABLE, hw->vport.vport,
+ ZXDH_FLOW_STATS_INGRESS_BASE, 1);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s flush failed. code:%d", dev->data->name, ret);
+ } else {
+ zxdh_msg_head_build(hw, ZXDH_FLOW_HW_FLUSH, &msg_info);
+ ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(struct zxdh_msg_info),
+ (void *)zxdh_msg_reply_info, ZXDH_ST_SZ_BYTES(msg_reply_info));
+ if (ret) {
+ PMD_DRV_LOG(ERR, "port %d flow op %d flush failed ret %d",
+ hw->port_id, ZXDH_FLOW_HW_FLUSH, ret);
+ return -1;
+ }
+ }
+
+ /* Remove all flows */
+ while ((flow = TAILQ_FIRST(&hw->dh_flow_list))) {
+ TAILQ_REMOVE(&hw->dh_flow_list, flow, next);
+ dh_flow = (struct zxdh_flow *)flow->driver_flow;
+ if (dh_flow == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid flow Failed to destroy flow.");
+ ret = rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "Invalid flow ,flush failed");
+ return ret;
+ }
+
+ zxdh_flow_free(dh_flow);
+ rte_free(flow);
+ }
+ return ret;
+}
+
+static void
+handle_res_dump(struct rte_eth_dev *dev)
+{
+ struct zxdh_hw *priv = dev->data->dev_private;
+ uint16_t hwres_base = priv->vport.pfid << 10;
+ uint16_t hwres_cnt = ZXDH_MAX_FLOW_NUM >> 1;
+ uint16_t i;
+
+ PMD_DRV_LOG(DEBUG, "hwres_base %d", hwres_base);
+ rte_spinlock_lock(&fd_hw_res_lock);
+ for (i = 0; i < hwres_cnt; i++) {
+ if (fd_hwres_bitmap[hwres_base + i] == 1)
+ PMD_DRV_LOG(DEBUG, "used idx %d", i + hwres_base);
+ }
+ rte_spinlock_unlock(&fd_hw_res_lock);
+}
+
+static int
+zxdh_flow_dev_dump(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ FILE *file,
+ struct rte_flow_error *error __rte_unused)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ struct rte_flow *entry;
+ struct zxdh_flow *entry_flow;
+ uint32_t dtb_qid = 0;
+ uint32_t entry_num = 0;
+ uint16_t ret = 0;
+ ZXDH_DTB_ACL_ENTRY_INFO_T *fd_entry = NULL;
+ uint8_t *key = NULL;
+ uint8_t *key_mask = NULL;
+ uint8_t *result = NULL;
+
+ if (flow) {
+ entry_flow = flow_exist_check(dev, (struct zxdh_flow *)flow->driver_flow);
+ if (entry_flow) {
+ PMD_DRV_LOG(DEBUG, "handle idx %d:", entry_flow->flowentry.hw_idx);
+ offlow_key_dump(&entry_flow->flowentry.fd_flow.key,
+ &entry_flow->flowentry.fd_flow.key_mask, file);
+ offlow_result_dump(&entry_flow->flowentry.fd_flow.result, file);
+ }
+ } else {
+ if (hw->is_pf) {
+ dtb_qid = hw->dev_sd->dtb_sd.queueid;
+ fd_entry = rte_malloc(NULL,
+ sizeof(ZXDH_DTB_ACL_ENTRY_INFO_T) * ZXDH_MAX_FLOW_NUM, 1);
+ key = rte_malloc(NULL, sizeof(struct fd_flow_key) * ZXDH_MAX_FLOW_NUM, 1);
+ key_mask = rte_malloc(NULL,
+ sizeof(struct fd_flow_key) * ZXDH_MAX_FLOW_NUM, 1);
+ result = rte_malloc(NULL,
+ sizeof(struct fd_flow_result) * ZXDH_MAX_FLOW_NUM, 1);
+ if (!fd_entry || !key || !key_mask || !result) {
+ PMD_DRV_LOG(ERR, "fd_entry malloc failed!");
+ goto end;
+ }
+
+ for (int i = 0; i < ZXDH_MAX_FLOW_NUM; i++) {
+ fd_entry[i].key_data = key + i * sizeof(struct fd_flow_key);
+ fd_entry[i].key_mask = key_mask + i * sizeof(struct fd_flow_key);
+ fd_entry[i].p_as_rslt = result + i * sizeof(struct fd_flow_result);
+ }
+ ret = zxdh_np_dtb_acl_table_dump_by_vport(hw->dev_id, dtb_qid,
+ ZXDH_SDT_FD_TABLE, hw->vport.vport, &entry_num,
+ (uint8_t *)fd_entry);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "dpp_dtb_acl_table_dump_by_vport failed!");
+ goto end;
+ }
+ for (uint32_t i = 0; i < entry_num; i++) {
+ offlow_key_dump((struct fd_flow_key *)fd_entry[i].key_data,
+ (struct fd_flow_key *)fd_entry[i].key_mask, file);
+ offlow_result_dump((struct fd_flow_result *)fd_entry[i].p_as_rslt,
+ file);
+ }
+ rte_free(result);
+ rte_free(key_mask);
+ rte_free(key);
+ rte_free(fd_entry);
+ } else {
+ TAILQ_FOREACH(entry, &hw->dh_flow_list, next) {
+ entry_flow = (struct zxdh_flow *)entry->driver_flow;
+ offlow_key_dump(&entry_flow->flowentry.fd_flow.key,
+ &entry_flow->flowentry.fd_flow.key_mask, file);
+ offlow_result_dump(&entry_flow->flowentry.fd_flow.result, file);
+ }
+ }
+ }
+ handle_res_dump(dev);
+
+ return 0;
+end:
+ rte_free(result);
+ rte_free(key_mask);
+ rte_free(key);
+ rte_free(fd_entry);
+ return -1;
+}
+
+static int32_t
+get_available_handle(struct zxdh_hw *hw, uint16_t vport)
+{
+ int ret = 0;
+ uint32_t handle_idx = 0;
+
+ ret = zxdh_np_dtb_acl_index_request(hw->dev_id, ZXDH_SDT_FD_TABLE, vport, &handle_idx);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for hw!");
+ return INVALID_HANDLEIDX;
+ }
+ return handle_idx;
+}
+
+static int free_handle(struct zxdh_hw *hw, uint16_t handle_idx, uint16_t vport)
+{
+ int ret = zxdh_np_dtb_acl_index_release(hw->dev_id, ZXDH_SDT_FD_TABLE, vport, handle_idx);
+
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to free handle_idx %d for hw!", handle_idx);
+ return -1;
+ }
+ return 0;
+}
+
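+/*
+ * encap0 data is spread over two consecutive ERAM entries: index * 2 holds
+ * the outer header fields and index * 2 + 1 holds the outer destination IP.
+ */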
+static uint16_t
+zxdh_encap0_to_dtbentry(struct zxdh_hw *hw __rte_unused,
+ struct zxdh_flow *dh_flow,
+ ZXDH_DTB_USER_ENTRY_T *dtb_entry)
+{
+ ZXDH_DTB_ERAM_ENTRY_INFO_T *dtb_eram_entry;
+ dtb_eram_entry = rte_zmalloc(NULL, sizeof(ZXDH_DTB_ERAM_ENTRY_INFO_T), 0);
+
+ if (dtb_eram_entry == NULL)
+ return INVALID_HANDLEIDX;
+
+ dtb_eram_entry->index = dh_flow->flowentry.fd_flow.result.encap0_index * 2;
+ dtb_eram_entry->p_data = (uint32_t *)&dh_flow->encap0;
+
+ dtb_entry->sdt_no = ZXDH_SDT_TUNNEL_ENCAP0_TABLE;
+ dtb_entry->p_entry_data = dtb_eram_entry;
+ return 0;
+}
+
+static uint16_t
+zxdh_encap0_ip_to_dtbentry(struct zxdh_hw *hw __rte_unused,
+ struct zxdh_flow *dh_flow,
+ ZXDH_DTB_USER_ENTRY_T *dtb_entry)
+{
+ ZXDH_DTB_ERAM_ENTRY_INFO_T *dtb_eram_entry;
+ dtb_eram_entry = rte_zmalloc(NULL, sizeof(ZXDH_DTB_ERAM_ENTRY_INFO_T), 0);
+
+ if (dtb_eram_entry == NULL)
+ return INVALID_HANDLEIDX;
+
+ dtb_eram_entry->index = dh_flow->flowentry.fd_flow.result.encap0_index * 2 + 1;
+ dtb_eram_entry->p_data = (uint32_t *)&dh_flow->encap0.dip;
+ dtb_entry->sdt_no = ZXDH_SDT_TUNNEL_ENCAP0_TABLE;
+ dtb_entry->p_entry_data = dtb_eram_entry;
+ return 0;
+}
+
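+/*
+ * encap1 data uses four consecutive ERAM entries per index: slots 0/1 hold
+ * the outer header fields and slots 2/3 the outer source IP, with the odd
+ * slot selected when the outer header is IPv6 (encap0.ethtype == 1).
+ */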
+static uint16_t zxdh_encap1_to_dtbentry(struct zxdh_hw *hw __rte_unused,
+ struct zxdh_flow *dh_flow,
+ ZXDH_DTB_USER_ENTRY_T *dtb_entry)
+{
+ ZXDH_DTB_ERAM_ENTRY_INFO_T *dtb_eram_entry;
+ dtb_eram_entry = rte_zmalloc(NULL, sizeof(ZXDH_DTB_ERAM_ENTRY_INFO_T), 0);
+
+ if (dtb_eram_entry == NULL)
+ return INVALID_HANDLEIDX;
+
+ if (dh_flow->encap0.ethtype == 0)
+ dtb_eram_entry->index = dh_flow->flowentry.fd_flow.result.encap1_index * 4;
+ else
+ dtb_eram_entry->index = dh_flow->flowentry.fd_flow.result.encap1_index * 4 + 1;
+
+ dtb_eram_entry->p_data = (uint32_t *)&dh_flow->encap1;
+
+ dtb_entry->sdt_no = ZXDH_SDT_TUNNEL_ENCAP1_TABLE;
+ dtb_entry->p_entry_data = dtb_eram_entry;
+ return 0;
+}
+
+static uint16_t
+zxdh_encap1_ip_to_dtbentry(struct zxdh_hw *hw __rte_unused,
+ struct zxdh_flow *dh_flow,
+ ZXDH_DTB_USER_ENTRY_T *dtb_entry)
+{
+ ZXDH_DTB_ERAM_ENTRY_INFO_T *dtb_eram_entry;
+ dtb_eram_entry = rte_zmalloc(NULL, sizeof(ZXDH_DTB_ERAM_ENTRY_INFO_T), 0);
+
+ if (dtb_eram_entry == NULL)
+ return INVALID_HANDLEIDX;
+ if (dh_flow->encap0.ethtype == 0)
+ dtb_eram_entry->index = dh_flow->flowentry.fd_flow.result.encap1_index * 4 + 2;
+ else
+ dtb_eram_entry->index = dh_flow->flowentry.fd_flow.result.encap1_index * 4 + 3;
+ dtb_eram_entry->p_data = (uint32_t *)&dh_flow->encap1.sip;
+ dtb_entry->sdt_no = ZXDH_SDT_TUNNEL_ENCAP1_TABLE;
+ dtb_entry->p_entry_data = dtb_eram_entry;
+ return 0;
+}
+
+static int zxdh_hw_encap_insert(struct rte_eth_dev *dev,
+ struct zxdh_flow *dh_flow,
+ struct rte_flow_error *error)
+{
+ uint32_t ret;
+ struct zxdh_hw *hw = dev->data->dev_private;
+ uint32_t dtb_qid = hw->dev_sd->dtb_sd.queueid;
+ ZXDH_DTB_USER_ENTRY_T dtb_entry = {0};
+
+ zxdh_encap0_to_dtbentry(hw, dh_flow, &dtb_entry);
+ ret = zxdh_np_dtb_table_entry_write(hw->dev_id, dtb_qid, 1, &dtb_entry);
+ zxdh_fd_flow_free_dtbentry(&dtb_entry);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "write to hw failed");
+ return -1;
+ }
+
+ zxdh_encap0_ip_to_dtbentry(hw, dh_flow, &dtb_entry);
+ ret = zxdh_np_dtb_table_entry_write(hw->dev_id, dtb_qid, 1, &dtb_entry);
+ zxdh_fd_flow_free_dtbentry(&dtb_entry);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "write to hw failed");
+ return -1;
+ }
+
+ zxdh_encap1_to_dtbentry(hw, dh_flow, &dtb_entry);
+ ret = zxdh_np_dtb_table_entry_write(hw->dev_id, dtb_qid, 1, &dtb_entry);
+ zxdh_fd_flow_free_dtbentry(&dtb_entry);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "write to hw failed");
+ return -1;
+ }
+
+ zxdh_encap1_ip_to_dtbentry(hw, dh_flow, &dtb_entry);
+ ret = zxdh_np_dtb_table_entry_write(hw->dev_id, dtb_qid, 1, &dtb_entry);
+ zxdh_fd_flow_free_dtbentry(&dtb_entry);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "write to hw failed");
+ return -1;
+ }
+ return 0;
+}
+
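+/*
+ * Build the DTB ACL user entry (key, mask, result and handle) for one flow
+ * director entry; the caller releases the allocated entry data with
+ * zxdh_fd_flow_free_dtbentry().
+ */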
+static uint16_t
+zxdh_fd_flow_to_dtbentry(struct zxdh_hw *hw __rte_unused,
+ struct zxdh_flow_info *fdflow,
+ ZXDH_DTB_USER_ENTRY_T *dtb_entry)
+{
+ ZXDH_DTB_ACL_ENTRY_INFO_T *dtb_acl_entry;
+ uint16_t handle_idx = 0;
+ dtb_acl_entry = rte_zmalloc("fdflow_dtbentry", sizeof(ZXDH_DTB_ACL_ENTRY_INFO_T), 0);
+
+ if (dtb_acl_entry == NULL)
+ return INVALID_HANDLEIDX;
+
+ dtb_acl_entry->key_data = (uint8_t *)&fdflow->fd_flow.key;
+ dtb_acl_entry->key_mask = (uint8_t *)&fdflow->fd_flow.key_mask;
+ dtb_acl_entry->p_as_rslt = (uint8_t *)&fdflow->fd_flow.result;
+
+ handle_idx = fdflow->hw_idx;
+
+ if (handle_idx >= ZXDH_MAX_FLOW_NUM) {
+ rte_free(dtb_acl_entry);
+ return INVALID_HANDLEIDX;
+ }
+ dtb_acl_entry->handle = handle_idx;
+ dtb_entry->sdt_no = ZXDH_SDT_FD_TABLE;
+ dtb_entry->p_entry_data = dtb_acl_entry;
+ return handle_idx;
+}
+
+static int zxdh_hw_flow_insert(struct rte_eth_dev *dev,
+ struct zxdh_flow *dh_flow,
+ struct rte_flow_error *error,
+ uint16_t vport)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ uint32_t dtb_qid = hw->dev_sd->dtb_sd.queueid;
+ ZXDH_DTB_USER_ENTRY_T dtb_entry = {0};
+ uint32_t ret;
+ uint16_t handle_idx;
+
+ struct zxdh_flow_info *flow = &dh_flow->flowentry;
+ handle_idx = zxdh_fd_flow_to_dtbentry(hw, flow, &dtb_entry);
+ if (handle_idx == INVALID_HANDLEIDX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory for hw");
+ return -1;
+ }
+ ret = zxdh_np_dtb_table_entry_write(hw->dev_id, dtb_qid, 1, &dtb_entry);
+ zxdh_fd_flow_free_dtbentry(&dtb_entry);
+ if (ret) {
+ ret = free_handle(hw, handle_idx, vport);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "realease handle_idx to hw failed");
+ return -1;
+ }
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "write to hw failed");
+ return -1;
+ }
+ dh_flow->flowentry.hw_idx = handle_idx;
+ return 0;
+}
+
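+/*
+ * Read one flow counter pair (hit packets/bytes) from the DTB stats area;
+ * with 'clear' set the counter is fetched through the PPU interface instead,
+ * which is used when a counter is (re)initialised.
+ */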
+static int
+hw_count_query(struct zxdh_hw *hw, uint32_t countid, bool clear,
+ struct flow_stats *fstats, struct rte_flow_error *error)
+{
+ uint32_t stats_id = 0;
+ int ret = 0;
+ stats_id = countid;
+ if (stats_id >= ZXDH_MAX_FLOW_NUM) {
+ PMD_DRV_LOG(DEBUG, "query count id %d invalid", stats_id);
+ ret = rte_flow_error_set(error, ENODEV,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "query count id invalid");
+ return -rte_errno;
+ }
+ PMD_DRV_LOG(DEBUG, "query count id %d,clear %d ", stats_id, clear);
+ if (!clear)
+ ret = zxdh_np_dtb_stats_get(hw->dev_id, hw->dev_sd->dtb_sd.queueid, 1,
+ stats_id + ZXDH_FLOW_STATS_INGRESS_BASE,
+ (uint32_t *)fstats);
+ else
+ ret = zxdh_np_stat_ppu_cnt_get_ex(hw->dev_id, 1,
+ stats_id + ZXDH_FLOW_STATS_INGRESS_BASE,
+ 1, (uint32_t *)fstats);
+ if (ret)
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "fail to get flow stats");
+ return ret;
+}
+
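+/*
+ * Flow counters are shared per handle index: count_ref()/count_deref()
+ * maintain a reference count under the per-counter spinlock and clear the
+ * hardware counter on the 0 -> 1 and 1 -> 0 transitions.
+ */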
+static int
+count_deref(struct zxdh_hw *hw, uint32_t countid,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+ struct count_res *count_res = &flow_count_ref[countid];
+ struct flow_stats fstats = {0};
+
+ rte_spinlock_lock(&count_res->count_lock);
+
+ if (count_res->count_ref >= 1) {
+ count_res->count_ref--;
+ } else {
+ rte_spinlock_unlock(&count_res->count_lock);
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL,
+ "count deref underflow");
+ }
+ if (count_res->count_ref == 0)
+ ret = hw_count_query(hw, countid, 1, &fstats, error);
+
+ rte_spinlock_unlock(&count_res->count_lock);
+ return ret;
+}
+
+static int
+count_ref(struct zxdh_hw *hw, uint32_t countid, struct rte_flow_error *error)
+{
+ int ret = 0;
+ struct count_res *count_res = &flow_count_ref[countid];
+ struct flow_stats fstats = {0};
+
+ rte_spinlock_lock(&count_res->count_lock);
+ if (count_res->count_ref < 255) {
+ count_res->count_ref++;
+ } else {
+ rte_spinlock_unlock(&count_res->count_lock);
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL,
+ "count ref overflow");
+ }
+
+ if (count_res->count_ref == 1)
+ ret = hw_count_query(hw, countid, 1, &fstats, error);
+
+ rte_spinlock_unlock(&count_res->count_lock);
+ return ret;
+}
+
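+/*
+ * PF-side flow apply: request a handle index for the vport, derive the
+ * counter and encap indexes from it, program the optional VXLAN encap
+ * entries, then write the ACL entry itself.
+ */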
+int
+pf_fd_hw_apply(struct rte_eth_dev *dev, struct zxdh_flow *dh_flow,
+ struct rte_flow_error *error, uint16_t vport, uint16_t pcieid)
+{
+ int ret = 0;
+ struct zxdh_hw *hw = dev->data->dev_private;
+ uint8_t vf_index = 0;
+ uint8_t action_bits = dh_flow->flowentry.fd_flow.result.action_idx;
+ uint32_t countid = MAX_FLOW_COUNT_NUM;
+ uint32_t handle_idx = 0;
+ union zxdh_virport_num port = {0};
+
+ port.vport = vport;
+ handle_idx = get_available_handle(hw, vport);
+ if (handle_idx >= ZXDH_MAX_FLOW_NUM) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate flow handle index");
+ return -1;
+ }
+ dh_flow->flowentry.hw_idx = handle_idx;
+ if ((action_bits & (1 << FD_ACTION_COUNT_BIT)) != 0) {
+ countid = handle_idx;
+ dh_flow->flowentry.fd_flow.result.countid = countid;
+ }
+
+ if ((action_bits & (1 << FD_ACTION_VXLAN_ENCAP)) != 0) {
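+ /*
+ * encap0 entries are indexed by the flow handle; encap1 entries are
+ * grouped per hash search index, slot 0 for the PF and slots 1..N
+ * for VFs keyed by their PCIe VF index.
+ */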
+ dh_flow->flowentry.fd_flow.result.encap0_index = handle_idx;
+ if (!port.vf_flag) {
+ dh_flow->flowentry.fd_flow.result.encap1_index =
+ hw->hash_search_index * MAX_ENCAP1_NUM;
+ } else {
+ vf_index = VF_IDX(pcieid);
+ if (vf_index < (ZXDH_MAX_VF - 1)) {
+ dh_flow->flowentry.fd_flow.result.encap1_index =
+ hw->hash_search_index * MAX_ENCAP1_NUM + vf_index + 1;
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "encap1 vf_index is too big");
+ return -1;
+ }
+ }
+ PMD_DRV_LOG(DEBUG, "encap_index (%d)(%d)",
+ dh_flow->flowentry.fd_flow.result.encap0_index,
+ dh_flow->flowentry.fd_flow.result.encap1_index);
+ if (zxdh_hw_encap_insert(dev, dh_flow, error) != 0)
+ return -1;
+ }
+ ret = zxdh_hw_flow_insert(dev, dh_flow, error, vport);
+ if (!ret && countid < MAX_FLOW_COUNT_NUM)
+ ret = count_ref(hw, countid, error);
+
+ if (!ret) {
+ if (!port.vf_flag) {
+ if (((action_bits & (1 << FD_ACTION_VXLAN_ENCAP)) != 0) ||
+ ((action_bits & (1 << FD_ACTION_VXLAN_DECAP)) != 0)) {
+ hw->vxlan_fd_num++;
+ if (hw->vxlan_fd_num == 1)
+ set_vxlan_enable(dev, 1, error);
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int
+zxdh_hw_flow_del(struct rte_eth_dev *dev,
+ struct zxdh_flow *dh_flow,
+ struct rte_flow_error *error,
+ uint16_t vport)
+{
+ struct zxdh_flow_info *flow = &dh_flow->flowentry;
+ ZXDH_DTB_USER_ENTRY_T dtb_entry = {0};
+ struct zxdh_hw *hw = dev->data->dev_private;
+ uint32_t dtb_qid = hw->dev_sd->dtb_sd.queueid;
+ uint32_t ret;
+ uint16_t handle_idx;
+
+ handle_idx = zxdh_fd_flow_to_dtbentry(hw, flow, &dtb_entry);
+ if (handle_idx >= ZXDH_MAX_FLOW_NUM) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory for hw");
+ return -1;
+ }
+ ret = zxdh_np_dtb_table_entry_delete(hw->dev_id, dtb_qid, 1, &dtb_entry);
+ zxdh_fd_flow_free_dtbentry(&dtb_entry);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "delete to hw failed");
+ return -1;
+ }
+ ret = free_handle(hw, handle_idx, vport);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "realease handle_idx to hw failed");
+ return -1;
+ }
+ PMD_DRV_LOG(DEBUG, "realease handle_idx to hw succ! %d", handle_idx);
+ return ret;
+}
+
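+/*
+ * PF-side flow destroy: delete the ACL entry, release the handle index and
+ * drop the counter/VXLAN references taken by pf_fd_hw_apply().
+ */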
+int
+pf_fd_hw_destroy(struct rte_eth_dev *dev, struct zxdh_flow *dh_flow,
+ struct rte_flow_error *error, uint16_t vport,
+ uint16_t pcieid __rte_unused)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ union zxdh_virport_num port = {0};
+ int ret = 0;
+
+ port.vport = vport;
+ ret = zxdh_hw_flow_del(dev, dh_flow, error, vport);
+ PMD_DRV_LOG(DEBUG, "destroy handle id %d", dh_flow->flowentry.hw_idx);
+ if (!ret) {
+ uint8_t action_bits = dh_flow->flowentry.fd_flow.result.action_idx;
+ uint32_t countid;
+ countid = dh_flow->flowentry.hw_idx;
+ if ((action_bits & (1 << FD_ACTION_COUNT_BIT)) != 0)
+ ret = count_deref(hw, countid, error);
+ if (!port.vf_flag) {
+ if (((action_bits & (1 << FD_ACTION_VXLAN_ENCAP)) != 0) ||
+ ((action_bits & (1 << FD_ACTION_VXLAN_DECAP)) != 0)) {
+ hw->vxlan_fd_num--;
+ if (hw->vxlan_fd_num == 0)
+ set_vxlan_enable(dev, 0, error);
+ }
+ }
+ }
+ return ret;
+}
+
+static int
+zxdh_hw_flow_query(struct rte_eth_dev *dev, struct zxdh_flow *dh_flow,
+ struct rte_flow_error *error)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ int ret = 0;
+ struct zxdh_flow_info *flow = &dh_flow->flowentry;
+ ZXDH_DTB_USER_ENTRY_T dtb_entry = {0};
+ uint16_t handle_idx;
+
+ handle_idx = zxdh_fd_flow_to_dtbentry(hw, flow, &dtb_entry);
+ if (handle_idx >= ZXDH_MAX_FLOW_NUM) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to build hw entry for query");
+ ret = -1;
+ goto free_res;
+ }
+ ret = zxdh_np_dtb_table_entry_get(hw->dev_id, hw->dev_sd->dtb_sd.queueid, &dtb_entry, 0);
+ if (ret != 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed query entry from hw ");
+ goto free_res;
+ }
+
+free_res:
+ zxdh_fd_flow_free_dtbentry(&dtb_entry);
+
+ return ret;
+}
+
+int
+pf_fd_hw_query_count(struct rte_eth_dev *dev,
+ struct zxdh_flow *flow,
+ struct rte_flow_query_count *count,
+ struct rte_flow_error *error)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ struct flow_stats fstats = {0};
+ int ret = 0;
+ uint32_t countid;
+
+ memset(&flow->flowentry.fd_flow.result, 0, sizeof(struct fd_flow_result));
+ ret = zxdh_hw_flow_query(dev, flow, error);
+ if (ret) {
+ ret = rte_flow_error_set(error, ENODEV,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "query failed");
+ return -rte_errno;
+ }
+ countid = flow->flowentry.hw_idx;
+ if (countid >= ZXDH_MAX_FLOW_NUM) {
+ ret = rte_flow_error_set(error, ENODEV,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "query count id invalid");
+ return -rte_errno;
+ }
+ ret = hw_count_query(hw, countid, 0, &fstats, error);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "fail to get flow stats");
+ return ret;
+ }
+ count->bytes = (uint64_t)(rte_le_to_cpu_32(fstats.hit_bytes_hi)) << 32 |
+ rte_le_to_cpu_32(fstats.hit_bytes_lo);
+ count->hits = (uint64_t)(rte_le_to_cpu_32(fstats.hit_pkts_hi)) << 32 |
+ rte_le_to_cpu_32(fstats.hit_pkts_lo);
+ return ret;
+}
+
+static int
+fd_flow_parse_attr(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error,
+ struct zxdh_flow *dh_flow)
+{
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->group >= MAX_GROUP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr, "Not support group.");
+ return -rte_errno;
+ }
+
+ if (dh_flow) {
+ dh_flow->group = attr->group;
+ dh_flow->direct = (attr->ingress == 1) ? 0 : 1;
+ dh_flow->pri = attr->priority;
+ }
+
+ return 0;
+}
+
+static int fd_flow_parse_pattern(struct rte_eth_dev *dev, const struct rte_flow_item *items,
+ struct rte_flow_error *error, struct zxdh_flow *dh_flow)
+{
+ struct zxdh_hw *priv = dev->data->dev_private;
+ struct zxdh_flow_info *flow = NULL;
+ const struct rte_flow_item *item;
+ const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec = NULL, *ipv6_mask = NULL;
+ const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec, *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+ const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
+ struct fd_flow_key *key, *key_mask;
+
+ if (dh_flow) {
+ flow = &dh_flow->flowentry;
+ } else {
+ flow = rte_zmalloc("dh_flow", sizeof(*flow), 0);
+ if (flow == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory ");
+ return -rte_errno;
+ }
+ }
+
+ key = &flow->fd_flow.key;
+ key_mask = &flow->fd_flow.key_mask;
+ key->vfid = rte_cpu_to_be_16(priv->vfid);
+ key_mask->vfid = 0xffff;
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ item = items;
+ if (items->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ items,
+ "Not support range");
+ return -rte_errno;
+ }
+
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+ if (eth_spec && eth_mask) {
+ key->mac_dst = eth_spec->dst;
+ key->mac_src = eth_spec->src;
+ key_mask->mac_dst = eth_mask->dst;
+ key_mask->mac_src = eth_mask->src;
+
+ if (eth_mask->type == 0xffff) {
+ key->ether_type = eth_spec->type;
+ key_mask->ether_type = eth_mask->type;
+ }
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
+ if (vlan_spec && vlan_mask) {
+ key->vlan_tci = vlan_spec->tci;
+ key_mask->vlan_tci = vlan_mask->tci;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ ipv4_spec = item->spec;
+ ipv4_mask = item->mask;
+
+ if (ipv4_spec && ipv4_mask) {
+ /* Check IPv4 mask and update input set */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.hdr_checksum ||
+ ipv4_mask->hdr.time_to_live) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 mask.");
+ return -rte_errno;
+ }
+ /* Get the filter info */
+ key->nw_proto =
+ ipv4_spec->hdr.next_proto_id;
+ key->tos =
+ ipv4_spec->hdr.type_of_service;
+ key_mask->nw_proto =
+ ipv4_mask->hdr.next_proto_id;
+ key_mask->tos =
+ ipv4_mask->hdr.type_of_service;
+ key->frag_flag = (ipv4_spec->hdr.fragment_offset != 0) ? 1 : 0;
+ key_mask->frag_flag = (ipv4_mask->hdr.fragment_offset != 0) ? 1 : 0;
+ rte_memcpy((uint32_t *)key->src_ip + 3,
+ &ipv4_spec->hdr.src_addr, 4);
+ rte_memcpy((uint32_t *)key->dst_ip + 3,
+ &ipv4_spec->hdr.dst_addr, 4);
+ rte_memcpy((uint32_t *)key_mask->src_ip + 3,
+ &ipv4_mask->hdr.src_addr, 4);
+ rte_memcpy((uint32_t *)key_mask->dst_ip + 3,
+ &ipv4_mask->hdr.dst_addr, 4);
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ ipv6_spec = item->spec;
+ ipv6_mask = item->mask;
+
+ if (ipv6_spec && ipv6_mask) {
+ /* Check IPv6 mask and update input set */
+ if (ipv6_mask->hdr.payload_len ||
+ ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 mask");
+ return -rte_errno;
+ }
+ key->tc =
+ (uint8_t)((ipv6_spec->hdr.vtc_flow &
+ RTE_IPV6_HDR_TC_MASK) >>
+ RTE_IPV6_HDR_TC_SHIFT);
+ key_mask->tc =
+ (uint8_t)((ipv6_mask->hdr.vtc_flow &
+ RTE_IPV6_HDR_TC_MASK) >>
+ RTE_IPV6_HDR_TC_SHIFT);
+
+ key->nw_proto = ipv6_spec->hdr.proto;
+ key_mask->nw_proto = ipv6_mask->hdr.proto;
+
+ rte_memcpy(key->src_ip,
+ &ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(key->dst_ip,
+ &ipv6_spec->hdr.dst_addr, 16);
+ rte_memcpy(key_mask->src_ip,
+ &ipv6_mask->hdr.src_addr, 16);
+ rte_memcpy(key_mask->dst_ip,
+ &ipv6_mask->hdr.dst_addr, 16);
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
+
+ if (tcp_spec && tcp_mask) {
+ /* Check TCP mask and update input set */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp ||
+ (tcp_mask->hdr.src_port &&
+ tcp_mask->hdr.src_port != UINT16_MAX) ||
+ (tcp_mask->hdr.dst_port &&
+ tcp_mask->hdr.dst_port != UINT16_MAX)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ key->tp_src = tcp_spec->hdr.src_port;
+ key_mask->tp_src = tcp_mask->hdr.src_port;
+
+ key->tp_dst = tcp_spec->hdr.dst_port;
+ key_mask->tp_dst = tcp_mask->hdr.dst_port;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ udp_spec = item->spec;
+ udp_mask = item->mask;
+
+ if (udp_spec && udp_mask) {
+ /* Check UDP mask and update input set*/
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum ||
+ (udp_mask->hdr.src_port &&
+ udp_mask->hdr.src_port != UINT16_MAX) ||
+ (udp_mask->hdr.dst_port &&
+ udp_mask->hdr.dst_port != UINT16_MAX)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ key->tp_src = udp_spec->hdr.src_port;
+ key_mask->tp_src = udp_mask->hdr.src_port;
+
+ key->tp_dst = udp_spec->hdr.dst_port;
+ key_mask->tp_dst = udp_mask->hdr.dst_port;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_SCTP:
+ sctp_spec = item->spec;
+ sctp_mask = item->mask;
+
+ if (!(sctp_spec && sctp_mask))
+ break;
+
+ /* Check SCTP mask and update input set */
+ if (sctp_mask->hdr.cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid sctp mask");
+ return -rte_errno;
+ }
+
+ /* Mask for SCTP src/dst ports not supported */
+ if (sctp_mask->hdr.src_port &&
+ sctp_mask->hdr.src_port != UINT16_MAX)
+ return -rte_errno;
+ if (sctp_mask->hdr.dst_port &&
+ sctp_mask->hdr.dst_port != UINT16_MAX)
+ return -rte_errno;
+
+ key->tp_src = sctp_spec->hdr.src_port;
+ key_mask->tp_src = sctp_mask->hdr.src_port;
+ key->tp_dst = sctp_spec->hdr.dst_port;
+ key_mask->tp_dst = sctp_mask->hdr.dst_port;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ {
+ vxlan_spec = item->spec;
+ vxlan_mask = item->mask;
+ static const struct rte_flow_item_vxlan flow_item_vxlan_mask = {
+ .vni = {0xff, 0xff, 0xff},
+ };
+ if (!(vxlan_spec && vxlan_mask))
+ break;
+ if (memcmp(vxlan_mask, &flow_item_vxlan_mask,
+ sizeof(struct rte_flow_item_vxlan))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vxlan mask");
+ return -rte_errno;
+ }
+ rte_memcpy(key->vni, vxlan_spec->vni, 3);
+ rte_memcpy(key_mask->vni, vxlan_mask->vni, 3);
+ break;
+ }
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "item not supported");
+ }
+ }
+
+ data_bitwise(key_mask, sizeof(*key_mask));
+ return 0;
+}
+
+static inline int
+validate_action_rss(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_rss *rss = action->conf;
+
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
+ rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->func,
+ "RSS hash function not supported");
+ if (rss->level > 1)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->level,
+ "tunnel RSS is not supported");
+ /* allow RSS key_len 0 in case of NULL (default) RSS key. */
+ if (rss->key_len == 0 && rss->key != NULL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->key_len,
+ "RSS hash key length 0");
+ if (rss->key_len > 0 && rss->key_len < ZXDH_RSS_HASH_KEY_LEN)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->key_len,
+ "RSS hash key too small");
+ if (rss->key_len > ZXDH_RSS_HASH_KEY_LEN)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->key_len,
+ "RSS hash key too large");
+ if (rss->queue_num > dev->data->nb_rx_queues)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->queue_num,
+ "number of queues too large");
+ if (!rss->queue_num)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "No queues configured");
+ return 0;
+}
+
+static int
+fd_flow_parse_vxlan_encap(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_item *item,
+ struct zxdh_flow *dh_flow)
+{
+ const struct rte_flow_item *items;
+ const struct rte_flow_item_eth *item_eth;
+ const struct rte_flow_item_vlan *item_vlan;
+ const struct rte_flow_item_ipv4 *item_ipv4;
+ const struct rte_flow_item_ipv6 *item_ipv6;
+ const struct rte_flow_item_udp *item_udp;
+ const struct rte_flow_item_vxlan *item_vxlan;
+ uint32_t i = 0;
+ rte_be32_t addr;
+
+ for (i = 0; i < ACTION_VXLAN_ENCAP_ITEMS_NUM; i++) {
+ items = &item[i];
+ switch (items->type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ item_eth = items->spec;
+ rte_memcpy(&dh_flow->encap0.dst_mac1, item_eth->dst.addr_bytes, 2);
+ rte_memcpy(&dh_flow->encap1.src_mac1, item_eth->src.addr_bytes, 2);
+ rte_memcpy(&dh_flow->encap0.dst_mac2, &item_eth->dst.addr_bytes[2], 4);
+ rte_memcpy(&dh_flow->encap1.src_mac2, &item_eth->src.addr_bytes[2], 4);
+ dh_flow->encap0.dst_mac1 = rte_bswap16(dh_flow->encap0.dst_mac1);
+ dh_flow->encap1.src_mac1 = rte_bswap16(dh_flow->encap1.src_mac1);
+ dh_flow->encap0.dst_mac2 = rte_bswap32(dh_flow->encap0.dst_mac2);
+ dh_flow->encap1.src_mac2 = rte_bswap32(dh_flow->encap1.src_mac2);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ item_vlan = items->spec;
+ dh_flow->encap1.vlan_tci = item_vlan->hdr.vlan_tci;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ item_ipv4 = items->spec;
+ dh_flow->encap0.ethtype = 0;
+ dh_flow->encap0.tos = item_ipv4->hdr.type_of_service;
+ dh_flow->encap0.ttl = item_ipv4->hdr.time_to_live;
+ addr = rte_bswap32(item_ipv4->hdr.src_addr);
+ rte_memcpy((uint32_t *)dh_flow->encap1.sip.ip_addr + 3, &addr, 4);
+ addr = rte_bswap32(item_ipv4->hdr.dst_addr);
+ rte_memcpy((uint32_t *)dh_flow->encap0.dip.ip_addr + 3, &addr, 4);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ item_ipv6 = items->spec;
+ dh_flow->encap0.ethtype = 1;
+ dh_flow->encap0.tos =
+ (item_ipv6->hdr.vtc_flow & RTE_IPV6_HDR_TC_MASK) >>
+ RTE_IPV6_HDR_TC_SHIFT;
+ dh_flow->encap0.ttl = item_ipv6->hdr.hop_limits;
+ rte_memcpy(dh_flow->encap1.sip.ip_addr, &item_ipv6->hdr.src_addr, 16);
+ dh_flow->encap1.sip.ip_addr[0] =
+ rte_bswap32(dh_flow->encap1.sip.ip_addr[0]);
+ dh_flow->encap1.sip.ip_addr[1] =
+ rte_bswap32(dh_flow->encap1.sip.ip_addr[1]);
+ dh_flow->encap1.sip.ip_addr[2] =
+ rte_bswap32(dh_flow->encap1.sip.ip_addr[2]);
+ dh_flow->encap1.sip.ip_addr[3] =
+ rte_bswap32(dh_flow->encap1.sip.ip_addr[3]);
+ rte_memcpy(dh_flow->encap0.dip.ip_addr, &item_ipv6->hdr.dst_addr, 16);
+ dh_flow->encap0.dip.ip_addr[0] =
+ rte_bswap32(dh_flow->encap0.dip.ip_addr[0]);
+ dh_flow->encap0.dip.ip_addr[1] =
+ rte_bswap32(dh_flow->encap0.dip.ip_addr[1]);
+ dh_flow->encap0.dip.ip_addr[2] =
+ rte_bswap32(dh_flow->encap0.dip.ip_addr[2]);
+ dh_flow->encap0.dip.ip_addr[3] =
+ rte_bswap32(dh_flow->encap0.dip.ip_addr[3]);
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ item_udp = items->spec;
+ dh_flow->encap0.tp_dst = item_udp->hdr.dst_port;
+ dh_flow->encap0.tp_dst = rte_bswap16(dh_flow->encap0.tp_dst);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ item_vxlan = items->spec;
+ dh_flow->encap0.vni = item_vxlan->vni[0] * 65536 +
+ item_vxlan->vni[1] * 256 + item_vxlan->vni[2];
+ break;
+ case RTE_FLOW_ITEM_TYPE_END:
+ default:
+ break;
+ }
+ }
+ dh_flow->encap0.hit_flag = 1;
+ dh_flow->encap1.hit_flag = 1;
+
+ return 0;
+}
+
+static int
+fd_flow_parse_action(struct rte_eth_dev *dev, const struct rte_flow_action *actions,
+ struct rte_flow_error *error, struct zxdh_flow *dh_flow)
+{
+ struct zxdh_flow_info *flow = NULL;
+ struct fd_flow_result *result = NULL;
+ const struct rte_flow_item *enc_item = NULL;
+ uint8_t action_bitmap = 0;
+ uint32_t dest_num = 0;
+ uint32_t mark_num = 0;
+ uint32_t counter_num = 0;
+ int ret;
+
+ rte_errno = 0;
+ if (dh_flow) {
+ flow = &dh_flow->flowentry;
+ } else {
+ flow = rte_zmalloc("dh_flow", sizeof(*flow), 0);
+ if (flow == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory ");
+ return -rte_errno;
+ }
+ }
+ result = &flow->fd_flow.result;
+ action_bitmap = result->action_idx;
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ {
+ dest_num++;
+ if (action_bitmap & (1 << FD_ACTION_RSS_BIT)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "multi rss action no support.");
+ goto free_flow;
+ }
+ ret = validate_action_rss(dev, actions, error);
+ if (ret)
+ goto free_flow;
+ action_bitmap |= (1 << FD_ACTION_RSS_BIT);
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ {
+ mark_num++;
+ if (action_bitmap & (1 << FD_ACTION_MARK_BIT)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "multi mark action no support.");
+ goto free_flow;
+ }
+ const struct rte_flow_action_mark *act_mark = actions->conf;
+ result->mark_fd_id = rte_cpu_to_le_32(act_mark->id);
+ action_bitmap |= (1 << FD_ACTION_MARK_BIT);
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ {
+ counter_num++;
+ if (action_bitmap & (1 << FD_ACTION_COUNT_BIT)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "multi count action no support.");
+ goto free_flow;
+ }
+ const struct rte_flow_action_count *act_count = actions->conf;
+ if (act_count->id > MAX_FLOW_COUNT_NUM) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "count action id no support.");
+ goto free_flow;
+ };
+ result->countid = act_count->id;
+ action_bitmap |= (1 << FD_ACTION_COUNT_BIT);
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ {
+ dest_num++;
+ if (action_bitmap & (1 << FD_ACTION_QUEUE_BIT)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "multi queue action no support.");
+ goto free_flow;
+ }
+ const struct rte_flow_action_queue *act_q;
+ act_q = actions->conf;
+ if (act_q->index >= dev->data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Invalid queue ID");
+ goto free_flow;
+ }
+ ret = zxdh_hw_qid_to_logic_qid(dev, act_q->index << 1);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Invalid phy queue ID .");
+ goto free_flow;
+ }
+ result->qid = rte_cpu_to_le_16(ret);
+ action_bitmap |= (1 << FD_ACTION_QUEUE_BIT);
+
+ PMD_DRV_LOG(DEBUG, "QID RET 0x%x", result->qid);
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ {
+ dest_num++;
+ if (action_bitmap & (1 << FD_ACTION_DROP_BIT)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "multi drop action no support.");
+ goto free_flow;
+ }
+ action_bitmap |= (1 << FD_ACTION_DROP_BIT);
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+ {
+ dest_num++;
+ if (action_bitmap & (1 << FD_ACTION_VXLAN_DECAP)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "multi drop action no support.");
+ goto free_flow;
+ }
+ action_bitmap |= (1 << FD_ACTION_VXLAN_DECAP);
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ enc_item = ((const struct rte_flow_action_vxlan_encap *)
+ actions->conf)->definition;
+ if (dh_flow != NULL)
+ fd_flow_parse_vxlan_encap(dev, enc_item, dh_flow);
+ dest_num++;
+ if (action_bitmap & (1 << FD_ACTION_VXLAN_ENCAP)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "multi drop action no support.");
+ goto free_flow;
+ }
+ action_bitmap |= (1 << FD_ACTION_VXLAN_ENCAP);
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Invalid action.");
+ goto free_flow;
+ }
+ }
+
+ if (dest_num >= 2) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Unsupported action combination");
+ return -rte_errno;
+ }
+
+ if (mark_num >= 2) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Too many mark actions");
+ return -rte_errno;
+ }
+
+ if (counter_num >= 2) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Too many count actions");
+ return -rte_errno;
+ }
+
+ if (dest_num + mark_num + counter_num == 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Empty action");
+ return -rte_errno;
+ }
+
+ result->action_idx = action_bitmap;
+ return 0;
+
+free_flow:
+ if (!dh_flow)
+ rte_free(flow);
+ return -rte_errno;
+}
+
+static int
+fd_parse_pattern_action(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error, struct zxdh_flow *dh_flow)
+{
+ int ret = 0;
+ ret = fd_flow_parse_attr(dev, attr, error, dh_flow);
+ if (ret < 0)
+ return -rte_errno;
+ ret = fd_flow_parse_pattern(dev, pattern, error, dh_flow);
+ if (ret < 0)
+ return -rte_errno;
+
+ ret = fd_flow_parse_action(dev, actions, error, dh_flow);
+ if (ret < 0)
+ return -rte_errno;
+ return 0;
+}
+
+struct dh_flow_engine pf_fd_engine = {
+ .apply = pf_fd_hw_apply,
+ .destroy = pf_fd_hw_destroy,
+ .query_count = pf_fd_hw_query_count,
+ .parse_pattern_action = fd_parse_pattern_action,
+ .type = FLOW_TYPE_FD_TCAM,
+};
+
+
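+/*
+ * VF path: flow add/delete/query requests are forwarded to the PF over the
+ * VF-to-PF message channel; the PF programs the hardware and returns the
+ * handle index (and counter values for query) in the reply.
+ */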
+static int
+vf_flow_msg_process(enum zxdh_msg_type msg_type, struct rte_eth_dev *dev,
+ struct zxdh_flow *dh_flow, struct rte_flow_error *error,
+ struct rte_flow_query_count *count)
+{
+ int ret = 0;
+ struct zxdh_hw *hw = dev->data->dev_private;
+ struct zxdh_msg_info msg_info = {0};
+ struct zxdh_flow_op_msg *flow_msg = &msg_info.data.flow_msg;
+
+ uint8_t zxdh_msg_reply_info[ZXDH_ST_SZ_BYTES(msg_reply_info)] = {0};
+ void *reply_body_addr = ZXDH_ADDR_OF(msg_reply_info, zxdh_msg_reply_info, reply_body);
+ void *flow_rsp_addr = ZXDH_ADDR_OF(msg_reply_body, reply_body_addr, flow_rsp);
+ uint8_t flow_op_rsp[sizeof(struct zxdh_flow_op_rsp)] = {0};
+ uint16_t len = sizeof(struct zxdh_flow_op_rsp) - 4;
+ zxdh_adjust_flow_op_rsp_memory_layout(flow_rsp_addr, len, flow_op_rsp);
+ struct zxdh_flow_op_rsp *flow_rsp = (struct zxdh_flow_op_rsp *)flow_op_rsp;
+
+ dh_flow->hash_search_index = hw->hash_search_index;
+ rte_memcpy(&flow_msg->dh_flow, dh_flow, sizeof(struct zxdh_flow));
+
+ zxdh_msg_head_build(hw, msg_type, &msg_info);
+ ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(struct zxdh_msg_info),
+ (void *)zxdh_msg_reply_info, ZXDH_ST_SZ_BYTES(msg_reply_info));
+ if (ret) {
+ PMD_DRV_LOG(ERR, "port %d flow op %d failed ret %d", hw->port_id, msg_type, ret);
+ if (ret == -2) {
+ PMD_DRV_LOG(ERR, "port %d flow %d failed: cause %s",
+ hw->port_id, msg_type, flow_rsp->error.reason);
+ rte_flow_error_set(error, EBUSY,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ flow_rsp->error.reason);
+ } else {
+ rte_flow_error_set(error, EBUSY,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "msg channel error");
+ }
+ return ret;
+ }
+
+ if (msg_type == ZXDH_FLOW_HW_ADD)
+ dh_flow->flowentry.hw_idx = flow_rsp->dh_flow.flowentry.hw_idx;
+ if (count)
+ rte_memcpy((void *)count, &flow_rsp->count, sizeof(flow_rsp->count));
+
+ return ret;
+}
+
+static int
+vf_fd_apply(struct rte_eth_dev *dev, struct zxdh_flow *dh_flow,
+ struct rte_flow_error *error, uint16_t vport __rte_unused,
+ uint16_t pcieid __rte_unused)
+{
+ int ret = 0;
+ struct zxdh_hw *hw = dev->data->dev_private;
+ ret = vf_flow_msg_process(ZXDH_FLOW_HW_ADD, dev, dh_flow, error, NULL);
+ if (!ret) {
+ uint8_t action_bits = dh_flow->flowentry.fd_flow.result.action_idx;
+ if (((action_bits & (1 << FD_ACTION_VXLAN_ENCAP)) != 0) ||
+ ((action_bits & (1 << FD_ACTION_VXLAN_DECAP)) != 0)) {
+ hw->vxlan_fd_num++;
+ if (hw->vxlan_fd_num == 1) {
+ set_vxlan_enable(dev, 1, error);
+ PMD_DRV_LOG(DEBUG, "vf set_vxlan_enable");
+ }
+ }
+ }
+ return ret;
+}
+
+static int
+vf_fd_destroy(struct rte_eth_dev *dev, struct zxdh_flow *dh_flow,
+ struct rte_flow_error *error, uint16_t vport __rte_unused,
+ uint16_t pcieid __rte_unused)
+{
+ int ret = 0;
+ struct zxdh_hw *hw = dev->data->dev_private;
+ ret = vf_flow_msg_process(ZXDH_FLOW_HW_DEL, dev, dh_flow, error, NULL);
+ if (!ret) {
+ uint8_t action_bits = dh_flow->flowentry.fd_flow.result.action_idx;
+ if (((action_bits & (1 << FD_ACTION_VXLAN_ENCAP)) != 0) ||
+ ((action_bits & (1 << FD_ACTION_VXLAN_DECAP)) != 0)) {
+ hw->vxlan_fd_num--;
+ if (hw->vxlan_fd_num == 0) {
+ set_vxlan_enable(dev, 0, error);
+ PMD_DRV_LOG(DEBUG, "vf set_vxlan_disable");
+ }
+ }
+ }
+ return ret;
+}
+
+static int
+vf_fd_query_count(struct rte_eth_dev *dev,
+ struct zxdh_flow *dh_flow,
+ struct rte_flow_query_count *count,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+ ret = vf_flow_msg_process(ZXDH_FLOW_HW_GET, dev, dh_flow, error, count);
+ return ret;
+}
+
+
+static struct dh_flow_engine vf_fd_engine = {
+ .apply = vf_fd_apply,
+ .destroy = vf_fd_destroy,
+ .parse_pattern_action = fd_parse_pattern_action,
+ .query_count = vf_fd_query_count,
+ .type = FLOW_TYPE_FD_TCAM,
+};
+
+void zxdh_flow_init(struct rte_eth_dev *dev)
+{
+ struct zxdh_hw *priv = dev->data->dev_private;
+ if (priv->is_pf)
+ zxdh_register_flow_engine(&pf_fd_engine);
+ else
+ zxdh_register_flow_engine(&vf_fd_engine);
+ TAILQ_INIT(&priv->dh_flow_list);
+}
+
+const struct rte_flow_ops zxdh_flow_ops = {
+ .validate = zxdh_flow_validate,
+ .create = zxdh_flow_create,
+ .destroy = zxdh_flow_destroy,
+ .flush = zxdh_flow_flush,
+ .query = zxdh_flow_query,
+ .dev_dump = zxdh_flow_dev_dump,
+};
+
+int
+zxdh_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_ops **ops)
+{
+ *ops = &zxdh_flow_ops;
+
+ return 0;
+}
+
+void
+zxdh_flow_release(struct rte_eth_dev *dev)
+{
+ struct rte_flow_error error = {0};
+ const struct rte_flow_ops *flow_ops = NULL;
+
+ if (dev->dev_ops && dev->dev_ops->flow_ops_get)
+ dev->dev_ops->flow_ops_get(dev, &flow_ops);
+ if (flow_ops && flow_ops->flush)
+ flow_ops->flush(dev, &error);
+}
diff --git a/drivers/net/zxdh/zxdh_flow.h b/drivers/net/zxdh/zxdh_flow.h
new file mode 100644
index 0000000000..cbcf71b3e1
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_flow.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef ZXDH_FLOW_H
+#define ZXDH_FLOW_H
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/queue.h>
+
+#include <rte_arp.h>
+#include <rte_common.h>
+#include <rte_ether.h>
+#include <rte_icmp.h>
+#include <rte_ip.h>
+#include <rte_sctp.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_byteorder.h>
+#include <rte_flow_driver.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_GROUP 1
+#define ZXDH_MAX_FLOW_NUM 2048
+#define MAX_FLOW_COUNT_NUM ZXDH_MAX_FLOW_NUM
+#define ZXDH_FLOW_GROUP_TCAM 1
+
+#ifndef IPv4_BYTES
+#define IPv4_BYTES_FMT "%" PRIu8 ".%" PRIu8 ".%" PRIu8 ".%" PRIu8
+#define IPv4_BYTES(addr) \
+ (uint8_t)(((addr) >> 24) & 0xFF),\
+ (uint8_t)(((addr) >> 16) & 0xFF),\
+ (uint8_t)(((addr) >> 8) & 0xFF),\
+ (uint8_t)((addr) & 0xFF)
+#endif
+
+#ifndef IPv6_BYTES
+#define IPv6_BYTES_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:" \
+ "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
+#define IPv6_BYTES(addr) \
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], addr[6], addr[7], \
+ addr[8], addr[9], addr[10], addr[11], addr[12], addr[13], addr[14], addr[15]
+#endif
+
+enum {
+ FD_ACTION_VXLAN_ENCAP = 0,
+ FD_ACTION_VXLAN_DECAP = 1,
+ FD_ACTION_RSS_BIT = 2,
+ FD_ACTION_COUNT_BIT = 3,
+ FD_ACTION_DROP_BIT = 4,
+ FD_ACTION_MARK_BIT = 5,
+ FD_ACTION_QUEUE_BIT = 6,
+};
+
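+/* Flow director TCAM key; the key and its mask share this layout. */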
+struct fd_flow_key {
+ struct rte_ether_addr mac_dst; /**< Destination MAC. */
+ struct rte_ether_addr mac_src; /**< Source MAC. */
+ rte_be16_t ether_type; /**< EtherType */
+ union {
+ struct {
+ rte_be16_t cvlan_pri:4; /**< vlan priority */
+ rte_be16_t cvlan_vlanid:12; /**< vlanid 0xfff is valid */
+ };
+ rte_be16_t vlan_tci;
+ };
+
+ uint8_t src_ip[16]; /**< source IP address */
+ uint8_t dst_ip[16]; /**< destination IP address */
+ uint8_t rsv0;
+ union {
+ uint8_t tos;
+ uint8_t tc;
+ };
+ uint8_t nw_proto;
+ uint8_t frag_flag; /**< 1: fragmented, 0: not fragmented */
+ rte_be16_t tp_src;
+ rte_be16_t tp_dst;
+
+ uint8_t rsv1;
+ uint8_t vni[3];
+
+ rte_be16_t vfid;
+ uint8_t rsv2[18];
+};
+
+struct fd_flow_result {
+ rte_le16_t qid;
+ uint8_t rsv0;
+
+ uint8_t action_idx:7;
+ uint8_t hit_flag:1;
+
+ rte_le32_t mark_fd_id;
+ rte_le32_t countid:20;
+ rte_le32_t encap1_index:12;
+
+ rte_le16_t encap0_index:12;
+ rte_le16_t rsv1:4;
+ uint8_t rss_hash_factor;
+ uint8_t rss_hash_alg;
+};
+
+struct fd_flow_entry {
+ struct fd_flow_key key;
+ struct fd_flow_key key_mask;
+ struct fd_flow_result result;
+};
+
+struct flow_stats {
+ uint32_t hit_pkts_hi;
+ uint32_t hit_pkts_lo;
+ uint32_t hit_bytes_hi;
+ uint32_t hit_bytes_lo;
+};
+
+
+enum dh_flow_type {
+ FLOW_TYPE_FLOW = 0,
+ FLOW_TYPE_FD_TCAM,
+ FLOW_TYPE_FD_SW,
+};
+
+struct zxdh_flow_info {
+ enum dh_flow_type flowtype;
+ uint16_t hw_idx;
+ uint16_t rsv;
+ union {
+ struct fd_flow_entry fd_flow;
+ };
+};
+
+struct tunnel_encap_ip {
+ rte_be32_t ip_addr[4];
+};
+
+struct tunnel_encap0 {
+ uint8_t tos;
+ uint8_t rsv2[2];
+ uint8_t rsv1: 6;
+ uint8_t ethtype: 1;
+ uint8_t hit_flag: 1;
+ uint16_t dst_mac1;
+ uint16_t tp_dst;
+ uint32_t dst_mac2;
+ uint32_t ttl:8;
+ uint32_t vni:24;
+ struct tunnel_encap_ip dip;
+};
+
+struct tunnel_encap1 {
+ uint32_t rsv1: 31;
+ uint32_t hit_flag: 1;
+ uint16_t src_mac1;
+ uint16_t vlan_tci;
+ uint32_t src_mac2;
+ uint32_t rsv;
+ struct tunnel_encap_ip sip;
+};
+
+struct zxdh_flow {
+ uint8_t direct; /* direction: 0 ingress, 1 egress */
+ uint8_t group; /* rule group id */
+ uint8_t pri; /* priority */
+ uint8_t hash_search_index;
+ struct zxdh_flow_info flowentry;
+ struct tunnel_encap0 encap0;
+ struct tunnel_encap1 encap1;
+};
+TAILQ_HEAD(dh_flow_list, rte_flow);
+
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) next;
+ void *driver_flow;
+ uint32_t type;
+ uint16_t port_id;
+};
+
+struct count_res {
+ rte_spinlock_t count_lock;
+ uint8_t count_ref;
+ uint8_t rev[3];
+};
+
+/* Struct to store engine created. */
+struct dh_flow_engine {
+ TAILQ_ENTRY(dh_flow_engine) node;
+ enum dh_flow_type type;
+ int (*apply)
+ (struct rte_eth_dev *dev,
+ struct zxdh_flow *dh_flow,
+ struct rte_flow_error *error,
+ uint16_t vport, uint16_t pcieid);
+
+ int (*parse_pattern_action)
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct zxdh_flow *dh_flow);
+
+ int (*destroy)
+ (struct rte_eth_dev *dev,
+ struct zxdh_flow *dh_flow,
+ struct rte_flow_error *error,
+ uint16_t vport, uint16_t pcieid);
+
+ int (*query_count)
+ (struct rte_eth_dev *dev,
+ struct zxdh_flow *dh_flow,
+ struct rte_flow_query_count *count,
+ struct rte_flow_error *error);
+};
+TAILQ_HEAD(dh_engine_list, dh_flow_engine);
+
+void zxdh_register_flow_engine(struct dh_flow_engine *engine);
+
+extern const struct rte_flow_ops zxdh_flow_ops;
+
+void zxdh_flow_global_init(void);
+void zxdh_flow_init(struct rte_eth_dev *dev);
+int pf_fd_hw_apply(struct rte_eth_dev *dev, struct zxdh_flow *dh_flow,
+ struct rte_flow_error *error, uint16_t vport, uint16_t pcieid);
+int pf_fd_hw_destroy(struct rte_eth_dev *dev, struct zxdh_flow *dh_flow,
+ struct rte_flow_error *error, uint16_t vport, uint16_t pcieid);
+int pf_fd_hw_query_count(struct rte_eth_dev *dev,
+ struct zxdh_flow *flow,
+ struct rte_flow_query_count *count,
+ struct rte_flow_error *error);
+int zxdh_flow_ops_get(struct rte_eth_dev *dev, const struct rte_flow_ops **ops);
+void zxdh_flow_release(struct rte_eth_dev *dev);
+
+#endif /* ZXDH_FLOW_H */
diff --git a/drivers/net/zxdh/zxdh_msg.c b/drivers/net/zxdh/zxdh_msg.c
index 02ecd93b12..7e73833bf4 100644
--- a/drivers/net/zxdh/zxdh_msg.c
+++ b/drivers/net/zxdh/zxdh_msg.c
@@ -19,6 +19,7 @@
#include "zxdh_tables.h"
#include "zxdh_np.h"
#include "zxdh_common.h"
+#include "zxdh_flow.h"
#define ZXDH_REPS_INFO_FLAG_USABLE 0x00
#define ZXDH_BAR_SEQID_NUM_MAX 256
@@ -1234,7 +1235,8 @@ zxdh_vf_promisc_uninit(struct zxdh_hw *hw, union zxdh_virport_num vport)
}
static int
-zxdh_vf_port_init(struct zxdh_hw *pf_hw, uint16_t vport, void *cfg_data,
+zxdh_vf_port_init(struct zxdh_hw *pf_hw, uint16_t vport,
+ uint16_t pcieid, void *cfg_data,
void *res_info, uint16_t *res_len)
{
struct zxdh_port_attr_table port_attr = {0};
@@ -1253,6 +1255,9 @@ zxdh_vf_port_init(struct zxdh_hw *pf_hw, uint16_t vport, void *cfg_data,
port_attr.hash_search_index = pf_hw->hash_search_index;
port_attr.port_base_qid = vf_init_msg->base_qid;
uint16_t vfid = zxdh_vport_to_vfid(port);
+ int vf_index = VF_IDX(pcieid);
+
+ pf_hw->vfinfo[vf_index].vport = vport;
ret = zxdh_set_port_attr(pf_hw, vfid, &port_attr);
if (ret) {
@@ -1265,6 +1270,12 @@ zxdh_vf_port_init(struct zxdh_hw *pf_hw, uint16_t vport, void *cfg_data,
PMD_DRV_LOG(ERR, "vf_promisc_table_init failed, code:%d", ret);
goto proc_end;
}
+
+ ret = zxdh_np_dtb_acl_offline_delete(pf_hw->dev_id, pf_hw->dev_sd->dtb_sd.queueid,
+ ZXDH_SDT_FD_TABLE, vport, ZXDH_FLOW_STATS_INGRESS_BASE, 1);
+ if (ret)
+ PMD_DRV_LOG(ERR, "flow table delete failed. code:%d", ret);
+
ZXDH_SET(msg_reply_body, res_info, flag, ZXDH_REPS_SUCC);
*res_len = sizeof(uint8_t);
@@ -1276,30 +1287,30 @@ zxdh_vf_port_init(struct zxdh_hw *pf_hw, uint16_t vport, void *cfg_data,
}
static int
-zxdh_mac_clear(struct zxdh_hw *hw, union zxdh_virport_num vport)
+zxdh_mac_clear(struct zxdh_hw *hw, union zxdh_virport_num vport, uint16_t pcieid)
{
- uint16_t vf_id = vport.vfid;
+ uint16_t vf_index = VF_IDX(pcieid);
int i;
int ret = 0;
for (i = 0; (i != ZXDH_MAX_MAC_ADDRS); ++i) {
- if (!rte_is_zero_ether_addr(&hw->vfinfo[vf_id].vf_mac[i])) {
+ if (!rte_is_zero_ether_addr(&hw->vfinfo[vf_index].vf_mac[i])) {
ret = zxdh_del_mac_table(hw, vport.vport,
- &hw->vfinfo[vf_id].vf_mac[i],
+ &hw->vfinfo[vf_index].vf_mac[i],
hw->hash_search_index, 0, 0);
if (ret) {
PMD_DRV_LOG(ERR, "vf_del_mac_failed. code:%d", ret);
return ret;
}
- memset(&hw->vfinfo[vf_id].vf_mac[i], 0, sizeof(struct rte_ether_addr));
+ memset(&hw->vfinfo[vf_index].vf_mac[i], 0, sizeof(struct rte_ether_addr));
}
}
return ret;
}
static int
-zxdh_vf_port_uninit(struct zxdh_hw *pf_hw,
- uint16_t vport, void *cfg_data __rte_unused,
+zxdh_vf_port_uninit(struct zxdh_hw *pf_hw, uint16_t vport,
+ uint16_t pcieid, void *cfg_data __rte_unused,
void *res_info, uint16_t *res_len)
{
char str[ZXDH_MSG_REPLY_BODY_MAX_LEN] = "uninit";
@@ -1317,7 +1328,7 @@ zxdh_vf_port_uninit(struct zxdh_hw *pf_hw,
goto proc_end;
}
- ret = zxdh_mac_clear(pf_hw, vport_num);
+ ret = zxdh_mac_clear(pf_hw, vport_num, pcieid);
if (ret) {
PMD_DRV_LOG(ERR, "zxdh_mac_clear failed, code:%d", ret);
goto proc_end;
@@ -1342,7 +1353,8 @@ zxdh_vf_port_uninit(struct zxdh_hw *pf_hw,
}
static int
-zxdh_add_vf_mac_table(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
+zxdh_add_vf_mac_table(struct zxdh_hw *hw, uint16_t vport,
+ uint16_t pcieid, void *cfg_data,
void *reply_body, uint16_t *reply_len)
{
char str[ZXDH_MSG_REPLY_BODY_MAX_LEN] = "add mac";
@@ -1350,13 +1362,13 @@ zxdh_add_vf_mac_table(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
struct zxdh_mac_filter *mac_filter = (struct zxdh_mac_filter *)cfg_data;
struct rte_ether_addr *addr = &mac_filter->mac;
int i = 0, ret = 0;
- uint16_t vf_id = port.vfid;
+ uint16_t vf_index = VF_IDX(pcieid);
port.vport = vport;
void *reply_data_addr = ZXDH_ADDR_OF(msg_reply_body, reply_body, reply_data);
void *mac_reply_msg_addr = ZXDH_ADDR_OF(msg_reply_body, reply_body, mac_reply_msg);
for (i = 0; i < ZXDH_MAX_MAC_ADDRS; i++)
- if (rte_is_same_ether_addr(&hw->vfinfo[vf_id].vf_mac[i], addr))
+ if (rte_is_same_ether_addr(&hw->vfinfo[vf_index].vf_mac[i], addr))
goto success;
ret = zxdh_add_mac_table(hw, vport, addr, hw->hash_search_index, 0, 0);
@@ -1372,8 +1384,8 @@ zxdh_add_vf_mac_table(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
goto failure;
}
for (i = 0; i < ZXDH_MAX_MAC_ADDRS; i++) {
- if (rte_is_zero_ether_addr(&hw->vfinfo[vf_id].vf_mac[i])) {
- memcpy(&hw->vfinfo[vf_id].vf_mac[i], addr, 6);
+ if (rte_is_zero_ether_addr(&hw->vfinfo[vf_index].vf_mac[i])) {
+ memcpy(&hw->vfinfo[vf_index].vf_mac[i], addr, 6);
break;
}
}
@@ -1393,14 +1405,15 @@ zxdh_add_vf_mac_table(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
}
static int
-zxdh_del_vf_mac_table(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
- void *res_info, uint16_t *res_len)
+zxdh_del_vf_mac_table(struct zxdh_hw *hw, uint16_t vport,
+ uint16_t pcieid, void *cfg_data,
+ void *res_info, uint16_t *res_len)
{
int ret, i = 0;
struct zxdh_mac_filter *mac_filter = (struct zxdh_mac_filter *)cfg_data;
union zxdh_virport_num port = (union zxdh_virport_num)vport;
char str[ZXDH_MSG_REPLY_BODY_MAX_LEN] = "del mac";
- uint16_t vf_id = port.vfid;
+ uint16_t vf_index = VF_IDX(pcieid);
void *reply_data_addr = ZXDH_ADDR_OF(msg_reply_body, res_info, reply_data);
PMD_DRV_LOG(DEBUG, "[PF GET MSG FROM VF]--vf mac to del.");
@@ -1415,8 +1428,8 @@ zxdh_del_vf_mac_table(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
}
for (i = 0; i < ZXDH_MAX_MAC_ADDRS; i++) {
- if (rte_is_same_ether_addr(&hw->vfinfo[vf_id].vf_mac[i], &mac_filter->mac))
- memset(&hw->vfinfo[vf_id].vf_mac[i], 0, sizeof(struct rte_ether_addr));
+ if (rte_is_same_ether_addr(&hw->vfinfo[vf_index].vf_mac[i], &mac_filter->mac))
+ memset(&hw->vfinfo[vf_index].vf_mac[i], 0, sizeof(struct rte_ether_addr));
}
sprintf(str, "vport 0x%x del mac ret 0x%x\n", port.vport, ret);
@@ -1432,7 +1445,8 @@ zxdh_del_vf_mac_table(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
}
static int
-zxdh_vf_promisc_set(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
+zxdh_vf_promisc_set(struct zxdh_hw *hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
void *reply, uint16_t *res_len)
{
struct zxdh_port_promisc_msg *promisc_msg = (struct zxdh_port_promisc_msg *)cfg_data;
@@ -1463,7 +1477,8 @@ zxdh_vf_promisc_set(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
}
static int
-zxdh_vf_vlan_filter_table_process(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
+zxdh_vf_vlan_filter_table_process(struct zxdh_hw *hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
void *res_info, uint16_t *res_len, uint8_t enable)
{
struct zxdh_vlan_filter *vlan_filter = cfg_data;
@@ -1488,21 +1503,24 @@ zxdh_vf_vlan_filter_table_process(struct zxdh_hw *hw, uint16_t vport, void *cfg_
}
static int
-zxdh_vf_vlan_filter_table_add(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
+zxdh_vf_vlan_filter_table_add(struct zxdh_hw *hw, uint16_t vport,
+ uint16_t pcieid, void *cfg_data,
void *res_info, uint16_t *res_len)
{
- return zxdh_vf_vlan_filter_table_process(hw, vport, cfg_data, res_info, res_len, 1);
+ return zxdh_vf_vlan_filter_table_process(hw, vport, pcieid, cfg_data, res_info, res_len, 1);
}
static int
-zxdh_vf_vlan_filter_table_del(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
+zxdh_vf_vlan_filter_table_del(struct zxdh_hw *hw, uint16_t vport,
+ uint16_t pcieid, void *cfg_data,
void *res_info, uint16_t *res_len)
{
- return zxdh_vf_vlan_filter_table_process(hw, vport, cfg_data, res_info, res_len, 0);
+ return zxdh_vf_vlan_filter_table_process(hw, vport, pcieid, cfg_data, res_info, res_len, 0);
}
static int
-zxdh_vf_set_vlan_filter(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
+zxdh_vf_set_vlan_filter(struct zxdh_hw *hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
void *reply, uint16_t *res_len)
{
struct zxdh_vlan_filter_set *vlan_filter = cfg_data;
@@ -1526,7 +1544,8 @@ zxdh_vf_set_vlan_filter(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
}
static int
-zxdh_vf_set_vlan_offload(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
+zxdh_vf_set_vlan_offload(struct zxdh_hw *hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
void *reply, uint16_t *res_len)
{
struct zxdh_vlan_offload *vlan_offload = cfg_data;
@@ -1553,8 +1572,9 @@ zxdh_vf_set_vlan_offload(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
}
static int
-zxdh_vf_rss_hf_get(struct zxdh_hw *hw, uint16_t vport, void *cfg_data __rte_unused,
- void *reply, uint16_t *res_len)
+zxdh_vf_rss_hf_get(struct zxdh_hw *hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data __rte_unused,
+ void *reply, uint16_t *res_len)
{
char str[ZXDH_MSG_REPLY_BODY_MAX_LEN] = "rss_hf";
struct zxdh_port_attr_table vport_att = {0};
@@ -1582,8 +1602,9 @@ zxdh_vf_rss_hf_get(struct zxdh_hw *hw, uint16_t vport, void *cfg_data __rte_unus
}
static int
-zxdh_vf_rss_hf_set(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
- void *reply, uint16_t *res_len)
+zxdh_vf_rss_hf_set(struct zxdh_hw *hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
+ void *reply, uint16_t *res_len)
{
char str[ZXDH_MSG_REPLY_BODY_MAX_LEN] = "rss_hf";
struct zxdh_rss_hf *rss_hf = cfg_data;
@@ -1618,8 +1639,9 @@ zxdh_vf_rss_hf_set(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
}
static int
-zxdh_vf_rss_enable(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
- void *reply, uint16_t *res_len)
+zxdh_vf_rss_enable(struct zxdh_hw *hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
+ void *reply, uint16_t *res_len)
{
char str[ZXDH_MSG_REPLY_BODY_MAX_LEN] = "rss_enable";
struct zxdh_rss_enable *rss_enable = cfg_data;
@@ -1654,7 +1676,8 @@ zxdh_vf_rss_enable(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
}
static int
-zxdh_vf_rss_table_set(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
+zxdh_vf_rss_table_set(struct zxdh_hw *hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
void *reply, uint16_t *res_len)
{
char str[ZXDH_MSG_REPLY_BODY_MAX_LEN] = "rss_table";
@@ -1676,7 +1699,8 @@ zxdh_vf_rss_table_set(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
}
static int
-zxdh_vf_rss_table_get(struct zxdh_hw *hw, uint16_t vport, void *cfg_data __rte_unused,
+zxdh_vf_rss_table_get(struct zxdh_hw *hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data __rte_unused,
void *reply, uint16_t *res_len)
{
char str[ZXDH_MSG_REPLY_BODY_MAX_LEN] = "rss_table";
@@ -1699,8 +1723,9 @@ zxdh_vf_rss_table_get(struct zxdh_hw *hw, uint16_t vport, void *cfg_data __rte_u
}
static int
-zxdh_vf_port_attr_set(struct zxdh_hw *pf_hw, uint16_t vport, void *cfg_data,
- void *res_info, uint16_t *res_len)
+zxdh_vf_port_attr_set(struct zxdh_hw *pf_hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
+ void *res_info, uint16_t *res_len)
{
RTE_ASSERT(!cfg_data || !pf_hw);
if (res_info)
@@ -1762,8 +1787,8 @@ zxdh_vf_port_attr_set(struct zxdh_hw *pf_hw, uint16_t vport, void *cfg_data,
static int
zxdh_vf_np_stats_update(struct zxdh_hw *pf_hw, uint16_t vport,
- void *cfg_data, void *res_info,
- uint16_t *res_len)
+ uint16_t pcieid __rte_unused, void *cfg_data,
+ void *res_info, uint16_t *res_len)
{
struct zxdh_np_stats_updata_msg *np_stats_query =
(struct zxdh_np_stats_updata_msg *)cfg_data;
@@ -1944,10 +1969,9 @@ zxdh_vf_np_stats_update(struct zxdh_hw *pf_hw, uint16_t vport,
}
static int
-zxdh_vf_mtr_hw_stats_get(struct zxdh_hw *pf_hw,
- uint16_t vport, void *cfg_data,
- void *res_info,
- uint16_t *res_len)
+zxdh_vf_mtr_hw_stats_get(struct zxdh_hw *pf_hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
+ void *res_info, uint16_t *res_len)
{
struct zxdh_mtr_stats_query *zxdh_mtr_stats_query =
(struct zxdh_mtr_stats_query *)cfg_data;
@@ -1977,11 +2001,9 @@ zxdh_vf_mtr_hw_stats_get(struct zxdh_hw *pf_hw,
}
static int
-zxdh_vf_mtr_hw_profile_add(struct zxdh_hw *pf_hw,
- uint16_t vport,
- void *cfg_data,
- void *res_info,
- uint16_t *res_len)
+zxdh_vf_mtr_hw_profile_add(struct zxdh_hw *pf_hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
+ void *res_info, uint16_t *res_len)
{
if (!cfg_data || !res_len || !res_info) {
PMD_DRV_LOG(ERR, " get profileid invalid inparams");
@@ -2017,11 +2039,9 @@ zxdh_vf_mtr_hw_profile_add(struct zxdh_hw *pf_hw,
}
static int
-zxdh_vf_mtr_hw_profile_del(struct zxdh_hw *pf_hw,
- uint16_t vport,
- void *cfg_data,
- void *res_info,
- uint16_t *res_len)
+zxdh_vf_mtr_hw_profile_del(struct zxdh_hw *pf_hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
+ void *res_info, uint16_t *res_len)
{
if (!cfg_data || !res_len || !res_info) {
PMD_DRV_LOG(ERR, " del profileid invalid inparams");
@@ -2059,11 +2079,9 @@ zxdh_vf_mtr_hw_profile_del(struct zxdh_hw *pf_hw,
}
static int
-zxdh_vf_mtr_hw_plcrflow_cfg(struct zxdh_hw *pf_hw,
- uint16_t vport,
- void *cfg_data,
- void *res_info,
- uint16_t *res_len)
+zxdh_vf_mtr_hw_plcrflow_cfg(struct zxdh_hw *pf_hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
+ void *res_info, uint16_t *res_len)
{
int ret = 0;
@@ -2098,11 +2116,9 @@ zxdh_vf_mtr_hw_plcrflow_cfg(struct zxdh_hw *pf_hw,
}
static int
-zxdh_vf_mtr_hw_profile_cfg(struct zxdh_hw *pf_hw __rte_unused,
- uint16_t vport,
- void *cfg_data,
- void *res_info,
- uint16_t *res_len)
+zxdh_vf_mtr_hw_profile_cfg(struct zxdh_hw *pf_hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
+ void *res_info, uint16_t *res_len)
{
int ret = 0;
@@ -2131,6 +2147,121 @@ zxdh_vf_mtr_hw_profile_cfg(struct zxdh_hw *pf_hw __rte_unused,
return 0;
}
+
+static int
+zxdh_vf_flow_hw_add(struct zxdh_hw *pf_hw, uint16_t vport,
+ uint16_t pcieid, void *cfg_data,
+ void *res_info, uint16_t *res_len)
+{
+ if (!cfg_data || !res_len || !res_info) {
+ PMD_DRV_LOG(ERR, "invalid inparams");
+ return -1;
+ }
+ struct rte_flow_error error = {0};
+ int ret = 0;
+ struct zxdh_flow_op_msg *flow_entry = (struct zxdh_flow_op_msg *)cfg_data;
+ struct zxdh_flow *dh_flow;
+ ZXDH_SET(msg_reply_body, res_info, flag, ZXDH_REPS_FAIL);
+ *res_len = sizeof(struct zxdh_flow_op_rsp) - 4;
+
+ ret = pf_fd_hw_apply(pf_hw->eth_dev, &flow_entry->dh_flow, &error, vport, pcieid);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "pf 0x%x for vf 0x%x flow add failed ret :%d",
+ pf_hw->vport.vport, vport, ret);
+ return -1;
+ }
+ void *flow_rsp_addr = ZXDH_ADDR_OF(msg_reply_body, res_info, flow_rsp);
+ dh_flow = flow_rsp_addr;
+ dh_flow->flowentry.hw_idx = flow_entry->dh_flow.flowentry.hw_idx;
+ ZXDH_SET(msg_reply_body, res_info, flag, ZXDH_REPS_SUCC);
+ return 0;
+}
+
+static int
+zxdh_vf_flow_hw_del(struct zxdh_hw *pf_hw, uint16_t vport,
+ uint16_t pcieid, void *cfg_data,
+ void *res_info, uint16_t *res_len)
+{
+ if (!cfg_data || !res_len || !res_info) {
+ PMD_DRV_LOG(ERR, "invalid inparams");
+ return -1;
+ }
+ struct rte_flow_error error = {0};
+ int ret = 0;
+ struct zxdh_flow_op_msg *flow_entry = (struct zxdh_flow_op_msg *)cfg_data;
+ ZXDH_SET(msg_reply_body, res_info, flag, ZXDH_REPS_FAIL);
+ *res_len = sizeof(struct zxdh_flow_op_rsp) - 4;
+
+ ret = pf_fd_hw_destroy(pf_hw->eth_dev, &flow_entry->dh_flow, &error, vport, pcieid);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "pf 0x%x for vf 0x%x flow del failed ret :%d",
+ pf_hw->vport.vport, vport, ret);
+ return -1;
+ }
+ ZXDH_SET(msg_reply_body, res_info, flag, ZXDH_REPS_SUCC);
+ return 0;
+}
+
+static int
+zxdh_vf_flow_hw_get(struct zxdh_hw *pf_hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
+ void *res_info, uint16_t *res_len)
+{
+ if (!cfg_data || !res_len || !res_info) {
+ PMD_DRV_LOG(ERR, "invalid inparams");
+ return -1;
+ }
+
+ void *flow_rsp_addr = ZXDH_ADDR_OF(msg_reply_body, res_info, flow_rsp);
+ void *count_addr = (uint8_t *)flow_rsp_addr + sizeof(struct zxdh_flow);
+ struct rte_flow_error error = {0};
+ int ret = 0;
+ struct zxdh_flow_op_msg *flow_entry = (struct zxdh_flow_op_msg *)cfg_data;
+ struct zxdh_flow *dh_flow;
+
+ ZXDH_SET(msg_reply_body, res_info, flag, ZXDH_REPS_FAIL);
+ *res_len = sizeof(struct zxdh_flow_op_rsp) - 4;
+
+ PMD_DRV_LOG(INFO, "handle %d", flow_entry->dh_flow.flowentry.hw_idx);
+ ret = pf_fd_hw_query_count(pf_hw->eth_dev, &flow_entry->dh_flow, count_addr, &error);
+ if (ret) {
+ PMD_DRV_LOG(DEBUG, "pf 0x%x for vf 0x%x flow get failed ret :%d",
+ pf_hw->vport.vport, vport, ret);
+ return -1;
+ }
+ PMD_DRV_LOG(INFO, " res len :%d", *res_len);
+ dh_flow = flow_rsp_addr;
+ rte_memcpy(&dh_flow->flowentry, &flow_entry->dh_flow.flowentry, sizeof(dh_flow->flowentry));
+ ZXDH_SET(msg_reply_body, res_info, flag, ZXDH_REPS_SUCC);
+ return 0;
+}
+
+static int
+zxdh_vf_flow_hw_flush(struct zxdh_hw *pf_hw, uint16_t vport,
+ uint16_t pcieid __rte_unused, void *cfg_data,
+ void *res_info, uint16_t *res_len)
+{
+ if (!cfg_data || !res_len || !res_info) {
+ PMD_DRV_LOG(ERR, "invalid inparams");
+ return -1;
+ }
+ int ret = 0;
+ uint16_t queue_id = pf_hw->dev_sd->dtb_sd.queueid;
+
+ ZXDH_SET(msg_reply_body, res_info, flag, ZXDH_REPS_FAIL);
+ *res_len = sizeof(struct zxdh_flow_op_rsp) - 4;
+
+ ret = zxdh_np_dtb_acl_offline_delete(pf_hw->dev_id, queue_id, ZXDH_SDT_FD_TABLE,
+ vport, ZXDH_FLOW_STATS_INGRESS_BASE, 1);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "flow flush failed. code:%d", ret);
+ return -1;
+ }
+
+ ZXDH_SET(msg_reply_body, res_info, flag, ZXDH_REPS_SUCC);
+ return 0;
+}
+
static const zxdh_msg_process_callback zxdh_proc_cb[] = {
[ZXDH_NULL] = NULL,
[ZXDH_VF_PORT_INIT] = zxdh_vf_port_init,
@@ -2154,6 +2285,10 @@ static const zxdh_msg_process_callback zxdh_proc_cb[] = {
[ZXDH_PLCR_CAR_PROFILE_ID_DELETE] = zxdh_vf_mtr_hw_profile_del,
[ZXDH_PLCR_CAR_QUEUE_CFG_SET] = zxdh_vf_mtr_hw_plcrflow_cfg,
[ZXDH_PLCR_CAR_PROFILE_CFG_SET] = zxdh_vf_mtr_hw_profile_cfg,
+ [ZXDH_FLOW_HW_ADD] = zxdh_vf_flow_hw_add,
+ [ZXDH_FLOW_HW_DEL] = zxdh_vf_flow_hw_del,
+ [ZXDH_FLOW_HW_GET] = zxdh_vf_flow_hw_get,
+ [ZXDH_FLOW_HW_FLUSH] = zxdh_vf_flow_hw_flush,
};
static inline int
@@ -2168,7 +2303,7 @@ zxdh_config_process_callback(struct zxdh_hw *hw, struct zxdh_msg_info *msg_info,
return -1;
}
if (zxdh_proc_cb[msghead->msg_type]) {
- ret = zxdh_proc_cb[msghead->msg_type](hw, msghead->vport,
+ ret = zxdh_proc_cb[msghead->msg_type](hw, msghead->vport, msghead->pcieid,
(void *)&msg_info->data, res, res_len);
if (!ret)
ZXDH_SET(msg_reply_body, res, flag, ZXDH_REPS_SUCC);
diff --git a/drivers/net/zxdh/zxdh_msg.h b/drivers/net/zxdh/zxdh_msg.h
index 7dad6f7335..c20bb98195 100644
--- a/drivers/net/zxdh/zxdh_msg.h
+++ b/drivers/net/zxdh/zxdh_msg.h
@@ -240,6 +240,11 @@ enum zxdh_msg_type {
ZXDH_PLCR_CAR_QUEUE_CFG_SET = 40,
ZXDH_PORT_METER_STAT_GET = 42,
+ ZXDH_FLOW_HW_ADD = 46,
+ ZXDH_FLOW_HW_DEL = 47,
+ ZXDH_FLOW_HW_GET = 48,
+ ZXDH_FLOW_HW_FLUSH = 49,
+
ZXDH_MSG_TYPE_END,
};
@@ -418,6 +423,21 @@ struct zxdh_ifc_mtr_profile_info_bits {
uint8_t profile_id[0x40];
};
+struct err_reason {
+ uint8_t err_type;
+ uint8_t rsv[3];
+ char reason[512];
+};
+
+struct zxdh_flow_op_rsp {
+ struct zxdh_flow dh_flow;
+ uint8_t rev[4];
+ union {
+ struct rte_flow_query_count count;
+ struct err_reason error;
+ };
+};
+
struct zxdh_ifc_msg_reply_body_bits {
uint8_t flag[0x8];
union {
@@ -432,6 +452,7 @@ struct zxdh_ifc_msg_reply_body_bits {
struct zxdh_ifc_agent_mac_module_eeprom_msg_bits module_eeprom_msg;
struct zxdh_ifc_mtr_profile_info_bits mtr_profile_info;
struct zxdh_ifc_mtr_stats_bits hw_mtr_stats;
+ struct zxdh_flow_op_rsp flow_rsp;
};
};
@@ -535,6 +556,10 @@ struct zxdh_plcr_profile_free {
uint16_t profile_id;
};
+struct zxdh_flow_op_msg {
+ struct zxdh_flow dh_flow;
+};
+
struct zxdh_msg_info {
union {
uint8_t head_len[ZXDH_MSG_HEAD_LEN];
@@ -561,13 +586,15 @@ struct zxdh_msg_info {
struct zxdh_plcr_profile_cfg zxdh_plcr_profile_cfg;
struct zxdh_plcr_flow_cfg zxdh_plcr_flow_cfg;
struct zxdh_mtr_stats_query zxdh_mtr_stats_query;
+ struct zxdh_flow_op_msg flow_msg;
} data;
};
typedef int (*zxdh_bar_chan_msg_recv_callback)(void *pay_load, uint16_t len,
void *reps_buffer, uint16_t *reps_len, void *dev);
-typedef int (*zxdh_msg_process_callback)(struct zxdh_hw *hw, uint16_t vport, void *cfg_data,
- void *res_info, uint16_t *res_len);
+typedef int (*zxdh_msg_process_callback)(struct zxdh_hw *hw, uint16_t vport,
+ uint16_t pcieid, void *cfg_data,
+ void *res_info, uint16_t *res_len);
typedef int (*zxdh_bar_chan_msg_recv_callback)(void *pay_load, uint16_t len,
void *reps_buffer, uint16_t *reps_len, void *dev);
diff --git a/drivers/net/zxdh/zxdh_tables.h b/drivers/net/zxdh/zxdh_tables.h
index cb34e38be8..a227e09962 100644
--- a/drivers/net/zxdh/zxdh_tables.h
+++ b/drivers/net/zxdh/zxdh_tables.h
@@ -7,6 +7,8 @@
#include <stdint.h>
+#include <zxdh_msg.h>
+
/* eram */
#define ZXDH_SDT_VPORT_ATT_TABLE 1
#define ZXDH_SDT_PANEL_ATT_TABLE 2
@@ -16,6 +18,8 @@
#define ZXDH_SDT_UNICAST_ATT_TABLE 10
#define ZXDH_SDT_MULTICAST_ATT_TABLE 11
#define ZXDH_SDT_PORT_VLAN_ATT_TABLE 16
+#define ZXDH_SDT_TUNNEL_ENCAP0_TABLE 28
+#define ZXDH_SDT_TUNNEL_ENCAP1_TABLE 29
/* hash */
#define ZXDH_SDT_L2_ENTRY_TABLE0 64
#define ZXDH_SDT_L2_ENTRY_TABLE1 65
@@ -27,12 +31,14 @@
#define ZXDH_SDT_MC_TABLE2 78
#define ZXDH_SDT_MC_TABLE3 79
+#define ZXDH_SDT_FD_TABLE 130
+
#define ZXDH_PORT_VHCA_FLAG 1
#define ZXDH_PORT_RSS_HASH_FACTOR_FLAG 3
#define ZXDH_PORT_HASH_ALG_FLAG 4
#define ZXDH_PORT_PHY_PORT_FLAG 5
#define ZXDH_PORT_LAG_ID_FLAG 6
-
+#define ZXDH_PORT_VXLAN_OFFLOAD_EN_OFF 7
#define ZXDH_PORT_PF_VQM_VFID_FLAG 8
#define ZXDH_PORT_MTU_FLAG 10
@@ -169,7 +175,7 @@ struct zxdh_port_attr_table {
uint8_t phy_port: 4;
uint16_t lag_id : 3;
- uint16_t rsv81 : 1;
+ uint16_t fd_vxlan_offload_en : 1;
uint16_t pf_vfid : 11;
uint16_t rsv82 : 1;
--
2.27.0
[-- Attachment #1.1.2: Type: text/html , Size: 270900 bytes --]
^ permalink raw reply [flat|nested] 9+ messages in thread
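As background for the hunks above: the PF-side message handlers live in a table indexed by message type, and the series widens the handler signature so the requesting function's pcieid travels with each call into the handler. Below is a minimal, self-contained sketch of that dispatch pattern; the types, names and values are illustrative stand-ins, not the driver's real definitions.

#include <stdint.h>
#include <stdio.h>

enum demo_msg_type { DEMO_FLOW_HW_ADD = 46, DEMO_MSG_TYPE_END = 50 };

struct demo_hw { uint16_t vport; };

/* Handler signature after the change: vport and pcieid of the requester
 * are both forwarded to the per-message-type callback. */
typedef int (*demo_msg_cb)(struct demo_hw *hw, uint16_t vport,
		uint16_t pcieid, void *cfg, void *rsp, uint16_t *rsp_len);

static int
demo_flow_hw_add(struct demo_hw *hw, uint16_t vport, uint16_t pcieid,
		void *cfg, void *rsp, uint16_t *rsp_len)
{
	(void)hw; (void)cfg; (void)rsp;
	*rsp_len = 0;
	printf("flow add request: vport 0x%x pcieid 0x%x\n", vport, pcieid);
	return 0;
}

/* Table indexed by message type, sparse entries default to NULL. */
static const demo_msg_cb demo_cb[DEMO_MSG_TYPE_END] = {
	[DEMO_FLOW_HW_ADD] = demo_flow_hw_add,
};

static int
demo_dispatch(struct demo_hw *hw, uint16_t msg_type, uint16_t vport,
		uint16_t pcieid, void *cfg, void *rsp, uint16_t *rsp_len)
{
	if (msg_type >= DEMO_MSG_TYPE_END || demo_cb[msg_type] == NULL)
		return -1;
	/* pcieid is passed through so a PF handler can tell which VF asked. */
	return demo_cb[msg_type](hw, vport, pcieid, cfg, rsp, rsp_len);
}

int
main(void)
{
	struct demo_hw hw = { .vport = 0x100 };
	uint16_t rsp_len = 0;

	return demo_dispatch(&hw, DEMO_FLOW_HW_ADD, 0x201, 0x31, NULL, NULL, &rsp_len);
}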
* Re: [PATCH v2 2/2] net/zxdh: add support flow director ops
2025-06-18 7:49 ` [PATCH v2 2/2] net/zxdh: add support flow director ops Bingbin Chen
@ 2025-06-30 16:56 ` Stephen Hemminger
0 siblings, 0 replies; 9+ messages in thread
From: Stephen Hemminger @ 2025-06-30 16:56 UTC (permalink / raw)
To: Bingbin Chen; +Cc: wang.junlong1, yang.yonggang, dev
On Wed, 18 Jun 2025 15:49:30 +0800
Bingbin Chen <chen.bingbin@zte.com.cn> wrote:
> Provide support for ETH, VLAN, IPv4/IPv6, TCP/UDP, VXLAN, and mask matching,
> supporting multiple actions including drop/count/mark/queue/rss, and vxlan decap/encap.
>
> Signed-off-by: Bingbin Chen <chen.bingbin@zte.com.cn>
> ---
There are several checkpatch warnings in this file.
These are the ones that should be fixed (ignore complaints about stdio and macro arg reuse, etc).
Please fix and resubmit the patch set for 25.07
WARNING:COMMIT_LOG_LONG_LINE: Possible unwrapped commit description (prefer a maximum 75 chars per line)
#60:
Provide support for ETH, VLAN, IPv4/IPv6, TCP/UDP, VXLAN, and mask matching,
WARNING:TYPO_SPELLING: 'realease' may be misspelled - perhaps 'release'?
#1235: FILE: drivers/net/zxdh/zxdh_flow.c:904:
+ "realease handle_idx to hw failed");
WARNING:TYPO_SPELLING: 'realease' may be misspelled - perhaps 'release'?
#1426: FILE: drivers/net/zxdh/zxdh_flow.c:1095:
+ "realease handle_idx to hw failed");
WARNING:TYPO_SPELLING: 'realease' may be misspelled - perhaps 'release'?
#1429: FILE: drivers/net/zxdh/zxdh_flow.c:1098:
+ PMD_DRV_LOG(DEBUG, "realease handle_idx to hw succ! %d", handle_idx);
ERROR:C99_COMMENTS: do not use C99 // comments
#1620: FILE: drivers/net/zxdh/zxdh_flow.c:1289:
+ //ether_type = rte_be_to_cpu_16(eth_spec->type);
^ permalink raw reply [flat|nested] 9+ messages in thread
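For the matching and action coverage described in the quoted commit message (ETH, VLAN, IPv4/IPv6, TCP/UDP, VXLAN patterns with queue/rss/drop/count/mark actions), a minimal sketch of how an application might exercise it through the generic rte_flow API; the port id, addresses and queue index are illustrative, not taken from the patches.

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_ip.h>
#include <rte_flow.h>

/* Minimal sketch: match IPv4/UDP to a given destination address and
 * steer matching packets to queue 1. Assumes port_id is an already
 * started port whose PMD implements rte_flow. */
static struct rte_flow *
fd_rule_example(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 1)),
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.dst_addr = RTE_BE32(0xffffffff),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Validate first so unsupported item/action combinations are
	 * reported before the rule is programmed into the flow director. */
	if (rte_flow_validate(port_id, &attr, pattern, actions, err) != 0)
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}

A drop, count or mark action would be expressed the same way by swapping the action array entries; the PMD maps them onto the flow director table operations added in patch 1/2.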
end of thread, other threads:[~2025-06-30 16:56 UTC | newest]
Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-06-17 9:31 [PATCH v1 0/2] add support flow director ops Bingbin Chen
2025-06-17 9:32 ` [PATCH v1 1/2] net/zxdh: npsdk add flow director table ops Bingbin Chen
2025-06-17 14:07 ` Stephen Hemminger
2025-06-17 14:08 ` Stephen Hemminger
2025-06-17 9:32 ` [PATCH v1 2/2] net/zxdh: add support flow director ops Bingbin Chen
2025-06-18 7:49 ` [PATCH v2 0/2] " Bingbin Chen
2025-06-18 7:49 ` [PATCH v2 1/2] net/zxdh: npsdk add flow director table ops Bingbin Chen
2025-06-18 7:49 ` [PATCH v2 2/2] net/zxdh: add support flow director ops Bingbin Chen
2025-06-30 16:56 ` Stephen Hemminger