* [dpdk-dev] [PATCH v5 1/4] net/hinic: increase Tx/Rx queues non-null judgment
2020-05-09 4:04 [dpdk-dev] [PATCH v5 0/4] Support ipv6 flow rules Xiaoyun wang
@ 2020-05-09 4:04 ` Xiaoyun wang
2020-05-11 19:09 ` Ferruh Yigit
2020-05-09 4:04 ` [dpdk-dev] [PATCH v5 2/4] net/hinic: add jumbo frame offload flag Xiaoyun wang
` (3 subsequent siblings)
4 siblings, 1 reply; 9+ messages in thread
From: Xiaoyun wang @ 2020-05-09 4:04 UTC (permalink / raw)
To: dev
Cc: ferruh.yigit, bluca, luoxianjun, luoxingyu, zhouguoyang,
shahar.belkar, yin.yinshi, david.yangxiaoliang, zhaohui8,
zhengjingzhou, Xiaoyun wang, stable
Add non-null checks on tx_queues and rx_queues before freeing Tx or Rx
resources, and clean up some log and comment formatting.
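For reference, a minimal sketch of the guarded Rx free loop this change
produces, reconstructed from the hunks below; the q_id declaration and the
elided per-queue cleanup are assumptions, not copied from the driver:

void hinic_free_all_rx_resources(struct rte_eth_dev *eth_dev)
{
	struct hinic_nic_dev *nic_dev =
		HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
	uint16_t q_id;

	for (q_id = 0; q_id < nic_dev->num_rq; q_id++) {
		/* The ethdev layer may already have released the rx_queues
		 * array, so check it before clearing the per-queue entry.
		 */
		if (eth_dev->data->rx_queues != NULL)
			eth_dev->data->rx_queues[q_id] = NULL;

		if (nic_dev->rxqs[q_id] == NULL)
			continue;

		/* ... free Rx mbufs and ring memory for rxqs[q_id] ... */
	}
}

The same guard applies to tx_queues in hinic_free_all_tx_resources().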
Fixes: 54faba2295bd ("net/hinic: adds Tx queue xstats members")
Cc: stable@dpdk.org
Signed-off-by: Xiaoyun wang <cloud.wangxiaoyun@huawei.com>
---
drivers/net/hinic/base/hinic_pmd_cmdq.h | 2 +-
drivers/net/hinic/base/hinic_pmd_hwif.c | 2 +-
drivers/net/hinic/base/hinic_pmd_nicio.c | 1 -
drivers/net/hinic/hinic_pmd_ethdev.c | 13 ++++++-------
drivers/net/hinic/hinic_pmd_rx.c | 3 ++-
drivers/net/hinic/hinic_pmd_tx.c | 5 ++++-
6 files changed, 14 insertions(+), 12 deletions(-)
diff --git a/drivers/net/hinic/base/hinic_pmd_cmdq.h b/drivers/net/hinic/base/hinic_pmd_cmdq.h
index 4ce0a4c..0d5e380 100644
--- a/drivers/net/hinic/base/hinic_pmd_cmdq.h
+++ b/drivers/net/hinic/base/hinic_pmd_cmdq.h
@@ -9,7 +9,7 @@
#define HINIC_SCMD_DATA_LEN 16
-/* hiovs pmd use 64, kernel l2nic use 4096 */
+/* pmd driver uses 64, kernel l2nic uses 4096 */
#define HINIC_CMDQ_DEPTH 64
#define HINIC_CMDQ_BUF_SIZE 2048U
diff --git a/drivers/net/hinic/base/hinic_pmd_hwif.c b/drivers/net/hinic/base/hinic_pmd_hwif.c
index 63fba0d..1839df0 100644
--- a/drivers/net/hinic/base/hinic_pmd_hwif.c
+++ b/drivers/net/hinic/base/hinic_pmd_hwif.c
@@ -99,7 +99,7 @@ void hinic_set_pf_status(struct hinic_hwif *hwif, enum hinic_pf_status status)
u32 addr = HINIC_CSR_FUNC_ATTR5_ADDR;
if (hwif->attr.func_type == TYPE_VF) {
- PMD_DRV_LOG(ERR, "VF doesn't support set attr5");
+ PMD_DRV_LOG(INFO, "VF doesn't support set attr5");
return;
}
diff --git a/drivers/net/hinic/base/hinic_pmd_nicio.c b/drivers/net/hinic/base/hinic_pmd_nicio.c
index 60c4e14..7f7e11d 100644
--- a/drivers/net/hinic/base/hinic_pmd_nicio.c
+++ b/drivers/net/hinic/base/hinic_pmd_nicio.c
@@ -369,7 +369,6 @@ static int init_rq_ctxts(struct hinic_nic_io *nic_io)
HINIC_MOD_L2NIC,
HINIC_UCODE_CMD_MDY_QUEUE_CONTEXT,
cmd_buf, &out_param, 0);
-
if ((err) || out_param != 0) {
PMD_DRV_LOG(ERR, "Failed to set RQ ctxts");
err = -EFAULT;
diff --git a/drivers/net/hinic/hinic_pmd_ethdev.c b/drivers/net/hinic/hinic_pmd_ethdev.c
index cfbca64..5fcff81 100644
--- a/drivers/net/hinic/hinic_pmd_ethdev.c
+++ b/drivers/net/hinic/hinic_pmd_ethdev.c
@@ -354,7 +354,7 @@ static int hinic_dev_configure(struct rte_eth_dev *dev)
return err;
}
- /*clear fdir filter flag in function table*/
+ /* clear fdir filter flag in function table */
hinic_free_fdir_filter(nic_dev);
return HINIC_OK;
@@ -440,7 +440,7 @@ static int hinic_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
nic_dev->rxqs[queue_idx] = rxq;
- /* alloc rx sq hw wqepage*/
+ /* alloc rx sq hw wqepage */
rc = hinic_create_rq(hwdev, queue_idx, rq_depth, socket_id);
if (rc) {
PMD_DRV_LOG(ERR, "Create rxq[%d] failed, dev_name: %s, rq_depth: %d",
@@ -2070,7 +2070,7 @@ static int hinic_rss_indirtbl_update(struct rte_eth_dev *dev,
indirtbl[i] = reta_conf[idx].reta[shift];
}
- for (i = 0 ; i < reta_size; i++) {
+ for (i = 0; i < reta_size; i++) {
if (indirtbl[i] >= nic_dev->num_rq) {
PMD_DRV_LOG(ERR, "Invalid reta entry, index: %d, num_rq: %d",
i, nic_dev->num_rq);
@@ -2300,8 +2300,7 @@ static int hinic_dev_xstats_get_names(struct rte_eth_dev *dev,
for (i = 0; i < HINIC_VPORT_XSTATS_NUM; i++) {
snprintf(xstats_names[count].name,
sizeof(xstats_names[count].name),
- "%s",
- hinic_vport_stats_strings[i].name);
+ "%s", hinic_vport_stats_strings[i].name);
count++;
}
@@ -2312,13 +2311,13 @@ static int hinic_dev_xstats_get_names(struct rte_eth_dev *dev,
for (i = 0; i < HINIC_PHYPORT_XSTATS_NUM; i++) {
snprintf(xstats_names[count].name,
sizeof(xstats_names[count].name),
- "%s",
- hinic_phyport_stats_strings[i].name);
+ "%s", hinic_phyport_stats_strings[i].name);
count++;
}
return count;
}
+
/**
* DPDK callback to set mac address
*
diff --git a/drivers/net/hinic/hinic_pmd_rx.c b/drivers/net/hinic/hinic_pmd_rx.c
index 4ca74f0..a49769a 100644
--- a/drivers/net/hinic/hinic_pmd_rx.c
+++ b/drivers/net/hinic/hinic_pmd_rx.c
@@ -413,7 +413,8 @@ void hinic_free_all_rx_resources(struct rte_eth_dev *eth_dev)
HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
for (q_id = 0; q_id < nic_dev->num_rq; q_id++) {
- eth_dev->data->rx_queues[q_id] = NULL;
+ if (eth_dev->data->rx_queues != NULL)
+ eth_dev->data->rx_queues[q_id] = NULL;
if (nic_dev->rxqs[q_id] == NULL)
continue;
diff --git a/drivers/net/hinic/hinic_pmd_tx.c b/drivers/net/hinic/hinic_pmd_tx.c
index 258f2c1..bd39f93 100644
--- a/drivers/net/hinic/hinic_pmd_tx.c
+++ b/drivers/net/hinic/hinic_pmd_tx.c
@@ -313,6 +313,8 @@ static inline struct rte_mbuf *hinic_copy_tx_mbuf(struct hinic_nic_dev *nic_dev,
mbuf = mbuf->next;
}
+ dst_mbuf->pkt_len = dst_mbuf->data_len;
+
return dst_mbuf;
}
@@ -1217,7 +1219,8 @@ void hinic_free_all_tx_resources(struct rte_eth_dev *eth_dev)
HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
for (q_id = 0; q_id < nic_dev->num_sq; q_id++) {
- eth_dev->data->tx_queues[q_id] = NULL;
+ if (eth_dev->data->tx_queues != NULL)
+ eth_dev->data->tx_queues[q_id] = NULL;
if (nic_dev->txqs[q_id] == NULL)
continue;
--
1.8.3.1
* [dpdk-dev] [PATCH v5 3/4] net/hinic: increase judgment for support NIC or not
2020-05-09 4:04 [dpdk-dev] [PATCH v5 0/4] Support ipv6 flow rules Xiaoyun wang
2020-05-09 4:04 ` [dpdk-dev] [PATCH v5 1/4] net/hinic: increase Tx/Rx queues non-null judgment Xiaoyun wang
2020-05-09 4:04 ` [dpdk-dev] [PATCH v5 2/4] net/hinic: add jumbo frame offload flag Xiaoyun wang
@ 2020-05-09 4:04 ` Xiaoyun wang
2020-05-09 4:04 ` [dpdk-dev] [PATCH v5 4/4] net/hinic/base: support ipv6 flow rules Xiaoyun wang
2020-05-11 19:04 ` [dpdk-dev] [PATCH v5 0/4] Support " Ferruh Yigit
4 siblings, 0 replies; 9+ messages in thread
From: Xiaoyun wang @ 2020-05-09 4:04 UTC (permalink / raw)
To: dev
Cc: ferruh.yigit, bluca, luoxianjun, luoxingyu, zhouguoyang,
shahar.belkar, yin.yinshi, david.yangxiaoliang, zhaohui8,
zhengjingzhou, Xiaoyun wang
If the hardware mode of the card does not support the NIC service,
the card's network features cannot be used and driver initialization
fails.
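For context, a hedged sketch of why this matters: with chip_svc_type now
taken from the firmware's svc_cap_en instead of being hard-coded to
CFG_SVC_NIC_BIT0, a capability test of roughly the shape below (the real
hinic_support_nic() body is not part of this patch, so this is only an
assumed form) can actually fail on cards whose hardware mode excludes the
NIC service:

#include <stdbool.h>

/* Assumed shape of the NIC capability check; struct service_cap and
 * CFG_SVC_NIC_BIT0 come from the driver's hinic_pmd_cfg.h.
 */
static bool hinic_nic_service_enabled(const struct service_cap *cap)
{
	/* NIC service bit within the chip service type reported by firmware */
	return (cap->chip_svc_type & CFG_SVC_NIC_BIT0) != 0;
}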
Signed-off-by: Xiaoyun wang <cloud.wangxiaoyun@huawei.com>
---
drivers/net/hinic/base/hinic_pmd_cfg.c | 3 ++-
drivers/net/hinic/hinic_pmd_ethdev.c | 6 +++++-
2 files changed, 7 insertions(+), 2 deletions(-)
diff --git a/drivers/net/hinic/base/hinic_pmd_cfg.c b/drivers/net/hinic/base/hinic_pmd_cfg.c
index aa883e0..2d25dc9 100644
--- a/drivers/net/hinic/base/hinic_pmd_cfg.c
+++ b/drivers/net/hinic/base/hinic_pmd_cfg.c
@@ -129,7 +129,7 @@ static void hinic_parse_pub_res_cap(struct service_cap *cap,
cap->max_rqs = dev_cap->nic_max_rq;
}
- cap->chip_svc_type = CFG_SVC_NIC_BIT0;
+ cap->chip_svc_type = dev_cap->svc_cap_en;
cap->host_total_function = dev_cap->host_total_func;
cap->host_oq_id_mask_val = dev_cap->host_oq_id_mask_val;
@@ -140,6 +140,7 @@ static void hinic_parse_pub_res_cap(struct service_cap *cap,
PMD_DRV_LOG(INFO, "host_total_function: 0x%x, host_oq_id_mask_val: 0x%x, max_vf: 0x%x",
cap->host_total_function, cap->host_oq_id_mask_val,
cap->max_vf);
+ PMD_DRV_LOG(INFO, "chip_svc_type: 0x%x", cap->chip_svc_type);
PMD_DRV_LOG(INFO, "pf_num: 0x%x, pf_id_start: 0x%x, vf_num: 0x%x, vf_id_start: 0x%x",
cap->pf_num, cap->pf_id_start,
cap->vf_num, cap->vf_id_start);
diff --git a/drivers/net/hinic/hinic_pmd_ethdev.c b/drivers/net/hinic/hinic_pmd_ethdev.c
index 85e7c3c..cea026a 100644
--- a/drivers/net/hinic/hinic_pmd_ethdev.c
+++ b/drivers/net/hinic/hinic_pmd_ethdev.c
@@ -2811,8 +2811,12 @@ static int hinic_nic_dev_create(struct rte_eth_dev *eth_dev)
}
/* get nic capability */
- if (!hinic_support_nic(nic_dev->hwdev, &nic_dev->nic_cap))
+ if (!hinic_support_nic(nic_dev->hwdev, &nic_dev->nic_cap)) {
+ PMD_DRV_LOG(ERR, "Hw doesn't support nic, dev_name: %s",
+ eth_dev->data->name);
+ rc = -EINVAL;
goto nic_check_fail;
+ }
/* init root cla and function table */
rc = hinic_init_nicio(nic_dev->hwdev);
--
1.8.3.1
* [dpdk-dev] [PATCH v5 4/4] net/hinic/base: support ipv6 flow rules
2020-05-09 4:04 [dpdk-dev] [PATCH v5 0/4] Support ipv6 flow rules Xiaoyun wang
` (2 preceding siblings ...)
2020-05-09 4:04 ` [dpdk-dev] [PATCH v5 3/4] net/hinic: increase judgment for support NIC or not Xiaoyun wang
@ 2020-05-09 4:04 ` Xiaoyun wang
2020-05-11 20:12 ` Ferruh Yigit
2020-05-11 19:04 ` [dpdk-dev] [PATCH v5 0/4] Support " Ferruh Yigit
4 siblings, 1 reply; 9+ messages in thread
From: Xiaoyun wang @ 2020-05-09 4:04 UTC (permalink / raw)
To: dev
Cc: ferruh.yigit, bluca, luoxianjun, luoxingyu, zhouguoyang,
shahar.belkar, yin.yinshi, david.yangxiaoliang, zhaohui8,
zhengjingzhou, Xiaoyun wang
This patch adds support for IPv6 flow rules matching BGP or ICMP packets.
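For illustration, a hedged usage sketch (not part of this patch) of the kind
of rte_flow rule the new IPv6 path is meant to accept: match a fully masked
IPv6 destination address plus TCP destination port 179 (BGP) and steer the
traffic to a queue. The destination address, queue index and helper name are
made up for the example:

#include <string.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

static struct rte_flow *add_ipv6_bgp_rule(uint16_t port_id,
					  struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv6 ip6_spec, ip6_mask;
	struct rte_flow_item_tcp tcp_spec, tcp_mask;
	struct rte_flow_action_queue queue = { .index = 0 };

	memset(&ip6_spec, 0, sizeof(ip6_spec));
	memset(&ip6_mask, 0, sizeof(ip6_mask));
	memset(&tcp_spec, 0, sizeof(tcp_spec));
	memset(&tcp_mask, 0, sizeof(tcp_mask));

	/* Only the destination address may be matched; mask all 16 bytes
	 * and fill ip6_spec.hdr.dst_addr with the wanted address.
	 */
	memset(&ip6_mask.hdr.dst_addr, 0xff, sizeof(ip6_mask.hdr.dst_addr));

	tcp_spec.hdr.dst_port = rte_cpu_to_be_16(179);	/* BGP */
	tcp_mask.hdr.dst_port = RTE_BE16(0xffff);

	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV6,
		  .spec = &ip6_spec, .mask = &ip6_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	if (rte_flow_validate(port_id, &attr, pattern, actions, err))
		return NULL;

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}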
Signed-off-by: Xiaoyun wang <cloud.wangxiaoyun@huawei.com>
---
drivers/net/hinic/hinic_pmd_ethdev.h | 57 +++++++++-
drivers/net/hinic/hinic_pmd_flow.c | 209 ++++++++++++++++++++++++++++++++---
2 files changed, 249 insertions(+), 17 deletions(-)
diff --git a/drivers/net/hinic/hinic_pmd_ethdev.h b/drivers/net/hinic/hinic_pmd_ethdev.h
index 3322fb9..1cb389d 100644
--- a/drivers/net/hinic/hinic_pmd_ethdev.h
+++ b/drivers/net/hinic/hinic_pmd_ethdev.h
@@ -99,6 +99,7 @@ struct hinic_hw_fdir_mask {
uint16_t tunnel_flag;
uint16_t tunnel_inner_src_port_mask;
uint16_t tunnel_inner_dst_port_mask;
+ uint16_t dst_ipv6_mask;
};
/* Flow Director attribute */
@@ -111,6 +112,7 @@ struct hinic_atr_input {
uint16_t tunnel_flag;
uint16_t tunnel_inner_src_port;
uint16_t tunnel_inner_dst_port;
+ uint8_t dst_ipv6[16];
};
enum hinic_fdir_mode {
@@ -191,9 +193,60 @@ struct tag_tcam_key_mem {
#endif
};
+struct tag_tcam_key_ipv6_mem {
+#if (RTE_BYTE_ORDER == RTE_BIG_ENDIAN)
+ u32 rsvd0:16;
+ u32 ipv6_flag:1;
+ u32 protocol:7;
+ u32 function_id:8;
+
+ u32 dst_port:16;
+ u32 ipv6_key0:16;
+
+ u32 ipv6_key1:16;
+ u32 ipv6_key2:16;
+
+ u32 ipv6_key3:16;
+ u32 ipv6_key4:16;
+
+ u32 ipv6_key5:16;
+ u32 ipv6_key6:16;
+
+ u32 ipv6_key7:16;
+ u32 rsvd2:16;
+#else
+ u32 function_id:8;
+ u32 protocol:7;
+ u32 ipv6_flag:1;
+ u32 rsvd0:16;
+
+ u32 ipv6_key0:16;
+ u32 dst_port:16;
+
+ u32 ipv6_key2:16;
+ u32 ipv6_key1:16;
+
+ u32 ipv6_key4:16;
+ u32 ipv6_key3:16;
+
+ u32 ipv6_key6:16;
+ u32 ipv6_key5:16;
+
+ u32 rsvd2:16;
+ u32 ipv6_key7:16;
+#endif
+};
+
struct tag_tcam_key {
- struct tag_tcam_key_mem key_info;
- struct tag_tcam_key_mem key_mask;
+ union {
+ struct tag_tcam_key_mem key_info;
+ struct tag_tcam_key_ipv6_mem key_info_ipv6;
+ };
+
+ union {
+ struct tag_tcam_key_mem key_mask;
+ struct tag_tcam_key_ipv6_mem key_mask_ipv6;
+ };
};
struct hinic_fdir_rule {
diff --git a/drivers/net/hinic/hinic_pmd_flow.c b/drivers/net/hinic/hinic_pmd_flow.c
index e620df0..cc0744d 100644
--- a/drivers/net/hinic/hinic_pmd_flow.c
+++ b/drivers/net/hinic/hinic_pmd_flow.c
@@ -49,6 +49,7 @@
#define IP_HEADER_PROTOCOL_TYPE_TCP 6
#define IP_HEADER_PROTOCOL_TYPE_UDP 17
#define IP_HEADER_PROTOCOL_TYPE_ICMP 1
+#define IP_HEADER_PROTOCOL_TYPE_ICMPV6 58
#define FDIR_TCAM_NORMAL_PACKET 0
#define FDIR_TCAM_TUNNEL_PACKET 1
@@ -62,6 +63,9 @@
#define TCAM_PKT_BGP_DPORT 3
#define TCAM_PKT_LACP 4
+#define TCAM_DIP_IPV4_TYPE 0
+#define TCAM_DIP_IPV6_TYPE 1
+
#define BGP_DPORT_ID 179
#define IPPROTO_VRRP 112
@@ -836,7 +840,8 @@ static int hinic_normal_item_check_ether(const struct rte_flow_item **ip_item,
}
/* Check if the next not void item is IPv4 */
item = next_no_void_pattern(pattern, item);
- if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"Not supported by fdir filter,support mac,ipv4");
@@ -855,7 +860,10 @@ static int hinic_normal_item_check_ip(const struct rte_flow_item **in_out_item,
{
const struct rte_flow_item_ipv4 *ipv4_spec;
const struct rte_flow_item_ipv4 *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec;
+ const struct rte_flow_item_ipv6 *ipv6_mask;
const struct rte_flow_item *item = *in_out_item;
+ int i;
/* Get the IPv4 info */
if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
@@ -921,6 +929,76 @@ static int hinic_normal_item_check_ip(const struct rte_flow_item **in_out_item,
"Not supported by fdir filter, support tcp, udp, end");
return -rte_errno;
}
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct hinic_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid fdir filter mask");
+ return -rte_errno;
+ }
+
+ ipv6_mask = (const struct rte_flow_item_ipv6 *)item->mask;
+
+ /* Only support dst addresses, others should be masked */
+ if (ipv6_mask->hdr.vtc_flow ||
+ ipv6_mask->hdr.payload_len ||
+ ipv6_mask->hdr.proto ||
+ ipv6_mask->hdr.hop_limits) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Not supported by fdir filter, support dst ipv6");
+ return -rte_errno;
+ }
+
+ /* check ipv6 src addr mask, ipv6 src addr is 16 bytes */
+ for (i = 0; i < 16; i++) {
+ if (ipv6_mask->hdr.src_addr[i] == UINT8_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Not supported by fdir filter, do not support src ipv6");
+ return -rte_errno;
+ }
+ }
+
+ if (!item->spec) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Not supported by fdir filter, ipv6 spec is NULL");
+ return -rte_errno;
+ }
+
+ for (i = 0; i < 16; i++) {
+ if (ipv6_mask->hdr.dst_addr[i] == UINT8_MAX)
+ rule->mask.dst_ipv6_mask |= 1 << i;
+ }
+
+ ipv6_spec = (const struct rte_flow_item_ipv6 *)item->spec;
+ rte_memcpy(rule->hinic_fdir.dst_ipv6,
+ ipv6_spec->hdr.dst_addr, 16);
+
+ /*
+ * Check if the next not void item is TCP or UDP or ICMP.
+ */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_ICMP &&
+ item->type != RTE_FLOW_ITEM_TYPE_ICMP6){
+ memset(rule, 0, sizeof(struct hinic_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Not supported by fdir filter, support tcp, udp, icmp");
+ return -rte_errno;
+ }
}
*in_out_item = item;
@@ -937,7 +1015,7 @@ static int hinic_normal_item_check_l4(const struct rte_flow_item **in_out_item,
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Not supported by normal fdir filter,not support l4");
+ item, "Not supported by normal fdir filter, not support l4");
return -rte_errno;
}
@@ -954,7 +1032,7 @@ static int hinic_normal_item_check_end(const struct rte_flow_item *item,
memset(rule, 0, sizeof(struct hinic_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Not supported by fdir filter,support end");
+ item, "Not supported by fdir filter, support end");
return -rte_errno;
}
@@ -967,9 +1045,9 @@ static int hinic_check_normal_item_ele(const struct rte_flow_item *item,
struct rte_flow_error *error)
{
if (hinic_normal_item_check_ether(&item, pattern, error) ||
- hinic_normal_item_check_ip(&item, pattern, rule, error) ||
- hinic_normal_item_check_l4(&item, pattern, rule, error) ||
- hinic_normal_item_check_end(item, rule, error))
+ hinic_normal_item_check_ip(&item, pattern, rule, error) ||
+ hinic_normal_item_check_l4(&item, pattern, rule, error) ||
+ hinic_normal_item_check_end(item, rule, error))
return -rte_errno;
return 0;
@@ -991,6 +1069,10 @@ static int hinic_check_normal_item_ele(const struct rte_flow_item *item,
rule->mode = HINIC_FDIR_MODE_TCAM;
rule->mask.proto_mask = UINT16_MAX;
rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_ICMP;
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_ICMP6) {
+ rule->mode = HINIC_FDIR_MODE_TCAM;
+ rule->mask.proto_mask = UINT16_MAX;
+ rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_ICMPV6;
} else if (item->type == RTE_FLOW_ITEM_TYPE_ANY) {
rule->mode = HINIC_FDIR_MODE_TCAM;
} else if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
@@ -2378,7 +2460,7 @@ static inline int hinic_add_del_fdir_filter(struct rte_eth_dev *dev,
false, fdir_info.fdir_key, true,
fdir_info.fdir_flag);
if (ret) {
- PMD_DRV_LOG(ERR, "Del fdir filter ailed, flag: 0x%x, qid: 0x%x, key: 0x%x",
+ PMD_DRV_LOG(ERR, "Del fdir filter failed, flag: 0x%x, qid: 0x%x, key: 0x%x",
fdir_info.fdir_flag, fdir_info.qid,
fdir_info.fdir_key);
return -ENOENT;
@@ -2420,10 +2502,9 @@ static void tcam_key_calculate(struct tag_tcam_key *tcam_key,
TCAM_FLOW_KEY_SIZE);
}
-static int hinic_fdir_tcam_info_init(struct rte_eth_dev *dev,
- struct hinic_fdir_rule *rule,
- struct tag_tcam_key *tcam_key,
- struct tag_tcam_cfg_rule *fdir_tcam_rule)
+static int hinic_fdir_tcam_ipv4_init(struct rte_eth_dev *dev,
+ struct hinic_fdir_rule *rule,
+ struct tag_tcam_key *tcam_key)
{
struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
@@ -2502,8 +2583,106 @@ static int hinic_fdir_tcam_info_init(struct rte_eth_dev *dev,
}
tcam_key->key_mask.function_id = UINT16_MAX;
+ tcam_key->key_info.function_id =
+ hinic_global_func_id(nic_dev->hwdev) & 0x7fff;
+
+ return 0;
+}
+
+static int hinic_fdir_tcam_ipv6_init(struct rte_eth_dev *dev,
+ struct hinic_fdir_rule *rule,
+ struct tag_tcam_key *tcam_key)
+{
+ struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
- tcam_key->key_info.function_id = hinic_global_func_id(nic_dev->hwdev);
+ switch (rule->mask.dst_ipv6_mask) {
+ case UINT16_MAX:
+ tcam_key->key_info_ipv6.ipv6_key0 =
+ ((rule->hinic_fdir.dst_ipv6[0] << 8) & 0xff00) |
+ rule->hinic_fdir.dst_ipv6[1];
+ tcam_key->key_info_ipv6.ipv6_key1 =
+ ((rule->hinic_fdir.dst_ipv6[2] << 8) & 0xff00) |
+ rule->hinic_fdir.dst_ipv6[3];
+ tcam_key->key_info_ipv6.ipv6_key2 =
+ ((rule->hinic_fdir.dst_ipv6[4] << 8) & 0xff00) |
+ rule->hinic_fdir.dst_ipv6[5];
+ tcam_key->key_info_ipv6.ipv6_key3 =
+ ((rule->hinic_fdir.dst_ipv6[6] << 8) & 0xff00) |
+ rule->hinic_fdir.dst_ipv6[7];
+ tcam_key->key_info_ipv6.ipv6_key4 =
+ ((rule->hinic_fdir.dst_ipv6[8] << 8) & 0xff00) |
+ rule->hinic_fdir.dst_ipv6[9];
+ tcam_key->key_info_ipv6.ipv6_key5 =
+ ((rule->hinic_fdir.dst_ipv6[10] << 8) & 0xff00) |
+ rule->hinic_fdir.dst_ipv6[11];
+ tcam_key->key_info_ipv6.ipv6_key6 =
+ ((rule->hinic_fdir.dst_ipv6[12] << 8) & 0xff00) |
+ rule->hinic_fdir.dst_ipv6[13];
+ tcam_key->key_info_ipv6.ipv6_key7 =
+ ((rule->hinic_fdir.dst_ipv6[14] << 8) & 0xff00) |
+ rule->hinic_fdir.dst_ipv6[15];
+ tcam_key->key_mask_ipv6.ipv6_key0 = UINT16_MAX;
+ tcam_key->key_mask_ipv6.ipv6_key1 = UINT16_MAX;
+ tcam_key->key_mask_ipv6.ipv6_key2 = UINT16_MAX;
+ tcam_key->key_mask_ipv6.ipv6_key3 = UINT16_MAX;
+ tcam_key->key_mask_ipv6.ipv6_key4 = UINT16_MAX;
+ tcam_key->key_mask_ipv6.ipv6_key5 = UINT16_MAX;
+ tcam_key->key_mask_ipv6.ipv6_key6 = UINT16_MAX;
+ tcam_key->key_mask_ipv6.ipv6_key7 = UINT16_MAX;
+ break;
+
+ case 0:
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "invalid dst_ipv6 mask");
+ return -EINVAL;
+ }
+
+ if (rule->mask.dst_port_mask > 0) {
+ tcam_key->key_info_ipv6.dst_port = rule->hinic_fdir.dst_port;
+ tcam_key->key_mask_ipv6.dst_port = rule->mask.dst_port_mask;
+ }
+
+ switch (rule->mask.proto_mask) {
+ case UINT16_MAX:
+ tcam_key->key_info_ipv6.protocol =
+ (rule->hinic_fdir.proto) & 0x7F;
+ tcam_key->key_mask_ipv6.protocol = 0x7F;
+ break;
+
+ case 0:
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "invalid tunnel flag mask");
+ return -EINVAL;
+ }
+
+ tcam_key->key_info_ipv6.ipv6_flag = 1;
+ tcam_key->key_mask_ipv6.ipv6_flag = 1;
+
+ tcam_key->key_mask_ipv6.function_id = UINT8_MAX;
+ tcam_key->key_info_ipv6.function_id =
+ (u8)hinic_global_func_id(nic_dev->hwdev);
+
+ return 0;
+}
+
+static int hinic_fdir_tcam_info_init(struct rte_eth_dev *dev,
+ struct hinic_fdir_rule *rule,
+ struct tag_tcam_key *tcam_key,
+ struct tag_tcam_cfg_rule *fdir_tcam_rule)
+{
+ int ret = -1;
+
+ if (rule->mask.dst_ipv4_mask == UINT32_MAX)
+ ret = hinic_fdir_tcam_ipv4_init(dev, rule, tcam_key);
+ else if (rule->mask.dst_ipv6_mask == UINT16_MAX)
+ ret = hinic_fdir_tcam_ipv6_init(dev, rule, tcam_key);
+
+ if (ret < 0)
+ return ret;
fdir_tcam_rule->data.qid = rule->queue;
@@ -2695,7 +2874,7 @@ static int hinic_add_del_tcam_fdir_filter(struct rte_eth_dev *dev,
ret = hinic_fdir_tcam_info_init(dev, rule, &tcam_key, &fdir_tcam_rule);
if (ret) {
- PMD_DRV_LOG(ERR, "Init hiovs fdir info failed!");
+ PMD_DRV_LOG(ERR, "Init hinic fdir info failed!");
return ret;
}
@@ -2711,7 +2890,7 @@ static int hinic_add_del_tcam_fdir_filter(struct rte_eth_dev *dev,
}
if (add) {
- tcam_filter = rte_zmalloc("hiovs_5tuple_filter",
+ tcam_filter = rte_zmalloc("hinic_5tuple_filter",
sizeof(struct hinic_tcam_filter), 0);
if (tcam_filter == NULL)
return -ENOMEM;
@@ -2728,7 +2907,7 @@ static int hinic_add_del_tcam_fdir_filter(struct rte_eth_dev *dev,
rule->tcam_index = fdir_tcam_rule.index;
} else {
- PMD_DRV_LOG(ERR, "Begin to hiovs_del_tcam_filter");
+ PMD_DRV_LOG(INFO, "begin to hinic_del_tcam_filter");
ret = hinic_del_tcam_filter(dev, tcam_filter);
if (ret < 0)
return ret;
--
1.8.3.1