* [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF
@ 2020-03-13 2:07 Wei Zhao
2020-03-13 2:08 ` [dpdk-dev] [PATCH 1/7] net/ice: enable switch flow on DCF Wei Zhao
` (7 more replies)
0 siblings, 8 replies; 69+ messages in thread
From: Wei Zhao @ 2020-03-13 2:07 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, xiaolong.ye
A DCF (Device Config Function) framework has been added for Intel
devices; this patch set adds switch filter support for it, and the set
also fixes bugs which block this feature.
This patchset is based on:
[1] https://patchwork.dpdk.org/cover/66480/ : add Intel DCF PMD support
Depends-on: series-8859
Wei Zhao (7):
net/ice: enable switch flow on DCF
net/ice: support for more PPPoE input set
net/ice: change switch parser to support flexible mask
net/ice: add support for MAC VLAN rule
net/ice: change default tunnel type
net/ice: add action number check for switch
net/ice: fix input set of VLAN item
config/common_linux | 1 +
drivers/net/ice/ice_dcf_ethdev.c | 10 +-
drivers/net/ice/ice_dcf_parent.c | 7 +
drivers/net/ice/ice_fdir_filter.c | 6 +
drivers/net/ice/ice_generic_flow.c | 13 +
drivers/net/ice/ice_generic_flow.h | 9 +
drivers/net/ice/ice_hash.c | 6 +
drivers/net/ice/ice_switch_filter.c | 461 ++++++++++++++++++----------
8 files changed, 348 insertions(+), 165 deletions(-)
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH 1/7] net/ice: enable switch flow on DCF
2020-03-13 2:07 [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF Wei Zhao
@ 2020-03-13 2:08 ` Wei Zhao
2020-03-13 2:08 ` [dpdk-dev] [PATCH 2/7] net/ice: support for more PPPoE input set Wei Zhao
` (6 subsequent siblings)
7 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-03-13 2:08 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, xiaolong.ye, Wei Zhao
DCF on CVL is a control plane VF which takes responsibility for
configuring all the PF/global resources. This patch adds support for
DCF to program forwarding rules that direct packets to VFs.
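As a rough illustration only (not code from this patch; the port and VF
ids are assumptions), a forwarding rule programmed through the DCF could
be built like the minimal sketch below:

    #include <rte_flow.h>

    /* Minimal sketch: forward IPv4 traffic seen on the DCF port to VF 1
     * using the rte_flow VF action. All ids and values are illustrative.
     */
    static struct rte_flow *
    dcf_fwd_to_vf_sketch(uint16_t dcf_port_id, struct rte_flow_error *err)
    {
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_vf vf = { .id = 1 }; /* target VF (assumed) */
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        return rte_flow_create(dcf_port_id, &attr, pattern, actions, err);
    }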
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_dcf_ethdev.c | 10 ++++++--
drivers/net/ice/ice_dcf_parent.c | 7 ++++++
drivers/net/ice/ice_fdir_filter.c | 6 +++++
drivers/net/ice/ice_hash.c | 6 +++++
drivers/net/ice/ice_switch_filter.c | 39 ++++++++++++++++++++++++++++-
5 files changed, 65 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index f65b962d4..759d92afb 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -115,8 +115,8 @@ ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
static int
ice_dcf_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
- __rte_unused enum rte_filter_op filter_op,
- __rte_unused void *arg)
+ enum rte_filter_op filter_op,
+ void *arg)
{
int ret = 0;
@@ -124,6 +124,12 @@ ice_dcf_dev_filter_ctrl(struct rte_eth_dev *dev,
return -EINVAL;
switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &ice_flow_ops;
+ break;
+
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index bca9cd34a..c2dc13936 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -314,6 +314,12 @@ ice_dcf_init_parent_adapter(struct rte_eth_dev *eth_dev)
}
parent_adapter->active_pkg_type = ice_load_pkg_type(parent_hw);
+ err = ice_flow_init(parent_adapter);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Failed to initialize flow");
+ goto uninit_hw;
+ }
+
ice_dcf_update_vf_vsi_map(parent_hw,
hw->num_vfs, hw->vf_vsi_map);
@@ -344,5 +350,6 @@ ice_dcf_uninit_parent_adapter(struct rte_eth_dev *eth_dev)
rte_eal_alarm_cancel(ice_dcf_vsi_update_service_handler,
&adapter->real_hw);
+ ice_flow_uninit(parent_adapter);
ice_dcf_uninit_parent_hw(parent_hw);
}
diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index d737c1acd..c9343c1fa 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -1061,6 +1061,9 @@ ice_fdir_init(struct ice_adapter *ad)
struct ice_flow_parser *parser;
int ret;
+ if (ad->hw.dcf_enabled)
+ return 0;
+
ret = ice_fdir_setup(pf);
if (ret)
return ret;
@@ -1081,6 +1084,9 @@ ice_fdir_uninit(struct ice_adapter *ad)
struct ice_pf *pf = &ad->pf;
struct ice_flow_parser *parser;
+ if (ad->hw.dcf_enabled)
+ return;
+
if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
parser = &ice_fdir_parser_comms;
else
diff --git a/drivers/net/ice/ice_hash.c b/drivers/net/ice/ice_hash.c
index d891538bd..69d805248 100644
--- a/drivers/net/ice/ice_hash.c
+++ b/drivers/net/ice/ice_hash.c
@@ -243,6 +243,9 @@ ice_hash_init(struct ice_adapter *ad)
{
struct ice_flow_parser *parser = NULL;
+ if (ad->hw.dcf_enabled)
+ return 0;
+
if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
parser = &ice_hash_parser_os;
else if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
@@ -560,6 +563,9 @@ ice_hash_destroy(struct ice_adapter *ad,
static void
ice_hash_uninit(struct ice_adapter *ad)
{
+ if (ad->hw.dcf_enabled)
+ return;
+
if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
ice_unregister_parser(&ice_hash_parser_os, ad);
else if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 4a9356b31..c55e44e1a 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -913,6 +913,39 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
return 0;
}
+static int
+ice_switch_parse_dcf_action(const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct ice_adv_rule_info *rule_info)
+{
+ const struct rte_flow_action_vf *act_vf;
+ const struct rte_flow_action *action;
+ enum rte_flow_action_type action_type;
+
+ for (action = actions; action->type !=
+ RTE_FLOW_ACTION_TYPE_END; action++) {
+ action_type = action->type;
+ switch (action_type) {
+ case RTE_FLOW_ACTION_TYPE_VF:
+ rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
+ act_vf = action->conf;
+ rule_info->sw_act.vsi_handle = act_vf->id;
+ break;
+ default:
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "Invalid action type or queue number");
+ return -rte_errno;
+ }
+ }
+
+ rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
+ rule_info->rx = 1;
+ rule_info->priority = 5;
+
+ return 0;
+}
static int
ice_switch_parse_action(struct ice_pf *pf,
@@ -1081,7 +1114,11 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
goto error;
}
- ret = ice_switch_parse_action(pf, actions, error, &rule_info);
+ if (ad->hw.dcf_enabled)
+ ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
+ else
+ ret = ice_switch_parse_action(pf, actions, error, &rule_info);
+
if (ret) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH 2/7] net/ice: support for more PPPoE input set
2020-03-13 2:07 [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF Wei Zhao
2020-03-13 2:08 ` [dpdk-dev] [PATCH 1/7] net/ice: enable switch flow on DCF Wei Zhao
@ 2020-03-13 2:08 ` Wei Zhao
2020-03-13 2:08 ` [dpdk-dev] [PATCH 3/7] net/ice: change switch parser to support flexible mask Wei Zhao
` (5 subsequent siblings)
7 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-03-13 2:08 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, xiaolong.ye, Wei Zhao
This patch adds more support for PPPoE packets:
it enables the switch filter to direct PPPoE packets based on
session ID and PPP protocol type.
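For illustration only (the values and the helper name are assumptions,
not from this patch), a rule matching a given PPPoE session carrying
IPv4 (PPP protocol 0x0021) could be built roughly as:

    #include <rte_byteorder.h>
    #include <rte_flow.h>

    /* Sketch: match PPPoE session 0x0001 with PPP protocol 0x0021 (IPv4)
     * and forward it to VF 2. All ids are illustrative.
     */
    static struct rte_flow *
    pppoe_session_rule_sketch(uint16_t port_id, struct rte_flow_error *err)
    {
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_pppoe pppoe_spec = {
            .session_id = RTE_BE16(0x0001),
        };
        struct rte_flow_item_pppoe pppoe_mask = {
            .session_id = RTE_BE16(0xffff),
        };
        struct rte_flow_item_pppoe_proto_id proto_spec = {
            .proto_id = RTE_BE16(0x0021),
        };
        struct rte_flow_item_pppoe_proto_id proto_mask = {
            .proto_id = RTE_BE16(0xffff),
        };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_PPPOES,
              .spec = &pppoe_spec, .mask = &pppoe_mask },
            { .type = RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID,
              .spec = &proto_spec, .mask = &proto_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_vf vf = { .id = 2 };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        return rte_flow_create(port_id, &attr, pattern, actions, err);
    }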
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
config/common_linux | 1 +
drivers/net/ice/ice_generic_flow.c | 13 +++++
drivers/net/ice/ice_generic_flow.h | 9 ++++
drivers/net/ice/ice_switch_filter.c | 82 +++++++++++++++++++++++++++--
4 files changed, 100 insertions(+), 5 deletions(-)
diff --git a/config/common_linux b/config/common_linux
index 816810671..c6630d2bd 100644
--- a/config/common_linux
+++ b/config/common_linux
@@ -8,6 +8,7 @@ CONFIG_RTE_EXEC_ENV_LINUX=y
CONFIG_RTE_EXEC_ENV_LINUXAPP=y
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
+CONFIG_RTE_EAL_IGB_UIO=y
CONFIG_RTE_EAL_VFIO=y
CONFIG_RTE_LIBRTE_KNI=y
CONFIG_RTE_LIBRTE_PMD_KNI=y
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 38ac799d8..af0fff814 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -1122,12 +1122,25 @@ enum rte_flow_item_type pattern_eth_pppoes[] = {
RTE_FLOW_ITEM_TYPE_PPPOES,
RTE_FLOW_ITEM_TYPE_END,
};
+enum rte_flow_item_type pattern_eth_pppoes_proto[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_PPPOES,
+ RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID,
+ RTE_FLOW_ITEM_TYPE_END,
+};
enum rte_flow_item_type pattern_eth_vlan_pppoes[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_VLAN,
RTE_FLOW_ITEM_TYPE_PPPOES,
RTE_FLOW_ITEM_TYPE_END,
};
+enum rte_flow_item_type pattern_eth_vlan_pppoes_proto[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_PPPOES,
+ RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID,
+ RTE_FLOW_ITEM_TYPE_END,
+};
enum rte_flow_item_type pattern_eth_qinq_pppoes[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_VLAN,
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
index adc30ee2a..f1139c690 100644
--- a/drivers/net/ice/ice_generic_flow.h
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -30,6 +30,7 @@
#define ICE_PROT_VXLAN (1ULL << 19)
#define ICE_PROT_NVGRE (1ULL << 20)
#define ICE_PROT_GTPU (1ULL << 21)
+#define ICE_PROT_PPPoE (1ULL << 22)
/* field */
@@ -49,6 +50,8 @@
#define ICE_NVGRE_TNI (1ULL << 50)
#define ICE_GTPU_TEID (1ULL << 49)
#define ICE_GTPU_QFI (1ULL << 48)
+#define ICE_PPPOE_SESSION (1ULL << 47)
+#define ICE_PPPOE_PROTO (1ULL << 46)
/* input set */
@@ -177,6 +180,10 @@
(ICE_PROT_GTPU | ICE_GTPU_TEID)
#define ICE_INSET_GTPU_QFI \
(ICE_PROT_GTPU | ICE_GTPU_QFI)
+#define ICE_INSET_PPPOE_SESSION \
+ (ICE_PROT_PPPoE | ICE_PPPOE_SESSION)
+#define ICE_INSET_PPPOE_PROTO \
+ (ICE_PROT_PPPoE | ICE_PPPOE_PROTO)
/* empty pattern */
extern enum rte_flow_item_type pattern_empty[];
@@ -349,7 +356,9 @@ extern enum rte_flow_item_type pattern_eth_pppoed[];
extern enum rte_flow_item_type pattern_eth_vlan_pppoed[];
extern enum rte_flow_item_type pattern_eth_qinq_pppoed[];
extern enum rte_flow_item_type pattern_eth_pppoes[];
+extern enum rte_flow_item_type pattern_eth_pppoes_proto[];
extern enum rte_flow_item_type pattern_eth_vlan_pppoes[];
+extern enum rte_flow_item_type pattern_eth_vlan_pppoes_proto[];
extern enum rte_flow_item_type pattern_eth_qinq_pppoes[];
extern enum rte_flow_item_type pattern_eth_pppoes_ipv4[];
extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4[];
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index c55e44e1a..39b5c7266 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -87,7 +87,11 @@
ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_MAC_PPPOE ( \
ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
- ICE_INSET_DMAC | ICE_INSET_ETHERTYPE)
+ ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
+#define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
+ ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
+ ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
+ ICE_INSET_PPPOE_PROTO)
struct sw_meta {
struct ice_adv_lkup_elem *list;
@@ -135,6 +139,10 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
{pattern_eth_vlan_pppoes,
ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
+ {pattern_eth_pppoes_proto,
+ ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
+ {pattern_eth_vlan_pppoes_proto,
+ ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
};
static struct
@@ -316,12 +324,15 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
+ const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
+ *pppoe_proto_mask;
uint8_t ipv6_addr_mask[16] = {
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
uint64_t input_set = ICE_INSET_NONE;
uint16_t j, t = 0;
uint16_t tunnel_valid = 0;
+ uint16_t pppoe_valid = 0;
for (item = pattern; item->type !=
@@ -885,14 +896,75 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
pppoe_mask = item->mask;
/* Check if PPPoE item is used to describe protocol.
* If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
*/
- if (pppoe_spec || pppoe_mask) {
+ if ((!pppoe_spec && pppoe_mask) ||
+ (pppoe_spec && !pppoe_mask)) {
rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid pppoe item");
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid pppoe item");
return 0;
}
+ if (pppoe_spec && pppoe_mask) {
+ /* Check pppoe mask and update input set */
+ if (pppoe_mask->length ||
+ pppoe_mask->code ||
+ pppoe_mask->version_type) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid pppoe mask");
+ return 0;
+ }
+ list[t].type = ICE_PPPOE;
+ if (pppoe_mask->session_id == UINT16_MAX) {
+ list[t].h_u.pppoe_hdr.session_id =
+ pppoe_spec->session_id;
+ list[t].m_u.pppoe_hdr.session_id =
+ UINT16_MAX;
+ input_set |= ICE_INSET_PPPOE_SESSION;
+ }
+ t++;
+ pppoe_valid = 1;
+ } else if (!pppoe_spec && !pppoe_mask) {
+ list[t].type = ICE_PPPOE;
+ }
+
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
+ pppoe_proto_spec = item->spec;
+ pppoe_proto_mask = item->mask;
+ /* Check if PPPoE optional proto_id item
+ * is used to describe protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
+ */
+ if ((!pppoe_proto_spec && pppoe_proto_mask) ||
+ (pppoe_proto_spec && !pppoe_proto_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid pppoe proto item");
+ return 0;
+ }
+ if (pppoe_proto_spec && pppoe_proto_mask) {
+ if (pppoe_valid)
+ t--;
+ list[t].type = ICE_PPPOE;
+ if (pppoe_proto_mask->proto_id == UINT16_MAX) {
+ list[t].h_u.pppoe_hdr.ppp_prot_id =
+ pppoe_proto_spec->proto_id;
+ list[t].m_u.pppoe_hdr.ppp_prot_id =
+ UINT16_MAX;
+ input_set |= ICE_INSET_PPPOE_PROTO;
+ }
+ t++;
+ } else if (!pppoe_proto_spec && !pppoe_proto_mask) {
+ list[t].type = ICE_PPPOE;
+ }
+
break;
case RTE_FLOW_ITEM_TYPE_VOID:
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH 3/7] net/ice: change switch parser to support flexible mask
2020-03-13 2:07 [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF Wei Zhao
2020-03-13 2:08 ` [dpdk-dev] [PATCH 1/7] net/ice: enable switch flow on DCF Wei Zhao
2020-03-13 2:08 ` [dpdk-dev] [PATCH 2/7] net/ice: support for more PPPoE input set Wei Zhao
@ 2020-03-13 2:08 ` Wei Zhao
2020-03-13 2:08 ` [dpdk-dev] [PATCH 4/7] net/ice: add support for MAC VLAN rule Wei Zhao
` (4 subsequent siblings)
7 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-03-13 2:08 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, xiaolong.ye, Wei Zhao
DCF needs to support configuration of flexible masks, that is,
input set masks that are not the all-ones 0xFFFF type. For example, in
order to direct L2/IP multicast packets the mask for the source IP may
be 0xF0000000; this patch enables the switch filter parser for it.
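As a hedged example of what the parser accepts after this change (the
addresses are illustrative assumptions), an IPv4 item may now carry a
partial source-address mask:

    #include <rte_byteorder.h>
    #include <rte_flow.h>

    /* Sketch: match IPv4 sources whose top nibble is 0xE (the multicast
     * range 224.0.0.0/4) with the partial mask 0xF0000000.
     */
    struct rte_flow_item_ipv4 ip_spec = {
        .hdr.src_addr = RTE_BE32(0xE0000000), /* 224.0.0.0 */
    };
    struct rte_flow_item_ipv4 ip_mask = {
        .hdr.src_addr = RTE_BE32(0xF0000000), /* top 4 bits only */
    };
    struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4,
          .spec = &ip_spec, .mask = &ip_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };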
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_switch_filter.c | 298 +++++++++++++---------------
1 file changed, 133 insertions(+), 165 deletions(-)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 39b5c7266..af7e9cb0b 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -326,9 +326,6 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
*pppoe_proto_mask;
- uint8_t ipv6_addr_mask[16] = {
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
uint64_t input_set = ICE_INSET_NONE;
uint16_t j, t = 0;
uint16_t tunnel_valid = 0;
@@ -351,19 +348,29 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
eth_spec = item->spec;
eth_mask = item->mask;
if (eth_spec && eth_mask) {
- if (tunnel_valid &&
- rte_is_broadcast_ether_addr(&eth_mask->src))
- input_set |= ICE_INSET_TUN_SMAC;
- else if (
- rte_is_broadcast_ether_addr(&eth_mask->src))
- input_set |= ICE_INSET_SMAC;
- if (tunnel_valid &&
- rte_is_broadcast_ether_addr(&eth_mask->dst))
- input_set |= ICE_INSET_TUN_DMAC;
- else if (
- rte_is_broadcast_ether_addr(&eth_mask->dst))
- input_set |= ICE_INSET_DMAC;
- if (eth_mask->type == RTE_BE16(0xffff))
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+ if (eth_mask->src.addr_bytes[j]) {
+ if (tunnel_valid)
+ input_set |=
+ ICE_INSET_TUN_SMAC;
+ else
+ input_set |=
+ ICE_INSET_SMAC;
+ break;
+ }
+ }
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+ if (eth_mask->dst.addr_bytes[j]) {
+ if (tunnel_valid)
+ input_set |=
+ ICE_INSET_TUN_DMAC;
+ else
+ input_set |=
+ ICE_INSET_DMAC;
+ break;
+ }
+ }
+ if (eth_mask->type)
input_set |= ICE_INSET_ETHERTYPE;
list[t].type = (tunnel_valid == 0) ?
ICE_MAC_OFOS : ICE_MAC_IL;
@@ -373,16 +380,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
h = &list[t].h_u.eth_hdr;
m = &list[t].m_u.eth_hdr;
for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
- if (eth_mask->src.addr_bytes[j] ==
- UINT8_MAX) {
+ if (eth_mask->src.addr_bytes[j]) {
h->src_addr[j] =
eth_spec->src.addr_bytes[j];
m->src_addr[j] =
eth_mask->src.addr_bytes[j];
i = 1;
}
- if (eth_mask->dst.addr_bytes[j] ==
- UINT8_MAX) {
+ if (eth_mask->dst.addr_bytes[j]) {
h->dst_addr[j] =
eth_spec->dst.addr_bytes[j];
m->dst_addr[j] =
@@ -392,17 +397,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (i)
t++;
- if (eth_mask->type == UINT16_MAX) {
+ if (eth_mask->type) {
list[t].type = ICE_ETYPE_OL;
list[t].h_u.ethertype.ethtype_id =
eth_spec->type;
list[t].m_u.ethertype.ethtype_id =
- UINT16_MAX;
+ eth_mask->type;
t++;
}
- } else if (!eth_spec && !eth_mask) {
- list[t].type = (tun_type == ICE_NON_TUN) ?
- ICE_MAC_OFOS : ICE_MAC_IL;
}
break;
@@ -423,81 +425,68 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (tunnel_valid) {
- if (ipv4_mask->hdr.type_of_service ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.type_of_service)
input_set |=
ICE_INSET_TUN_IPV4_TOS;
- if (ipv4_mask->hdr.src_addr ==
- UINT32_MAX)
+ if (ipv4_mask->hdr.src_addr)
input_set |=
ICE_INSET_TUN_IPV4_SRC;
- if (ipv4_mask->hdr.dst_addr ==
- UINT32_MAX)
+ if (ipv4_mask->hdr.dst_addr)
input_set |=
ICE_INSET_TUN_IPV4_DST;
- if (ipv4_mask->hdr.time_to_live ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.time_to_live)
input_set |=
ICE_INSET_TUN_IPV4_TTL;
- if (ipv4_mask->hdr.next_proto_id ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.next_proto_id)
input_set |=
ICE_INSET_TUN_IPV4_PROTO;
} else {
- if (ipv4_mask->hdr.src_addr ==
- UINT32_MAX)
+ if (ipv4_mask->hdr.src_addr)
input_set |= ICE_INSET_IPV4_SRC;
- if (ipv4_mask->hdr.dst_addr ==
- UINT32_MAX)
+ if (ipv4_mask->hdr.dst_addr)
input_set |= ICE_INSET_IPV4_DST;
- if (ipv4_mask->hdr.time_to_live ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.time_to_live)
input_set |= ICE_INSET_IPV4_TTL;
- if (ipv4_mask->hdr.next_proto_id ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.next_proto_id)
input_set |=
ICE_INSET_IPV4_PROTO;
- if (ipv4_mask->hdr.type_of_service ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.type_of_service)
input_set |=
ICE_INSET_IPV4_TOS;
}
list[t].type = (tunnel_valid == 0) ?
ICE_IPV4_OFOS : ICE_IPV4_IL;
- if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+ if (ipv4_mask->hdr.src_addr) {
list[t].h_u.ipv4_hdr.src_addr =
ipv4_spec->hdr.src_addr;
list[t].m_u.ipv4_hdr.src_addr =
- UINT32_MAX;
+ ipv4_mask->hdr.src_addr;
}
- if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+ if (ipv4_mask->hdr.dst_addr) {
list[t].h_u.ipv4_hdr.dst_addr =
ipv4_spec->hdr.dst_addr;
list[t].m_u.ipv4_hdr.dst_addr =
- UINT32_MAX;
+ ipv4_mask->hdr.dst_addr;
}
- if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+ if (ipv4_mask->hdr.time_to_live) {
list[t].h_u.ipv4_hdr.time_to_live =
ipv4_spec->hdr.time_to_live;
list[t].m_u.ipv4_hdr.time_to_live =
- UINT8_MAX;
+ ipv4_mask->hdr.time_to_live;
}
- if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+ if (ipv4_mask->hdr.next_proto_id) {
list[t].h_u.ipv4_hdr.protocol =
ipv4_spec->hdr.next_proto_id;
list[t].m_u.ipv4_hdr.protocol =
- UINT8_MAX;
+ ipv4_mask->hdr.next_proto_id;
}
- if (ipv4_mask->hdr.type_of_service ==
- UINT8_MAX) {
+ if (ipv4_mask->hdr.type_of_service) {
list[t].h_u.ipv4_hdr.tos =
ipv4_spec->hdr.type_of_service;
- list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
+ list[t].m_u.ipv4_hdr.tos =
+ ipv4_mask->hdr.type_of_service;
}
t++;
- } else if (!ipv4_spec && !ipv4_mask) {
- list[t].type = (tunnel_valid == 0) ?
- ICE_IPV4_OFOS : ICE_IPV4_IL;
}
break;
@@ -514,51 +503,58 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (tunnel_valid) {
- if (!memcmp(ipv6_mask->hdr.src_addr,
- ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.src_addr)))
+ for (j = 0; j < ICE_IPV6_ADDR_LENGTH;
+ j++) {
+ if (ipv6_mask->hdr.src_addr[j]) {
input_set |=
- ICE_INSET_TUN_IPV6_SRC;
- if (!memcmp(ipv6_mask->hdr.dst_addr,
- ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.dst_addr)))
+ ICE_INSET_TUN_IPV6_SRC;
+ break;
+ }
+ }
+ for (j = 0; j < ICE_IPV6_ADDR_LENGTH;
+ j++) {
+ if (ipv6_mask->hdr.dst_addr[j]) {
input_set |=
- ICE_INSET_TUN_IPV6_DST;
- if (ipv6_mask->hdr.proto == UINT8_MAX)
+ ICE_INSET_TUN_IPV6_DST;
+ break;
+ }
+ }
+ if (ipv6_mask->hdr.proto)
input_set |=
ICE_INSET_TUN_IPV6_NEXT_HDR;
- if (ipv6_mask->hdr.hop_limits ==
- UINT8_MAX)
+ if (ipv6_mask->hdr.hop_limits)
input_set |=
ICE_INSET_TUN_IPV6_HOP_LIMIT;
- if ((ipv6_mask->hdr.vtc_flow &
+ if (ipv6_mask->hdr.vtc_flow &
rte_cpu_to_be_32
(RTE_IPV6_HDR_TC_MASK))
- == rte_cpu_to_be_32
- (RTE_IPV6_HDR_TC_MASK))
input_set |=
ICE_INSET_TUN_IPV6_TC;
} else {
- if (!memcmp(ipv6_mask->hdr.src_addr,
- ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.src_addr)))
+ for (j = 0; j < ICE_IPV6_ADDR_LENGTH;
+ j++) {
+ if (ipv6_mask->hdr.src_addr[j]) {
input_set |= ICE_INSET_IPV6_SRC;
- if (!memcmp(ipv6_mask->hdr.dst_addr,
- ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.dst_addr)))
- input_set |= ICE_INSET_IPV6_DST;
- if (ipv6_mask->hdr.proto == UINT8_MAX)
+ break;
+ }
+ }
+ for (j = 0; j < ICE_IPV6_ADDR_LENGTH;
+ j++) {
+ if (ipv6_mask->hdr.dst_addr[j]) {
+ input_set |=
+ ICE_INSET_IPV6_DST;
+ break;
+ }
+ }
+ if (ipv6_mask->hdr.proto)
input_set |=
ICE_INSET_IPV6_NEXT_HDR;
- if (ipv6_mask->hdr.hop_limits ==
- UINT8_MAX)
+ if (ipv6_mask->hdr.hop_limits)
input_set |=
ICE_INSET_IPV6_HOP_LIMIT;
- if ((ipv6_mask->hdr.vtc_flow &
+ if (ipv6_mask->hdr.vtc_flow &
rte_cpu_to_be_32
(RTE_IPV6_HDR_TC_MASK))
- == rte_cpu_to_be_32
- (RTE_IPV6_HDR_TC_MASK))
input_set |= ICE_INSET_IPV6_TC;
}
list[t].type = (tunnel_valid == 0) ?
@@ -568,35 +564,33 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
f = &list[t].h_u.ipv6_hdr;
s = &list[t].m_u.ipv6_hdr;
for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
- if (ipv6_mask->hdr.src_addr[j] ==
- UINT8_MAX) {
+ if (ipv6_mask->hdr.src_addr[j]) {
f->src_addr[j] =
ipv6_spec->hdr.src_addr[j];
s->src_addr[j] =
ipv6_mask->hdr.src_addr[j];
}
- if (ipv6_mask->hdr.dst_addr[j] ==
- UINT8_MAX) {
+ if (ipv6_mask->hdr.dst_addr[j]) {
f->dst_addr[j] =
ipv6_spec->hdr.dst_addr[j];
s->dst_addr[j] =
ipv6_mask->hdr.dst_addr[j];
}
}
- if (ipv6_mask->hdr.proto == UINT8_MAX) {
+ if (ipv6_mask->hdr.proto) {
f->next_hdr =
ipv6_spec->hdr.proto;
- s->next_hdr = UINT8_MAX;
+ s->next_hdr =
+ ipv6_mask->hdr.proto;
}
- if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+ if (ipv6_mask->hdr.hop_limits) {
f->hop_limit =
ipv6_spec->hdr.hop_limits;
- s->hop_limit = UINT8_MAX;
+ s->hop_limit =
+ ipv6_mask->hdr.hop_limits;
}
- if ((ipv6_mask->hdr.vtc_flow &
+ if (ipv6_mask->hdr.vtc_flow &
rte_cpu_to_be_32
- (RTE_IPV6_HDR_TC_MASK))
- == rte_cpu_to_be_32
(RTE_IPV6_HDR_TC_MASK)) {
struct ice_le_ver_tc_flow vtf;
vtf.u.fld.version = 0;
@@ -606,13 +600,13 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
RTE_IPV6_HDR_TC_MASK) >>
RTE_IPV6_HDR_TC_SHIFT;
f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
- vtf.u.fld.tc = UINT8_MAX;
+ vtf.u.fld.tc = (rte_be_to_cpu_32
+ (ipv6_mask->hdr.vtc_flow) &
+ RTE_IPV6_HDR_TC_MASK) >>
+ RTE_IPV6_HDR_TC_SHIFT;
s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
}
t++;
- } else if (!ipv6_spec && !ipv6_mask) {
- list[t].type = (tun_type == ICE_NON_TUN) ?
- ICE_IPV4_OFOS : ICE_IPV4_IL;
}
break;
@@ -631,21 +625,17 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (tunnel_valid) {
- if (udp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (udp_mask->hdr.src_port)
input_set |=
ICE_INSET_TUN_UDP_SRC_PORT;
- if (udp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (udp_mask->hdr.dst_port)
input_set |=
ICE_INSET_TUN_UDP_DST_PORT;
} else {
- if (udp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (udp_mask->hdr.src_port)
input_set |=
ICE_INSET_UDP_SRC_PORT;
- if (udp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (udp_mask->hdr.dst_port)
input_set |=
ICE_INSET_UDP_DST_PORT;
}
@@ -654,21 +644,19 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
list[t].type = ICE_UDP_OF;
else
list[t].type = ICE_UDP_ILOS;
- if (udp_mask->hdr.src_port == UINT16_MAX) {
+ if (udp_mask->hdr.src_port) {
list[t].h_u.l4_hdr.src_port =
udp_spec->hdr.src_port;
list[t].m_u.l4_hdr.src_port =
udp_mask->hdr.src_port;
}
- if (udp_mask->hdr.dst_port == UINT16_MAX) {
+ if (udp_mask->hdr.dst_port) {
list[t].h_u.l4_hdr.dst_port =
udp_spec->hdr.dst_port;
list[t].m_u.l4_hdr.dst_port =
udp_mask->hdr.dst_port;
}
t++;
- } else if (!udp_spec && !udp_mask) {
- list[t].type = ICE_UDP_ILOS;
}
break;
@@ -692,40 +680,34 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (tunnel_valid) {
- if (tcp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (tcp_mask->hdr.src_port)
input_set |=
ICE_INSET_TUN_TCP_SRC_PORT;
- if (tcp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (tcp_mask->hdr.dst_port)
input_set |=
ICE_INSET_TUN_TCP_DST_PORT;
} else {
- if (tcp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (tcp_mask->hdr.src_port)
input_set |=
ICE_INSET_TCP_SRC_PORT;
- if (tcp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (tcp_mask->hdr.dst_port)
input_set |=
ICE_INSET_TCP_DST_PORT;
}
list[t].type = ICE_TCP_IL;
- if (tcp_mask->hdr.src_port == UINT16_MAX) {
+ if (tcp_mask->hdr.src_port) {
list[t].h_u.l4_hdr.src_port =
tcp_spec->hdr.src_port;
list[t].m_u.l4_hdr.src_port =
tcp_mask->hdr.src_port;
}
- if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+ if (tcp_mask->hdr.dst_port) {
list[t].h_u.l4_hdr.dst_port =
tcp_spec->hdr.dst_port;
list[t].m_u.l4_hdr.dst_port =
tcp_mask->hdr.dst_port;
}
t++;
- } else if (!tcp_spec && !tcp_mask) {
- list[t].type = ICE_TCP_IL;
}
break;
@@ -743,40 +725,34 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (tunnel_valid) {
- if (sctp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (sctp_mask->hdr.src_port)
input_set |=
ICE_INSET_TUN_SCTP_SRC_PORT;
- if (sctp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (sctp_mask->hdr.dst_port)
input_set |=
ICE_INSET_TUN_SCTP_DST_PORT;
} else {
- if (sctp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (sctp_mask->hdr.src_port)
input_set |=
ICE_INSET_SCTP_SRC_PORT;
- if (sctp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (sctp_mask->hdr.dst_port)
input_set |=
ICE_INSET_SCTP_DST_PORT;
}
list[t].type = ICE_SCTP_IL;
- if (sctp_mask->hdr.src_port == UINT16_MAX) {
+ if (sctp_mask->hdr.src_port) {
list[t].h_u.sctp_hdr.src_port =
sctp_spec->hdr.src_port;
list[t].m_u.sctp_hdr.src_port =
sctp_mask->hdr.src_port;
}
- if (sctp_mask->hdr.dst_port == UINT16_MAX) {
+ if (sctp_mask->hdr.dst_port) {
list[t].h_u.sctp_hdr.dst_port =
sctp_spec->hdr.dst_port;
list[t].m_u.sctp_hdr.dst_port =
sctp_mask->hdr.dst_port;
}
t++;
- } else if (!sctp_spec && !sctp_mask) {
- list[t].type = ICE_SCTP_IL;
}
break;
@@ -799,21 +775,21 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
tunnel_valid = 1;
if (vxlan_spec && vxlan_mask) {
list[t].type = ICE_VXLAN;
- if (vxlan_mask->vni[0] == UINT8_MAX &&
- vxlan_mask->vni[1] == UINT8_MAX &&
- vxlan_mask->vni[2] == UINT8_MAX) {
+ if (vxlan_mask->vni[0] ||
+ vxlan_mask->vni[1] ||
+ vxlan_mask->vni[2]) {
list[t].h_u.tnl_hdr.vni =
(vxlan_spec->vni[2] << 16) |
(vxlan_spec->vni[1] << 8) |
vxlan_spec->vni[0];
list[t].m_u.tnl_hdr.vni =
- UINT32_MAX;
+ (vxlan_mask->vni[2] << 16) |
+ (vxlan_mask->vni[1] << 8) |
+ vxlan_mask->vni[0];
input_set |=
ICE_INSET_TUN_VXLAN_VNI;
}
t++;
- } else if (!vxlan_spec && !vxlan_mask) {
- list[t].type = ICE_VXLAN;
}
break;
@@ -835,21 +811,21 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
tunnel_valid = 1;
if (nvgre_spec && nvgre_mask) {
list[t].type = ICE_NVGRE;
- if (nvgre_mask->tni[0] == UINT8_MAX &&
- nvgre_mask->tni[1] == UINT8_MAX &&
- nvgre_mask->tni[2] == UINT8_MAX) {
+ if (nvgre_mask->tni[0] ||
+ nvgre_mask->tni[1] ||
+ nvgre_mask->tni[2]) {
list[t].h_u.nvgre_hdr.tni_flow =
(nvgre_spec->tni[2] << 16) |
(nvgre_spec->tni[1] << 8) |
nvgre_spec->tni[0];
list[t].m_u.nvgre_hdr.tni_flow =
- UINT32_MAX;
+ (nvgre_mask->tni[2] << 16) |
+ (nvgre_mask->tni[1] << 8) |
+ nvgre_mask->tni[0];
input_set |=
ICE_INSET_TUN_NVGRE_TNI;
}
t++;
- } else if (!nvgre_spec && !nvgre_mask) {
- list[t].type = ICE_NVGRE;
}
break;
@@ -870,23 +846,21 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (vlan_spec && vlan_mask) {
list[t].type = ICE_VLAN_OFOS;
- if (vlan_mask->tci == UINT16_MAX) {
+ if (vlan_mask->tci) {
list[t].h_u.vlan_hdr.vlan =
vlan_spec->tci;
list[t].m_u.vlan_hdr.vlan =
- UINT16_MAX;
+ vlan_mask->tci;
input_set |= ICE_INSET_VLAN_OUTER;
}
- if (vlan_mask->inner_type == UINT16_MAX) {
+ if (vlan_mask->inner_type) {
list[t].h_u.vlan_hdr.type =
vlan_spec->inner_type;
list[t].m_u.vlan_hdr.type =
- UINT16_MAX;
+ vlan_mask->inner_type;
input_set |= ICE_INSET_VLAN_OUTER;
}
t++;
- } else if (!vlan_spec && !vlan_mask) {
- list[t].type = ICE_VLAN_OFOS;
}
break;
@@ -918,19 +892,16 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
return 0;
}
list[t].type = ICE_PPPOE;
- if (pppoe_mask->session_id == UINT16_MAX) {
+ if (pppoe_mask->session_id) {
list[t].h_u.pppoe_hdr.session_id =
pppoe_spec->session_id;
list[t].m_u.pppoe_hdr.session_id =
- UINT16_MAX;
+ pppoe_mask->session_id;
input_set |= ICE_INSET_PPPOE_SESSION;
}
t++;
pppoe_valid = 1;
- } else if (!pppoe_spec && !pppoe_mask) {
- list[t].type = ICE_PPPOE;
}
-
break;
case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
@@ -953,18 +924,15 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
if (pppoe_valid)
t--;
list[t].type = ICE_PPPOE;
- if (pppoe_proto_mask->proto_id == UINT16_MAX) {
+ if (pppoe_proto_mask->proto_id) {
list[t].h_u.pppoe_hdr.ppp_prot_id =
pppoe_proto_spec->proto_id;
list[t].m_u.pppoe_hdr.ppp_prot_id =
- UINT16_MAX;
+ pppoe_proto_mask->proto_id;
input_set |= ICE_INSET_PPPOE_PROTO;
}
t++;
- } else if (!pppoe_proto_spec && !pppoe_proto_mask) {
- list[t].type = ICE_PPPOE;
}
-
break;
case RTE_FLOW_ITEM_TYPE_VOID:
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH 4/7] net/ice: add support for MAC VLAN rule
2020-03-13 2:07 [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF Wei Zhao
` (2 preceding siblings ...)
2020-03-13 2:08 ` [dpdk-dev] [PATCH 3/7] net/ice: change switch parser to support flexible mask Wei Zhao
@ 2020-03-13 2:08 ` Wei Zhao
2020-03-13 2:08 ` [dpdk-dev] [PATCH 5/7] net/ice: change default tunnel type Wei Zhao
` (3 subsequent siblings)
7 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-03-13 2:08 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, xiaolong.ye, Wei Zhao
This patch adds support for MAC VLAN rules:
it enables the switch filter to direct packets based on
MAC address and VLAN ID.
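A hedged sketch of such a rule (the MAC address and VLAN ID are
illustrative assumptions, not values from this patch):

    #include <rte_byteorder.h>
    #include <rte_flow.h>

    /* Sketch: match destination MAC 00:11:22:33:44:55 on VLAN 100. */
    struct rte_flow_item_eth eth_spec = {
        .dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
    };
    struct rte_flow_item_eth eth_mask = {
        .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
    };
    struct rte_flow_item_vlan vlan_spec = { .tci = RTE_BE16(100) };
    struct rte_flow_item_vlan vlan_mask = { .tci = RTE_BE16(0x0fff) };
    struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH,
          .spec = &eth_spec, .mask = &eth_mask },
        { .type = RTE_FLOW_ITEM_TYPE_VLAN,
          .spec = &vlan_spec, .mask = &vlan_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };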
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_switch_filter.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index af7e9cb0b..20d0577b5 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -29,6 +29,9 @@
#define ICE_SW_INSET_ETHER ( \
ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
+#define ICE_SW_INSET_MAC_VLAN ( \
+ ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
+ ICE_INSET_VLAN_OUTER)
#define ICE_SW_INSET_MAC_IPV4 ( \
ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
@@ -107,6 +110,8 @@ static struct
ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
{pattern_ethertype,
ICE_SW_INSET_ETHER, ICE_INSET_NONE},
+ {pattern_ethertype_vlan,
+ ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
{pattern_eth_ipv4,
ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
{pattern_eth_ipv4_udp,
@@ -149,6 +154,8 @@ static struct
ice_pattern_match_item ice_switch_pattern_dist_os[] = {
{pattern_ethertype,
ICE_SW_INSET_ETHER, ICE_INSET_NONE},
+ {pattern_ethertype_vlan,
+ ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
{pattern_eth_arp,
ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv4,
@@ -179,6 +186,8 @@ ice_pattern_match_item ice_switch_pattern_dist_os[] = {
static struct
ice_pattern_match_item ice_switch_pattern_perm[] = {
+ {pattern_ethertype_vlan,
+ ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
{pattern_eth_ipv4,
ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
{pattern_eth_ipv4_udp,
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH 5/7] net/ice: change default tunnel type
2020-03-13 2:07 [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF Wei Zhao
` (3 preceding siblings ...)
2020-03-13 2:08 ` [dpdk-dev] [PATCH 4/7] net/ice: add support for MAC VLAN rule Wei Zhao
@ 2020-03-13 2:08 ` Wei Zhao
2020-03-13 2:08 ` [dpdk-dev] [PATCH 6/7] net/ice: add action number check for switch Wei Zhao
` (2 subsequent siblings)
7 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-03-13 2:08 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, xiaolong.ye, stable, Wei Zhao
The default tunnel type for the switch filter is changed to the new
definition ICE_SW_TUN_AND_NON_TUN so that the rule
will apply to more packet types.
Cc: stable@dpdk.org
Fixes: 47d460d63233 ("net/ice: rework switch filter")
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_switch_filter.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 20d0577b5..7ca922602 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -1097,7 +1097,8 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
uint16_t lkups_num = 0;
const struct rte_flow_item *item = pattern;
uint16_t item_num = 0;
- enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
+ enum ice_sw_tunnel_type tun_type =
+ ICE_SW_TUN_AND_NON_TUN;
struct ice_pattern_match_item *pattern_match_item = NULL;
for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH 6/7] net/ice: add action number check for switch
2020-03-13 2:07 [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF Wei Zhao
` (4 preceding siblings ...)
2020-03-13 2:08 ` [dpdk-dev] [PATCH 5/7] net/ice: change default tunnel type Wei Zhao
@ 2020-03-13 2:08 ` Wei Zhao
2020-03-13 2:08 ` [dpdk-dev] [PATCH 7/7] net/ice: fix input set of VLAN item Wei Zhao
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 00/13] add switch filter support for intel DCF Wei Zhao
7 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-03-13 2:08 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, xiaolong.ye, stable, Wei Zhao
The action number can only be one for the DCF or PF
switch filter; large actions are not supported.
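For example (a hypothetical rejected input, not code from the patch), an
action list carrying two fate actions now fails validation, while VOID
actions are still skipped by the check:

    #include <rte_flow.h>

    struct rte_flow_action_queue q = { .index = 0 };
    struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
        { .type = RTE_FLOW_ACTION_TYPE_VOID }, /* ignored by the check */
        { .type = RTE_FLOW_ACTION_TYPE_DROP }, /* 2nd action: -EINVAL */
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };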
Cc: stable@dpdk.org
Fixes: 47d460d63233 ("net/ice: rework switch filter")
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_switch_filter.c | 48 +++++++++++++++++++++++++++++
1 file changed, 48 insertions(+)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 7ca922602..48d689deb 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -1079,6 +1079,46 @@ ice_switch_parse_action(struct ice_pf *pf,
return -rte_errno;
}
+static int
+ice_switch_check_action(const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action *action;
+ enum rte_flow_action_type action_type;
+ uint16_t actions_num = 0;
+
+ for (action = actions; action->type !=
+ RTE_FLOW_ACTION_TYPE_END; action++) {
+ action_type = action->type;
+ switch (action_type) {
+ case RTE_FLOW_ACTION_TYPE_VF:
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ actions_num++;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ continue;
+ default:
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "Invalid action type");
+ return -rte_errno;
+ }
+ }
+
+ if (actions_num > 1) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "Invalid action number");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
static int
ice_switch_parse_pattern_action(struct ice_adapter *ad,
struct ice_pattern_match_item *array,
@@ -1164,6 +1204,14 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
goto error;
}
+ ret = ice_switch_check_action(actions, error);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Invalid input action number");
+ goto error;
+ }
+
if (ad->hw.dcf_enabled)
ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
else
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH 7/7] net/ice: fix input set of VLAN item
2020-03-13 2:07 [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF Wei Zhao
` (5 preceding siblings ...)
2020-03-13 2:08 ` [dpdk-dev] [PATCH 6/7] net/ice: add action number check for switch Wei Zhao
@ 2020-03-13 2:08 ` Wei Zhao
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 00/13] add switch filter support for intel DCF Wei Zhao
7 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-03-13 2:08 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, xiaolong.ye, stable, Wei Zhao
The input set for the inner type of the VLAN item should
be ICE_INSET_ETHERTYPE, not ICE_INSET_VLAN_OUTER.
This MAC VLAN filter is also part of the DCF switch filter.
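As a hedged illustration (values assumed), a VLAN item matching the
encapsulated EtherType now contributes the ETHERTYPE input set:

    #include <rte_byteorder.h>
    #include <rte_ether.h>
    #include <rte_flow.h>

    /* Sketch: match IPv4 (0x0800) carried inside the VLAN tag; this is
     * now accounted as ICE_INSET_ETHERTYPE, not ICE_INSET_VLAN_OUTER.
     */
    struct rte_flow_item_vlan vlan_spec = {
        .inner_type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
    };
    struct rte_flow_item_vlan vlan_mask = {
        .inner_type = RTE_BE16(0xffff),
    };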
Cc: stable@dpdk.org
Fixes: 47d460d63233 ("net/ice: rework switch filter")
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_switch_filter.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 48d689deb..ecd7c75aa 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -867,7 +867,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
vlan_spec->inner_type;
list[t].m_u.vlan_hdr.type =
vlan_mask->inner_type;
- input_set |= ICE_INSET_VLAN_OUTER;
+ input_set |= ICE_INSET_ETHERTYPE;
}
t++;
}
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v2 00/13] add switch filter support for intel DCF
2020-03-13 2:07 [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF Wei Zhao
` (6 preceding siblings ...)
2020-03-13 2:08 ` [dpdk-dev] [PATCH 7/7] net/ice: fix input set of VLAN item Wei Zhao
@ 2020-04-02 6:46 ` Wei Zhao
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 01/13] net/ice: enable switch flow on DCF Wei Zhao
` (13 more replies)
7 siblings, 14 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-02 6:46 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, yuan.peng, nannan.lu, qi.fu, haiyue.wang
A DCF (Device Config Function) framework has been added for Intel
devices; this patch set adds switch filter support for it, and the set
also fixes bugs which block this feature.
This patchset is based on:
[1] https://patchwork.dpdk.org/cover/66480/ : add Intel DCF PMD support
Depends-on: series-8859
v2:
-add switch filter support for AH/ESP/PFCP packets
-fix some patch check warnings
-add flow redirect on switch patch
Beilei Xing (2):
net/ice: enable flow redirect on switch
net/ice: redirect switch rule to new VSI
Wei Zhao (11):
net/ice: enable switch flow on DCF
net/ice: support for more PPPoE input set
net/ice: change switch parser to support flexible mask
net/ice: add support for MAC VLAN rule
net/ice: change default tunnel type
net/ice: add action number check for switch
net/ice: add RTE_FLOW support for ESP/AH/L2TP
net/ice: add support for PFCP
net/ice: add support for NAT-T
net/ice: add support for permit mode
net/ice: fix input set of VLAN item
doc/guides/rel_notes/release_20_05.rst | 2 +-
drivers/net/ice/ice_dcf_ethdev.c | 10 +-
drivers/net/ice/ice_dcf_parent.c | 30 +-
drivers/net/ice/ice_fdir_filter.c | 6 +
drivers/net/ice/ice_generic_flow.c | 61 +++
drivers/net/ice/ice_generic_flow.h | 26 +
drivers/net/ice/ice_hash.c | 6 +
drivers/net/ice/ice_switch_filter.c | 731 +++++++++++++++++++------
8 files changed, 685 insertions(+), 187 deletions(-)
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v2 01/13] net/ice: enable switch flow on DCF
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 00/13]add switch filter support for intel DCF Wei Zhao
@ 2020-04-02 6:46 ` Wei Zhao
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 02/13] net/ice: support for more PPPoE input set Wei Zhao
` (12 subsequent siblings)
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-02 6:46 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, yuan.peng, nannan.lu, qi.fu, haiyue.wang, Wei Zhao
DCF on CVL is a control plane VF which takes responsibility for
configuring all the PF/global resources. This patch adds support for
DCF to program forwarding rules that direct packets to VFs.
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
doc/guides/rel_notes/release_20_05.rst | 2 +-
drivers/net/ice/ice_dcf_ethdev.c | 10 +++++--
drivers/net/ice/ice_dcf_parent.c | 8 ++++++
drivers/net/ice/ice_fdir_filter.c | 6 ++++
drivers/net/ice/ice_hash.c | 6 ++++
drivers/net/ice/ice_switch_filter.c | 39 +++++++++++++++++++++++++-
6 files changed, 67 insertions(+), 4 deletions(-)
diff --git a/doc/guides/rel_notes/release_20_05.rst b/doc/guides/rel_notes/release_20_05.rst
index 9bc647284..bde7e47fb 100644
--- a/doc/guides/rel_notes/release_20_05.rst
+++ b/doc/guides/rel_notes/release_20_05.rst
@@ -68,7 +68,7 @@ New Features
Updated the Intel ice driver with new features and improvements, including:
* Added support for DCF (Device Config Function) feature.
-
+ * Added switch filter support for intel DCF.
Removed Items
-------------
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index af94caeff..e5ba1a61f 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -115,8 +115,8 @@ ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
static int
ice_dcf_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
- __rte_unused enum rte_filter_op filter_op,
- __rte_unused void *arg)
+ enum rte_filter_op filter_op,
+ void *arg)
{
int ret = 0;
@@ -124,6 +124,12 @@ ice_dcf_dev_filter_ctrl(struct rte_eth_dev *dev,
return -EINVAL;
switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &ice_flow_ops;
+ break;
+
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index ff08292a1..37f0e2be2 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -9,6 +9,7 @@
#include <rte_spinlock.h>
#include "ice_dcf_ethdev.h"
+#include "ice_generic_flow.h"
#define ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL 100000 /* us */
static rte_spinlock_t vsi_update_lock = RTE_SPINLOCK_INITIALIZER;
@@ -321,6 +322,12 @@ ice_dcf_init_parent_adapter(struct rte_eth_dev *eth_dev)
}
parent_adapter->active_pkg_type = ice_load_pkg_type(parent_hw);
+ err = ice_flow_init(parent_adapter);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Failed to initialize flow");
+ goto uninit_hw;
+ }
+
ice_dcf_update_vf_vsi_map(parent_hw, hw->num_vfs, hw->vf_vsi_map);
mac = (const struct rte_ether_addr *)hw->avf.mac.addr;
@@ -347,5 +354,6 @@ ice_dcf_uninit_parent_adapter(struct rte_eth_dev *eth_dev)
eth_dev->data->mac_addrs = NULL;
+ ice_flow_uninit(parent_adapter);
ice_dcf_uninit_parent_hw(parent_hw);
}
diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index a082a13df..1a85d6cc1 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -1061,6 +1061,9 @@ ice_fdir_init(struct ice_adapter *ad)
struct ice_flow_parser *parser;
int ret;
+ if (ad->hw.dcf_enabled)
+ return 0;
+
ret = ice_fdir_setup(pf);
if (ret)
return ret;
@@ -1081,6 +1084,9 @@ ice_fdir_uninit(struct ice_adapter *ad)
struct ice_pf *pf = &ad->pf;
struct ice_flow_parser *parser;
+ if (ad->hw.dcf_enabled)
+ return;
+
if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
parser = &ice_fdir_parser_comms;
else
diff --git a/drivers/net/ice/ice_hash.c b/drivers/net/ice/ice_hash.c
index 0fdd4d68d..72c8ddc9a 100644
--- a/drivers/net/ice/ice_hash.c
+++ b/drivers/net/ice/ice_hash.c
@@ -243,6 +243,9 @@ ice_hash_init(struct ice_adapter *ad)
{
struct ice_flow_parser *parser = NULL;
+ if (ad->hw.dcf_enabled)
+ return 0;
+
if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
parser = &ice_hash_parser_os;
else if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
@@ -565,6 +568,9 @@ ice_hash_destroy(struct ice_adapter *ad,
static void
ice_hash_uninit(struct ice_adapter *ad)
{
+ if (ad->hw.dcf_enabled)
+ return;
+
if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
ice_unregister_parser(&ice_hash_parser_os, ad);
else if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 66dc158ef..4db8f1471 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -913,6 +913,39 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
return 0;
}
+static int
+ice_switch_parse_dcf_action(const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct ice_adv_rule_info *rule_info)
+{
+ const struct rte_flow_action_vf *act_vf;
+ const struct rte_flow_action *action;
+ enum rte_flow_action_type action_type;
+
+ for (action = actions; action->type !=
+ RTE_FLOW_ACTION_TYPE_END; action++) {
+ action_type = action->type;
+ switch (action_type) {
+ case RTE_FLOW_ACTION_TYPE_VF:
+ rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
+ act_vf = action->conf;
+ rule_info->sw_act.vsi_handle = act_vf->id;
+ break;
+ default:
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "Invalid action type or queue number");
+ return -rte_errno;
+ }
+ }
+
+ rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
+ rule_info->rx = 1;
+ rule_info->priority = 5;
+
+ return 0;
+}
static int
ice_switch_parse_action(struct ice_pf *pf,
@@ -1081,7 +1114,11 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
goto error;
}
- ret = ice_switch_parse_action(pf, actions, error, &rule_info);
+ if (ad->hw.dcf_enabled)
+ ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
+ else
+ ret = ice_switch_parse_action(pf, actions, error, &rule_info);
+
if (ret) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v2 02/13] net/ice: support for more PPPoE input set
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 00/13]add switch filter support for intel DCF Wei Zhao
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 01/13] net/ice: enable switch flow on DCF Wei Zhao
@ 2020-04-02 6:46 ` Wei Zhao
2020-04-02 9:31 ` Lu, Nannan
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 03/13] net/ice: change switch parser to support flexible mask Wei Zhao
` (11 subsequent siblings)
13 siblings, 1 reply; 69+ messages in thread
From: Wei Zhao @ 2020-04-02 6:46 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, yuan.peng, nannan.lu, qi.fu, haiyue.wang, Wei Zhao
This patch adds more support for PPPoE packets:
it enables the switch filter to direct PPPoE packets based on
session ID and PPP protocol type.
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_generic_flow.c | 13 +++++
drivers/net/ice/ice_generic_flow.h | 9 ++++
drivers/net/ice/ice_switch_filter.c | 82 +++++++++++++++++++++++++++--
3 files changed, 99 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index c0420797e..0fdc7e617 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -1122,12 +1122,25 @@ enum rte_flow_item_type pattern_eth_pppoes[] = {
RTE_FLOW_ITEM_TYPE_PPPOES,
RTE_FLOW_ITEM_TYPE_END,
};
+enum rte_flow_item_type pattern_eth_pppoes_proto[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_PPPOES,
+ RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID,
+ RTE_FLOW_ITEM_TYPE_END,
+};
enum rte_flow_item_type pattern_eth_vlan_pppoes[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_VLAN,
RTE_FLOW_ITEM_TYPE_PPPOES,
RTE_FLOW_ITEM_TYPE_END,
};
+enum rte_flow_item_type pattern_eth_vlan_pppoes_proto[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_PPPOES,
+ RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID,
+ RTE_FLOW_ITEM_TYPE_END,
+};
enum rte_flow_item_type pattern_eth_qinq_pppoes[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_VLAN,
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
index ede6ec824..3361ecbd9 100644
--- a/drivers/net/ice/ice_generic_flow.h
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -30,6 +30,7 @@
#define ICE_PROT_VXLAN (1ULL << 19)
#define ICE_PROT_NVGRE (1ULL << 20)
#define ICE_PROT_GTPU (1ULL << 21)
+#define ICE_PROT_PPPOE_S (1ULL << 22)
/* field */
@@ -49,6 +50,8 @@
#define ICE_NVGRE_TNI (1ULL << 50)
#define ICE_GTPU_TEID (1ULL << 49)
#define ICE_GTPU_QFI (1ULL << 48)
+#define ICE_PPPOE_SESSION (1ULL << 47)
+#define ICE_PPPOE_PROTO (1ULL << 46)
/* input set */
@@ -177,6 +180,10 @@
(ICE_PROT_GTPU | ICE_GTPU_TEID)
#define ICE_INSET_GTPU_QFI \
(ICE_PROT_GTPU | ICE_GTPU_QFI)
+#define ICE_INSET_PPPOE_SESSION \
+ (ICE_PROT_PPPOE_S | ICE_PPPOE_SESSION)
+#define ICE_INSET_PPPOE_PROTO \
+ (ICE_PROT_PPPOE_S | ICE_PPPOE_PROTO)
/* empty pattern */
extern enum rte_flow_item_type pattern_empty[];
@@ -349,7 +356,9 @@ extern enum rte_flow_item_type pattern_eth_pppoed[];
extern enum rte_flow_item_type pattern_eth_vlan_pppoed[];
extern enum rte_flow_item_type pattern_eth_qinq_pppoed[];
extern enum rte_flow_item_type pattern_eth_pppoes[];
+extern enum rte_flow_item_type pattern_eth_pppoes_proto[];
extern enum rte_flow_item_type pattern_eth_vlan_pppoes[];
+extern enum rte_flow_item_type pattern_eth_vlan_pppoes_proto[];
extern enum rte_flow_item_type pattern_eth_qinq_pppoes[];
extern enum rte_flow_item_type pattern_eth_pppoes_ipv4[];
extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4[];
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 4db8f1471..add66e683 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -87,7 +87,11 @@
ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_MAC_PPPOE ( \
ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
- ICE_INSET_DMAC | ICE_INSET_ETHERTYPE)
+ ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
+#define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
+ ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
+ ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
+ ICE_INSET_PPPOE_PROTO)
struct sw_meta {
struct ice_adv_lkup_elem *list;
@@ -135,6 +139,10 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
{pattern_eth_vlan_pppoes,
ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
+ {pattern_eth_pppoes_proto,
+ ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
+ {pattern_eth_vlan_pppoes_proto,
+ ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
};
static struct
@@ -316,12 +324,15 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
+ const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
+ *pppoe_proto_mask;
uint8_t ipv6_addr_mask[16] = {
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
uint64_t input_set = ICE_INSET_NONE;
uint16_t j, t = 0;
uint16_t tunnel_valid = 0;
+ uint16_t pppoe_valid = 0;
for (item = pattern; item->type !=
@@ -885,14 +896,75 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
pppoe_mask = item->mask;
/* Check if PPPoE item is used to describe protocol.
* If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
*/
- if (pppoe_spec || pppoe_mask) {
+ if ((!pppoe_spec && pppoe_mask) ||
+ (pppoe_spec && !pppoe_mask)) {
rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid pppoe item");
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid pppoe item");
return 0;
}
+ if (pppoe_spec && pppoe_mask) {
+ /* Check pppoe mask and update input set */
+ if (pppoe_mask->length ||
+ pppoe_mask->code ||
+ pppoe_mask->version_type) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid pppoe mask");
+ return 0;
+ }
+ list[t].type = ICE_PPPOE;
+ if (pppoe_mask->session_id == UINT16_MAX) {
+ list[t].h_u.pppoe_hdr.session_id =
+ pppoe_spec->session_id;
+ list[t].m_u.pppoe_hdr.session_id =
+ UINT16_MAX;
+ input_set |= ICE_INSET_PPPOE_SESSION;
+ }
+ t++;
+ pppoe_valid = 1;
+ } else if (!pppoe_spec && !pppoe_mask) {
+ list[t].type = ICE_PPPOE;
+ }
+
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
+ pppoe_proto_spec = item->spec;
+ pppoe_proto_mask = item->mask;
+ /* Check if PPPoE optional proto_id item
+ * is used to describe protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
+ */
+ if ((!pppoe_proto_spec && pppoe_proto_mask) ||
+ (pppoe_proto_spec && !pppoe_proto_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid pppoe proto item");
+ return 0;
+ }
+ if (pppoe_proto_spec && pppoe_proto_mask) {
+ if (pppoe_valid)
+ t--;
+ list[t].type = ICE_PPPOE;
+ if (pppoe_proto_mask->proto_id == UINT16_MAX) {
+ list[t].h_u.pppoe_hdr.ppp_prot_id =
+ pppoe_proto_spec->proto_id;
+ list[t].m_u.pppoe_hdr.ppp_prot_id =
+ UINT16_MAX;
+ input_set |= ICE_INSET_PPPOE_PROTO;
+ }
+ t++;
+ } else if (!pppoe_proto_spec && !pppoe_proto_mask) {
+ list[t].type = ICE_PPPOE;
+ }
+
break;
case RTE_FLOW_ITEM_TYPE_VOID:
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v2 03/13] net/ice: change switch parser to support flexible mask
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 00/13]add switch filter support for intel DCF Wei Zhao
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 01/13] net/ice: enable switch flow on DCF Wei Zhao
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 02/13] net/ice: support for more PPPoE input set Wei Zhao
@ 2020-04-02 6:46 ` Wei Zhao
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 04/13] net/ice: add support for MAC VLAN rule Wei Zhao
` (10 subsequent siblings)
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-02 6:46 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, yuan.peng, nannan.lu, qi.fu, haiyue.wang, Wei Zhao
DCF needs to support configuration of flexible masks, that is,
input set masks that are not the all-ones 0xFFFF type. For example, in
order to direct L2/IP multicast packets the mask for the source IP may
be 0xF0000000; this patch enables the switch filter parser for it.
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_switch_filter.c | 318 ++++++++++++----------------
1 file changed, 140 insertions(+), 178 deletions(-)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index add66e683..4edaea3f5 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -326,9 +326,6 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
*pppoe_proto_mask;
- uint8_t ipv6_addr_mask[16] = {
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
uint64_t input_set = ICE_INSET_NONE;
uint16_t j, t = 0;
uint16_t tunnel_valid = 0;
@@ -351,19 +348,31 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
eth_spec = item->spec;
eth_mask = item->mask;
if (eth_spec && eth_mask) {
- if (tunnel_valid &&
- rte_is_broadcast_ether_addr(ð_mask->src))
- input_set |= ICE_INSET_TUN_SMAC;
- else if (
- rte_is_broadcast_ether_addr(ð_mask->src))
- input_set |= ICE_INSET_SMAC;
- if (tunnel_valid &&
- rte_is_broadcast_ether_addr(ð_mask->dst))
- input_set |= ICE_INSET_TUN_DMAC;
- else if (
- rte_is_broadcast_ether_addr(ð_mask->dst))
- input_set |= ICE_INSET_DMAC;
- if (eth_mask->type == RTE_BE16(0xffff))
+ const uint8_t *a = eth_mask->src.addr_bytes;
+ const uint8_t *b = eth_mask->dst.addr_bytes;
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+ if (a[j] && tunnel_valid) {
+ input_set |=
+ ICE_INSET_TUN_SMAC;
+ break;
+ } else if (a[j]) {
+ input_set |=
+ ICE_INSET_SMAC;
+ break;
+ }
+ }
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+ if (b[j] && tunnel_valid) {
+ input_set |=
+ ICE_INSET_TUN_DMAC;
+ break;
+ } else if (b[j]) {
+ input_set |=
+ ICE_INSET_DMAC;
+ break;
+ }
+ }
+ if (eth_mask->type)
input_set |= ICE_INSET_ETHERTYPE;
list[t].type = (tunnel_valid == 0) ?
ICE_MAC_OFOS : ICE_MAC_IL;
@@ -373,16 +382,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
h = &list[t].h_u.eth_hdr;
m = &list[t].m_u.eth_hdr;
for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
- if (eth_mask->src.addr_bytes[j] ==
- UINT8_MAX) {
+ if (eth_mask->src.addr_bytes[j]) {
h->src_addr[j] =
eth_spec->src.addr_bytes[j];
m->src_addr[j] =
eth_mask->src.addr_bytes[j];
i = 1;
}
- if (eth_mask->dst.addr_bytes[j] ==
- UINT8_MAX) {
+ if (eth_mask->dst.addr_bytes[j]) {
h->dst_addr[j] =
eth_spec->dst.addr_bytes[j];
m->dst_addr[j] =
@@ -392,17 +399,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (i)
t++;
- if (eth_mask->type == UINT16_MAX) {
+ if (eth_mask->type) {
list[t].type = ICE_ETYPE_OL;
list[t].h_u.ethertype.ethtype_id =
eth_spec->type;
list[t].m_u.ethertype.ethtype_id =
- UINT16_MAX;
+ eth_mask->type;
t++;
}
- } else if (!eth_spec && !eth_mask) {
- list[t].type = (tun_type == ICE_NON_TUN) ?
- ICE_MAC_OFOS : ICE_MAC_IL;
}
break;
@@ -423,81 +427,68 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (tunnel_valid) {
- if (ipv4_mask->hdr.type_of_service ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.type_of_service)
input_set |=
ICE_INSET_TUN_IPV4_TOS;
- if (ipv4_mask->hdr.src_addr ==
- UINT32_MAX)
+ if (ipv4_mask->hdr.src_addr)
input_set |=
ICE_INSET_TUN_IPV4_SRC;
- if (ipv4_mask->hdr.dst_addr ==
- UINT32_MAX)
+ if (ipv4_mask->hdr.dst_addr)
input_set |=
ICE_INSET_TUN_IPV4_DST;
- if (ipv4_mask->hdr.time_to_live ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.time_to_live)
input_set |=
ICE_INSET_TUN_IPV4_TTL;
- if (ipv4_mask->hdr.next_proto_id ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.next_proto_id)
input_set |=
ICE_INSET_TUN_IPV4_PROTO;
} else {
- if (ipv4_mask->hdr.src_addr ==
- UINT32_MAX)
+ if (ipv4_mask->hdr.src_addr)
input_set |= ICE_INSET_IPV4_SRC;
- if (ipv4_mask->hdr.dst_addr ==
- UINT32_MAX)
+ if (ipv4_mask->hdr.dst_addr)
input_set |= ICE_INSET_IPV4_DST;
- if (ipv4_mask->hdr.time_to_live ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.time_to_live)
input_set |= ICE_INSET_IPV4_TTL;
- if (ipv4_mask->hdr.next_proto_id ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.next_proto_id)
input_set |=
ICE_INSET_IPV4_PROTO;
- if (ipv4_mask->hdr.type_of_service ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.type_of_service)
input_set |=
ICE_INSET_IPV4_TOS;
}
list[t].type = (tunnel_valid == 0) ?
ICE_IPV4_OFOS : ICE_IPV4_IL;
- if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+ if (ipv4_mask->hdr.src_addr) {
list[t].h_u.ipv4_hdr.src_addr =
ipv4_spec->hdr.src_addr;
list[t].m_u.ipv4_hdr.src_addr =
- UINT32_MAX;
+ ipv4_mask->hdr.src_addr;
}
- if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+ if (ipv4_mask->hdr.dst_addr) {
list[t].h_u.ipv4_hdr.dst_addr =
ipv4_spec->hdr.dst_addr;
list[t].m_u.ipv4_hdr.dst_addr =
- UINT32_MAX;
+ ipv4_mask->hdr.dst_addr;
}
- if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+ if (ipv4_mask->hdr.time_to_live) {
list[t].h_u.ipv4_hdr.time_to_live =
ipv4_spec->hdr.time_to_live;
list[t].m_u.ipv4_hdr.time_to_live =
- UINT8_MAX;
+ ipv4_mask->hdr.time_to_live;
}
- if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+ if (ipv4_mask->hdr.next_proto_id) {
list[t].h_u.ipv4_hdr.protocol =
ipv4_spec->hdr.next_proto_id;
list[t].m_u.ipv4_hdr.protocol =
- UINT8_MAX;
+ ipv4_mask->hdr.next_proto_id;
}
- if (ipv4_mask->hdr.type_of_service ==
- UINT8_MAX) {
+ if (ipv4_mask->hdr.type_of_service) {
list[t].h_u.ipv4_hdr.tos =
ipv4_spec->hdr.type_of_service;
- list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
+ list[t].m_u.ipv4_hdr.tos =
+ ipv4_mask->hdr.type_of_service;
}
t++;
- } else if (!ipv4_spec && !ipv4_mask) {
- list[t].type = (tunnel_valid == 0) ?
- ICE_IPV4_OFOS : ICE_IPV4_IL;
}
break;
@@ -513,54 +504,53 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
return 0;
}
- if (tunnel_valid) {
- if (!memcmp(ipv6_mask->hdr.src_addr,
- ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.src_addr)))
- input_set |=
- ICE_INSET_TUN_IPV6_SRC;
- if (!memcmp(ipv6_mask->hdr.dst_addr,
- ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.dst_addr)))
+ for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
+ if (ipv6_mask->hdr.src_addr[j] &&
+ tunnel_valid) {
input_set |=
- ICE_INSET_TUN_IPV6_DST;
- if (ipv6_mask->hdr.proto == UINT8_MAX)
+ ICE_INSET_TUN_IPV6_SRC;
+ break;
+ } else if (ipv6_mask->hdr.src_addr[j]) {
+ input_set |= ICE_INSET_IPV6_SRC;
+ break;
+ }
+ }
+ for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
+ if (ipv6_mask->hdr.dst_addr[j] &&
+ tunnel_valid) {
input_set |=
+ ICE_INSET_TUN_IPV6_DST;
+ break;
+ } else if (ipv6_mask->hdr.dst_addr[j]) {
+ input_set |= ICE_INSET_IPV6_DST;
+ break;
+ }
+ }
+ if (ipv6_mask->hdr.proto &&
+ tunnel_valid)
+ input_set |=
ICE_INSET_TUN_IPV6_NEXT_HDR;
- if (ipv6_mask->hdr.hop_limits ==
- UINT8_MAX)
- input_set |=
+ else if (ipv6_mask->hdr.proto)
+ input_set |=
+ ICE_INSET_IPV6_NEXT_HDR;
+ if (ipv6_mask->hdr.hop_limits &&
+ tunnel_valid)
+ input_set |=
ICE_INSET_TUN_IPV6_HOP_LIMIT;
- if ((ipv6_mask->hdr.vtc_flow &
+ else if (ipv6_mask->hdr.hop_limits)
+ input_set |=
+ ICE_INSET_IPV6_HOP_LIMIT;
+ if ((ipv6_mask->hdr.vtc_flow &
rte_cpu_to_be_32
- (RTE_IPV6_HDR_TC_MASK))
- == rte_cpu_to_be_32
- (RTE_IPV6_HDR_TC_MASK))
- input_set |=
+ (RTE_IPV6_HDR_TC_MASK)) &&
+ tunnel_valid)
+ input_set |=
ICE_INSET_TUN_IPV6_TC;
- } else {
- if (!memcmp(ipv6_mask->hdr.src_addr,
- ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.src_addr)))
- input_set |= ICE_INSET_IPV6_SRC;
- if (!memcmp(ipv6_mask->hdr.dst_addr,
- ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.dst_addr)))
- input_set |= ICE_INSET_IPV6_DST;
- if (ipv6_mask->hdr.proto == UINT8_MAX)
- input_set |=
- ICE_INSET_IPV6_NEXT_HDR;
- if (ipv6_mask->hdr.hop_limits ==
- UINT8_MAX)
- input_set |=
- ICE_INSET_IPV6_HOP_LIMIT;
- if ((ipv6_mask->hdr.vtc_flow &
+ else if (ipv6_mask->hdr.vtc_flow &
rte_cpu_to_be_32
(RTE_IPV6_HDR_TC_MASK))
- == rte_cpu_to_be_32
- (RTE_IPV6_HDR_TC_MASK))
- input_set |= ICE_INSET_IPV6_TC;
- }
+ input_set |= ICE_INSET_IPV6_TC;
+
list[t].type = (tunnel_valid == 0) ?
ICE_IPV6_OFOS : ICE_IPV6_IL;
struct ice_ipv6_hdr *f;
@@ -568,35 +558,33 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
f = &list[t].h_u.ipv6_hdr;
s = &list[t].m_u.ipv6_hdr;
for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
- if (ipv6_mask->hdr.src_addr[j] ==
- UINT8_MAX) {
+ if (ipv6_mask->hdr.src_addr[j]) {
f->src_addr[j] =
ipv6_spec->hdr.src_addr[j];
s->src_addr[j] =
ipv6_mask->hdr.src_addr[j];
}
- if (ipv6_mask->hdr.dst_addr[j] ==
- UINT8_MAX) {
+ if (ipv6_mask->hdr.dst_addr[j]) {
f->dst_addr[j] =
ipv6_spec->hdr.dst_addr[j];
s->dst_addr[j] =
ipv6_mask->hdr.dst_addr[j];
}
}
- if (ipv6_mask->hdr.proto == UINT8_MAX) {
+ if (ipv6_mask->hdr.proto) {
f->next_hdr =
ipv6_spec->hdr.proto;
- s->next_hdr = UINT8_MAX;
+ s->next_hdr =
+ ipv6_mask->hdr.proto;
}
- if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+ if (ipv6_mask->hdr.hop_limits) {
f->hop_limit =
ipv6_spec->hdr.hop_limits;
- s->hop_limit = UINT8_MAX;
+ s->hop_limit =
+ ipv6_mask->hdr.hop_limits;
}
- if ((ipv6_mask->hdr.vtc_flow &
+ if (ipv6_mask->hdr.vtc_flow &
rte_cpu_to_be_32
- (RTE_IPV6_HDR_TC_MASK))
- == rte_cpu_to_be_32
(RTE_IPV6_HDR_TC_MASK)) {
struct ice_le_ver_tc_flow vtf;
vtf.u.fld.version = 0;
@@ -606,13 +594,13 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
RTE_IPV6_HDR_TC_MASK) >>
RTE_IPV6_HDR_TC_SHIFT;
f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
- vtf.u.fld.tc = UINT8_MAX;
+ vtf.u.fld.tc = (rte_be_to_cpu_32
+ (ipv6_mask->hdr.vtc_flow) &
+ RTE_IPV6_HDR_TC_MASK) >>
+ RTE_IPV6_HDR_TC_SHIFT;
s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
}
t++;
- } else if (!ipv6_spec && !ipv6_mask) {
- list[t].type = (tun_type == ICE_NON_TUN) ?
- ICE_IPV4_OFOS : ICE_IPV4_IL;
}
break;
@@ -631,21 +619,17 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (tunnel_valid) {
- if (udp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (udp_mask->hdr.src_port)
input_set |=
ICE_INSET_TUN_UDP_SRC_PORT;
- if (udp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (udp_mask->hdr.dst_port)
input_set |=
ICE_INSET_TUN_UDP_DST_PORT;
} else {
- if (udp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (udp_mask->hdr.src_port)
input_set |=
ICE_INSET_UDP_SRC_PORT;
- if (udp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (udp_mask->hdr.dst_port)
input_set |=
ICE_INSET_UDP_DST_PORT;
}
@@ -654,21 +638,19 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
list[t].type = ICE_UDP_OF;
else
list[t].type = ICE_UDP_ILOS;
- if (udp_mask->hdr.src_port == UINT16_MAX) {
+ if (udp_mask->hdr.src_port) {
list[t].h_u.l4_hdr.src_port =
udp_spec->hdr.src_port;
list[t].m_u.l4_hdr.src_port =
udp_mask->hdr.src_port;
}
- if (udp_mask->hdr.dst_port == UINT16_MAX) {
+ if (udp_mask->hdr.dst_port) {
list[t].h_u.l4_hdr.dst_port =
udp_spec->hdr.dst_port;
list[t].m_u.l4_hdr.dst_port =
udp_mask->hdr.dst_port;
}
t++;
- } else if (!udp_spec && !udp_mask) {
- list[t].type = ICE_UDP_ILOS;
}
break;
@@ -692,40 +674,34 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (tunnel_valid) {
- if (tcp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (tcp_mask->hdr.src_port)
input_set |=
ICE_INSET_TUN_TCP_SRC_PORT;
- if (tcp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (tcp_mask->hdr.dst_port)
input_set |=
ICE_INSET_TUN_TCP_DST_PORT;
} else {
- if (tcp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (tcp_mask->hdr.src_port)
input_set |=
ICE_INSET_TCP_SRC_PORT;
- if (tcp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (tcp_mask->hdr.dst_port)
input_set |=
ICE_INSET_TCP_DST_PORT;
}
list[t].type = ICE_TCP_IL;
- if (tcp_mask->hdr.src_port == UINT16_MAX) {
+ if (tcp_mask->hdr.src_port) {
list[t].h_u.l4_hdr.src_port =
tcp_spec->hdr.src_port;
list[t].m_u.l4_hdr.src_port =
tcp_mask->hdr.src_port;
}
- if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+ if (tcp_mask->hdr.dst_port) {
list[t].h_u.l4_hdr.dst_port =
tcp_spec->hdr.dst_port;
list[t].m_u.l4_hdr.dst_port =
tcp_mask->hdr.dst_port;
}
t++;
- } else if (!tcp_spec && !tcp_mask) {
- list[t].type = ICE_TCP_IL;
}
break;
@@ -743,40 +719,34 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (tunnel_valid) {
- if (sctp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (sctp_mask->hdr.src_port)
input_set |=
ICE_INSET_TUN_SCTP_SRC_PORT;
- if (sctp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (sctp_mask->hdr.dst_port)
input_set |=
ICE_INSET_TUN_SCTP_DST_PORT;
} else {
- if (sctp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (sctp_mask->hdr.src_port)
input_set |=
ICE_INSET_SCTP_SRC_PORT;
- if (sctp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (sctp_mask->hdr.dst_port)
input_set |=
ICE_INSET_SCTP_DST_PORT;
}
list[t].type = ICE_SCTP_IL;
- if (sctp_mask->hdr.src_port == UINT16_MAX) {
+ if (sctp_mask->hdr.src_port) {
list[t].h_u.sctp_hdr.src_port =
sctp_spec->hdr.src_port;
list[t].m_u.sctp_hdr.src_port =
sctp_mask->hdr.src_port;
}
- if (sctp_mask->hdr.dst_port == UINT16_MAX) {
+ if (sctp_mask->hdr.dst_port) {
list[t].h_u.sctp_hdr.dst_port =
sctp_spec->hdr.dst_port;
list[t].m_u.sctp_hdr.dst_port =
sctp_mask->hdr.dst_port;
}
t++;
- } else if (!sctp_spec && !sctp_mask) {
- list[t].type = ICE_SCTP_IL;
}
break;
@@ -799,21 +769,21 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
tunnel_valid = 1;
if (vxlan_spec && vxlan_mask) {
list[t].type = ICE_VXLAN;
- if (vxlan_mask->vni[0] == UINT8_MAX &&
- vxlan_mask->vni[1] == UINT8_MAX &&
- vxlan_mask->vni[2] == UINT8_MAX) {
+ if (vxlan_mask->vni[0] ||
+ vxlan_mask->vni[1] ||
+ vxlan_mask->vni[2]) {
list[t].h_u.tnl_hdr.vni =
(vxlan_spec->vni[2] << 16) |
(vxlan_spec->vni[1] << 8) |
vxlan_spec->vni[0];
list[t].m_u.tnl_hdr.vni =
- UINT32_MAX;
+ (vxlan_mask->vni[2] << 16) |
+ (vxlan_mask->vni[1] << 8) |
+ vxlan_mask->vni[0];
input_set |=
ICE_INSET_TUN_VXLAN_VNI;
}
t++;
- } else if (!vxlan_spec && !vxlan_mask) {
- list[t].type = ICE_VXLAN;
}
break;
@@ -835,21 +805,21 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
tunnel_valid = 1;
if (nvgre_spec && nvgre_mask) {
list[t].type = ICE_NVGRE;
- if (nvgre_mask->tni[0] == UINT8_MAX &&
- nvgre_mask->tni[1] == UINT8_MAX &&
- nvgre_mask->tni[2] == UINT8_MAX) {
+ if (nvgre_mask->tni[0] ||
+ nvgre_mask->tni[1] ||
+ nvgre_mask->tni[2]) {
list[t].h_u.nvgre_hdr.tni_flow =
(nvgre_spec->tni[2] << 16) |
(nvgre_spec->tni[1] << 8) |
nvgre_spec->tni[0];
list[t].m_u.nvgre_hdr.tni_flow =
- UINT32_MAX;
+ (nvgre_mask->tni[2] << 16) |
+ (nvgre_mask->tni[1] << 8) |
+ nvgre_mask->tni[0];
input_set |=
ICE_INSET_TUN_NVGRE_TNI;
}
t++;
- } else if (!nvgre_spec && !nvgre_mask) {
- list[t].type = ICE_NVGRE;
}
break;
@@ -870,23 +840,21 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (vlan_spec && vlan_mask) {
list[t].type = ICE_VLAN_OFOS;
- if (vlan_mask->tci == UINT16_MAX) {
+ if (vlan_mask->tci) {
list[t].h_u.vlan_hdr.vlan =
vlan_spec->tci;
list[t].m_u.vlan_hdr.vlan =
- UINT16_MAX;
+ vlan_mask->tci;
input_set |= ICE_INSET_VLAN_OUTER;
}
- if (vlan_mask->inner_type == UINT16_MAX) {
+ if (vlan_mask->inner_type) {
list[t].h_u.vlan_hdr.type =
vlan_spec->inner_type;
list[t].m_u.vlan_hdr.type =
- UINT16_MAX;
+ vlan_mask->inner_type;
input_set |= ICE_INSET_VLAN_OUTER;
}
t++;
- } else if (!vlan_spec && !vlan_mask) {
- list[t].type = ICE_VLAN_OFOS;
}
break;
@@ -918,19 +886,16 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
return 0;
}
list[t].type = ICE_PPPOE;
- if (pppoe_mask->session_id == UINT16_MAX) {
+ if (pppoe_mask->session_id) {
list[t].h_u.pppoe_hdr.session_id =
pppoe_spec->session_id;
list[t].m_u.pppoe_hdr.session_id =
- UINT16_MAX;
+ pppoe_mask->session_id;
input_set |= ICE_INSET_PPPOE_SESSION;
}
t++;
pppoe_valid = 1;
- } else if (!pppoe_spec && !pppoe_mask) {
- list[t].type = ICE_PPPOE;
}
-
break;
case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
@@ -953,18 +918,15 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
if (pppoe_valid)
t--;
list[t].type = ICE_PPPOE;
- if (pppoe_proto_mask->proto_id == UINT16_MAX) {
+ if (pppoe_proto_mask->proto_id) {
list[t].h_u.pppoe_hdr.ppp_prot_id =
pppoe_proto_spec->proto_id;
list[t].m_u.pppoe_hdr.ppp_prot_id =
- UINT16_MAX;
+ pppoe_proto_mask->proto_id;
input_set |= ICE_INSET_PPPOE_PROTO;
}
t++;
- } else if (!pppoe_proto_spec && !pppoe_proto_mask) {
- list[t].type = ICE_PPPOE;
}
-
break;
case RTE_FLOW_ITEM_TYPE_VOID:
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v2 04/13] net/ice: add support for MAC VLAN rule
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 00/13] add switch filter support for intel DCF Wei Zhao
` (2 preceding siblings ...)
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 03/13] net/ice: change switch parser to support flexible mask Wei Zhao
@ 2020-04-02 6:46 ` Wei Zhao
2020-04-02 9:21 ` Lu, Nannan
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 05/13] net/ice: change default tunnel type Wei Zhao
` (9 subsequent siblings)
13 siblings, 1 reply; 69+ messages in thread
From: Wei Zhao @ 2020-04-02 6:46 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, yuan.peng, nannan.lu, qi.fu, haiyue.wang, Wei Zhao
This patch adds support for MAC VLAN rules,
enabling the switch filter to direct packets based on
MAC address and VLAN ID.
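A minimal usage sketch (the MAC address, VLAN ID and VF id below are
illustrative, not taken from this patch):

    /* Direct packets with a given destination MAC and VLAN ID 100
     * to VF 1 through the switch filter.
     */
    struct rte_flow_item_eth eth_spec = {
            .dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
    };
    struct rte_flow_item_eth eth_mask = {
            .dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
    };
    struct rte_flow_item_vlan vlan_spec = { .tci = RTE_BE16(100) };
    struct rte_flow_item_vlan vlan_mask = { .tci = RTE_BE16(0x0FFF) };
    struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH,
              .spec = &eth_spec, .mask = &eth_mask },
            { .type = RTE_FLOW_ITEM_TYPE_VLAN,
              .spec = &vlan_spec, .mask = &vlan_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    struct rte_flow_action_vf vf_act = { .id = 1 };
    struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf_act },
            { .type = RTE_FLOW_ACTION_TYPE_END },
    };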
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_switch_filter.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 4edaea3f5..ed02d9805 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -29,6 +29,9 @@
#define ICE_SW_INSET_ETHER ( \
ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
+#define ICE_SW_INSET_MAC_VLAN ( \
+ ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
+ ICE_INSET_VLAN_OUTER)
#define ICE_SW_INSET_MAC_IPV4 ( \
ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
@@ -107,6 +110,8 @@ static struct
ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
{pattern_ethertype,
ICE_SW_INSET_ETHER, ICE_INSET_NONE},
+ {pattern_ethertype_vlan,
+ ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
{pattern_eth_ipv4,
ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
{pattern_eth_ipv4_udp,
@@ -149,6 +154,8 @@ static struct
ice_pattern_match_item ice_switch_pattern_dist_os[] = {
{pattern_ethertype,
ICE_SW_INSET_ETHER, ICE_INSET_NONE},
+ {pattern_ethertype_vlan,
+ ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
{pattern_eth_arp,
ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv4,
@@ -179,6 +186,8 @@ ice_pattern_match_item ice_switch_pattern_dist_os[] = {
static struct
ice_pattern_match_item ice_switch_pattern_perm[] = {
+ {pattern_ethertype_vlan,
+ ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
{pattern_eth_ipv4,
ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
{pattern_eth_ipv4_udp,
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v2 05/13] net/ice: change default tunnel type
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 00/13] add switch filter support for intel DCF Wei Zhao
` (3 preceding siblings ...)
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 04/13] net/ice: add support for MAC VLAN rule Wei Zhao
@ 2020-04-02 6:46 ` Wei Zhao
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 06/13] net/ice: add action number check for switch Wei Zhao
` (8 subsequent siblings)
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-02 6:46 UTC (permalink / raw)
To: dev
Cc: qi.z.zhang, yuan.peng, nannan.lu, qi.fu, haiyue.wang, stable, Wei Zhao
The default tunnel type for the switch filter is changed to
the new definition ICE_SW_TUN_AND_NON_TUN so that a rule
will apply to more packet types.
Cc: stable@dpdk.org
Fixes: 47d460d63233 ("net/ice: rework switch filter")
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_switch_filter.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index ed02d9805..d9bdf9637 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -1091,7 +1091,8 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
uint16_t lkups_num = 0;
const struct rte_flow_item *item = pattern;
uint16_t item_num = 0;
- enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
+ enum ice_sw_tunnel_type tun_type =
+ ICE_SW_TUN_AND_NON_TUN;
struct ice_pattern_match_item *pattern_match_item = NULL;
for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v2 06/13] net/ice: add action number check for switch
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 00/13] add switch filter support for intel DCF Wei Zhao
` (4 preceding siblings ...)
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 05/13] net/ice: change default tunnel type Wei Zhao
@ 2020-04-02 6:46 ` Wei Zhao
2020-04-02 8:29 ` Zhang, Qi Z
2020-04-03 1:49 ` Lu, Nannan
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 07/13] net/ice: add support for ESP/AH/L2TP Wei Zhao
` (7 subsequent siblings)
13 siblings, 2 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-02 6:46 UTC (permalink / raw)
To: dev
Cc: qi.z.zhang, yuan.peng, nannan.lu, qi.fu, haiyue.wang, stable, Wei Zhao
Only one action is allowed for a DCF or PF
switch filter rule; large actions are not supported.
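For illustration, a hypothetical rule carrying two fate actions is
now rejected at parse time instead of being partially programmed:

    /* Both QUEUE and DROP are fate actions; with this check the
     * parser fails such a rule with "Invalid action number".
     */
    struct rte_flow_action_queue q_conf = { .index = 0 };
    struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q_conf },
            { .type = RTE_FLOW_ACTION_TYPE_DROP },
            { .type = RTE_FLOW_ACTION_TYPE_END },
    };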
Cc: stable@dpdk.org
Fixes: 47d460d63233 ("net/ice: rework switch filter")
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_switch_filter.c | 48 +++++++++++++++++++++++++++++
1 file changed, 48 insertions(+)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index d9bdf9637..cc48f22dd 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -1073,6 +1073,46 @@ ice_switch_parse_action(struct ice_pf *pf,
return -rte_errno;
}
+static int
+ice_switch_check_action(const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action *action;
+ enum rte_flow_action_type action_type;
+ uint16_t actions_num = 0;
+
+ for (action = actions; action->type !=
+ RTE_FLOW_ACTION_TYPE_END; action++) {
+ action_type = action->type;
+ switch (action_type) {
+ case RTE_FLOW_ACTION_TYPE_VF:
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ actions_num++;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ continue;
+ default:
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "Invalid action type");
+ return -rte_errno;
+ }
+ }
+
+ if (actions_num > 1) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "Invalid action number");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
static int
ice_switch_parse_pattern_action(struct ice_adapter *ad,
struct ice_pattern_match_item *array,
@@ -1158,6 +1198,14 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
goto error;
}
+ ret = ice_switch_check_action(actions, error);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Invalid input action number");
+ goto error;
+ }
+
if (ad->hw.dcf_enabled)
ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
else
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v2 07/13] net/ice: add support for ESP/AH/L2TP
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 00/13] add switch filter support for intel DCF Wei Zhao
` (5 preceding siblings ...)
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 06/13] net/ice: add action number check for switch Wei Zhao
@ 2020-04-02 6:46 ` Wei Zhao
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 08/13] net/ice: add support for PFCP Wei Zhao
` (6 subsequent siblings)
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-02 6:46 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, yuan.peng, nannan.lu, qi.fu, haiyue.wang, Wei Zhao
This patch adds support for ESP/AH/L2TP packets,
enabling the switch filter to direct IPv6 packets with
an ESP/AH/L2TP payload to a specific action.
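The ESP/AH/L2TPv3 items act purely as profile selectors here, so the
parser requires them to carry no spec/mask. A minimal sketch of such a
pattern (illustrative, not taken from this patch):

    struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV6 },
            { .type = RTE_FLOW_ITEM_TYPE_ESP }, /* spec/mask left NULL */
            { .type = RTE_FLOW_ITEM_TYPE_END },
    };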
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_generic_flow.c | 19 +++++++
drivers/net/ice/ice_generic_flow.h | 9 +++
drivers/net/ice/ice_switch_filter.c | 87 +++++++++++++++++++++++++++--
3 files changed, 109 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 0fdc7e617..189ef6c4a 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -1382,6 +1382,25 @@ enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6_icmp6[] = {
RTE_FLOW_ITEM_TYPE_ICMP6,
RTE_FLOW_ITEM_TYPE_END,
};
+enum rte_flow_item_type pattern_eth_ipv6_esp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_ESP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+enum rte_flow_item_type pattern_eth_ipv6_ah[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_AH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+enum rte_flow_item_type pattern_eth_ipv6_l2tp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
typedef struct ice_flow_engine * (*parse_engine_t)(struct ice_adapter *ad,
struct rte_flow *flow,
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
index 3361ecbd9..006fd00b3 100644
--- a/drivers/net/ice/ice_generic_flow.h
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -391,6 +391,15 @@ extern enum rte_flow_item_type pattern_eth_pppoes_ipv6_icmp6[];
extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv6_icmp6[];
extern enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6_icmp6[];
+/* ESP */
+extern enum rte_flow_item_type pattern_eth_ipv6_esp[];
+
+/* AH */
+extern enum rte_flow_item_type pattern_eth_ipv6_ah[];
+
+/* L2TP */
+extern enum rte_flow_item_type pattern_eth_ipv6_l2tp[];
+
struct ice_adapter;
extern const struct rte_flow_ops ice_flow_ops;
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index cc48f22dd..9c87a16dd 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -148,6 +148,12 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
{pattern_eth_vlan_pppoes_proto,
ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
+ {pattern_eth_ipv6_esp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_ah,
+ ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_l2tp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
};
static struct
@@ -212,6 +218,12 @@ ice_pattern_match_item ice_switch_pattern_perm[] = {
ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
+ {pattern_eth_ipv6_esp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_ah,
+ ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_l2tp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
};
static int
@@ -319,7 +331,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
struct rte_flow_error *error,
struct ice_adv_lkup_elem *list,
uint16_t *lkups_num,
- enum ice_sw_tunnel_type tun_type)
+ enum ice_sw_tunnel_type *tun_type)
{
const struct rte_flow_item *item = pattern;
enum rte_flow_item_type item_type;
@@ -335,10 +347,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
*pppoe_proto_mask;
+ const struct rte_flow_item_esp *esp_spec, *esp_mask;
+ const struct rte_flow_item_ah *ah_spec, *ah_mask;
+ const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
uint64_t input_set = ICE_INSET_NONE;
uint16_t j, t = 0;
uint16_t tunnel_valid = 0;
uint16_t pppoe_valid = 0;
+ uint16_t ipv6_valiad = 0;
for (item = pattern; item->type !=
@@ -504,6 +520,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
case RTE_FLOW_ITEM_TYPE_IPV6:
ipv6_spec = item->spec;
ipv6_mask = item->mask;
+ ipv6_valiad = 1;
if (ipv6_spec && ipv6_mask) {
if (ipv6_mask->hdr.payload_len) {
rte_flow_error_set(error, EINVAL,
@@ -642,7 +659,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
input_set |=
ICE_INSET_UDP_DST_PORT;
}
- if (tun_type == ICE_SW_TUN_VXLAN &&
+ if (*tun_type == ICE_SW_TUN_VXLAN &&
tunnel_valid == 0)
list[t].type = ICE_UDP_OF;
else
@@ -938,6 +955,48 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
break;
+ case RTE_FLOW_ITEM_TYPE_ESP:
+ esp_spec = item->spec;
+ esp_mask = item->mask;
+ if (esp_spec || esp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid esp item");
+ return -ENOTSUP;
+ }
+ if (ipv6_valiad)
+ *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_AH:
+ ah_spec = item->spec;
+ ah_mask = item->mask;
+ if (ah_spec || ah_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ah item");
+ return -ENOTSUP;
+ }
+ if (ipv6_valiad)
+ *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
+ l2tp_spec = item->spec;
+ l2tp_mask = item->mask;
+ if (l2tp_spec || l2tp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid l2tp item");
+ return -ENOTSUP;
+ }
+ if (ipv6_valiad)
+ *tun_type = ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
+ break;
+
case RTE_FLOW_ITEM_TYPE_VOID:
break;
@@ -1113,6 +1172,21 @@ ice_switch_check_action(const struct rte_flow_action *actions,
return 0;
}
+static bool
+ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
+{
+ switch (tun_type) {
+ case ICE_SW_TUN_PROFID_IPV6_ESP:
+ case ICE_SW_TUN_PROFID_IPV6_AH:
+ case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
static int
ice_switch_parse_pattern_action(struct ice_adapter *ad,
struct ice_pattern_match_item *array,
@@ -1168,8 +1242,6 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
return -rte_errno;
}
- rule_info.tun_type = tun_type;
-
sw_meta_ptr =
rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
if (!sw_meta_ptr) {
@@ -1189,8 +1261,9 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
}
inputset = ice_switch_inset_get
- (pattern, error, list, &lkups_num, tun_type);
- if (!inputset || (inputset & ~pattern_match_item->input_set_mask)) {
+ (pattern, error, list, &lkups_num, &tun_type);
+ if ((!inputset && !ice_is_profile_rule(tun_type)) ||
+ (inputset & ~pattern_match_item->input_set_mask)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
pattern,
@@ -1198,6 +1271,8 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
goto error;
}
+ rule_info.tun_type = tun_type;
+
ret = ice_switch_check_action(actions, error);
if (ret) {
rte_flow_error_set(error, EINVAL,
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v2 08/13] net/ice: add support for PFCP
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 00/13] add switch filter support for intel DCF Wei Zhao
` (6 preceding siblings ...)
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 07/13] net/ice: add support for ESP/AH/L2TP Wei Zhao
@ 2020-04-02 6:46 ` Wei Zhao
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 09/13] net/ice: add support for NAT-T Wei Zhao
` (5 subsequent siblings)
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-02 6:46 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, yuan.peng, nannan.lu, qi.fu, haiyue.wang, Wei Zhao
This patch adds switch filter support for PFCP packets,
enabling the switch filter to direct IPv4/IPv6 packets with
a PFCP session or node payload to a specific action.
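A minimal sketch of selecting the PFCP session profile (values
illustrative; per the mask checks below, only the S field may be
matched):

    /* s_field = 1 marks a PFCP session packet; combined with an IPv6
     * item this selects ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION.
     */
    struct rte_flow_item_pfcp pfcp_spec = { .s_field = 0x01 };
    struct rte_flow_item_pfcp pfcp_mask = { .s_field = 0x01 };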
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_generic_flow.c | 15 +++++++
drivers/net/ice/ice_generic_flow.h | 6 +++
drivers/net/ice/ice_switch_filter.c | 62 +++++++++++++++++++++++++++++
3 files changed, 83 insertions(+)
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 189ef6c4a..04dcaba08 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -1400,6 +1400,21 @@ enum rte_flow_item_type pattern_eth_ipv6_l2tp[] = {
RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
RTE_FLOW_ITEM_TYPE_END,
};
+enum rte_flow_item_type pattern_eth_ipv4_pfcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_PFCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+enum rte_flow_item_type pattern_eth_ipv6_pfcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_PFCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
typedef struct ice_flow_engine * (*parse_engine_t)(struct ice_adapter *ad,
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
index 006fd00b3..65cd64c7f 100644
--- a/drivers/net/ice/ice_generic_flow.h
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -400,6 +400,12 @@ extern enum rte_flow_item_type pattern_eth_ipv6_ah[];
/* L2TP */
extern enum rte_flow_item_type pattern_eth_ipv6_l2tp[];
+/* PFCP */
+extern enum rte_flow_item_type pattern_eth_ipv4_pfcp[];
+extern enum rte_flow_item_type pattern_eth_ipv6_pfcp[];
+
+
+
struct ice_adapter;
extern const struct rte_flow_ops ice_flow_ops;
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 9c87a16dd..9b4b9346c 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -154,6 +154,10 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv6_l2tp,
ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv4_pfcp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_pfcp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
};
static struct
@@ -224,6 +228,10 @@ ice_pattern_match_item ice_switch_pattern_perm[] = {
ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv6_l2tp,
ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv4_pfcp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_pfcp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
};
static int
@@ -350,6 +358,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
const struct rte_flow_item_esp *esp_spec, *esp_mask;
const struct rte_flow_item_ah *ah_spec, *ah_mask;
const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
+ const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
uint64_t input_set = ICE_INSET_NONE;
uint16_t j, t = 0;
uint16_t tunnel_valid = 0;
@@ -996,6 +1005,55 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
if (ipv6_valiad)
*tun_type = ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
break;
+ case RTE_FLOW_ITEM_TYPE_PFCP:
+ pfcp_spec = item->spec;
+ pfcp_mask = item->mask;
+ /* Check if PFCP item is used to describe protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
+ */
+ if ((!pfcp_spec && pfcp_mask) ||
+ (pfcp_spec && !pfcp_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid PFCP item");
+ return -ENOTSUP;
+ }
+ if (pfcp_spec && pfcp_mask) {
+ /* Check pfcp mask and update input set */
+ if (pfcp_mask->msg_type ||
+ pfcp_mask->msg_len ||
+ pfcp_mask->seid) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid pfcp mask");
+ return -ENOTSUP;
+ }
+ if (pfcp_mask->s_field &&
+ pfcp_spec->s_field == 0x01 &&
+ ipv6_valiad)
+ *tun_type =
+ ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
+ else if (pfcp_mask->s_field &&
+ pfcp_spec->s_field == 0x01)
+ *tun_type =
+ ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
+ else if (pfcp_mask->s_field &&
+ !pfcp_spec->s_field &&
+ ipv6_valiad)
+ *tun_type =
+ ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
+ else if (pfcp_mask->s_field &&
+ !pfcp_spec->s_field)
+ *tun_type =
+ ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
+ else
+ return -ENOTSUP;
+ }
+ break;
+
case RTE_FLOW_ITEM_TYPE_VOID:
break;
@@ -1179,6 +1237,10 @@ ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
case ICE_SW_TUN_PROFID_IPV6_ESP:
case ICE_SW_TUN_PROFID_IPV6_AH:
case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
+ case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
+ case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
+ case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
+ case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
return true;
default:
break;
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v2 09/13] net/ice: add support for NAT-T
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 00/13] add switch filter support for intel DCF Wei Zhao
` (7 preceding siblings ...)
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 08/13] net/ice: add support for PFCP Wei Zhao
@ 2020-04-02 6:46 ` Wei Zhao
2020-04-02 8:45 ` Zhang, Qi Z
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 10/13] net/ice: add more flow support for permit mode Wei Zhao
` (4 subsequent siblings)
13 siblings, 1 reply; 69+ messages in thread
From: Wei Zhao @ 2020-04-02 6:46 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, yuan.peng, nannan.lu, qi.fu, haiyue.wang, Wei Zhao
This patch adds switch filter support for NAT-T packets,
enabling the switch filter to direct IPv6 packets with
a NAT-T payload to a specific action.
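A sketch of a NAT-T pattern (illustrative): it is the UDP item ahead
of ESP that switches the profile to ICE_SW_TUN_PROFID_IPV6_NAT_T:

    struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV6 },
            { .type = RTE_FLOW_ITEM_TYPE_UDP },
            { .type = RTE_FLOW_ITEM_TYPE_ESP }, /* spec/mask left NULL */
            { .type = RTE_FLOW_ITEM_TYPE_END },
    };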
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_generic_flow.c | 14 ++++++++++++++
drivers/net/ice/ice_generic_flow.h | 2 ++
drivers/net/ice/ice_switch_filter.c | 19 +++++++++++++++++--
3 files changed, 33 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 04dcaba08..3365aeb86 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -1394,6 +1394,20 @@ enum rte_flow_item_type pattern_eth_ipv6_ah[] = {
RTE_FLOW_ITEM_TYPE_AH,
RTE_FLOW_ITEM_TYPE_END,
};
+enum rte_flow_item_type pattern_eth_ipv6_udp_esp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_ESP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+enum rte_flow_item_type pattern_eth_ipv6_udp_ah[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_AH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
enum rte_flow_item_type pattern_eth_ipv6_l2tp[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV6,
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
index 65cd64c7f..25badf192 100644
--- a/drivers/net/ice/ice_generic_flow.h
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -393,9 +393,11 @@ extern enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6_icmp6[];
/* ESP */
extern enum rte_flow_item_type pattern_eth_ipv6_esp[];
+extern enum rte_flow_item_type pattern_eth_ipv6_udp_esp[];
/* AH */
extern enum rte_flow_item_type pattern_eth_ipv6_ah[];
+extern enum rte_flow_item_type pattern_eth_ipv6_udp_ah[];
/* L2TP */
extern enum rte_flow_item_type pattern_eth_ipv6_l2tp[];
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 9b4b9346c..4248b8911 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -150,8 +150,12 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
{pattern_eth_ipv6_esp,
ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_udp_esp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv6_ah,
ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_udp_ah,
+ ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv6_l2tp,
ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv4_pfcp,
@@ -224,8 +228,12 @@ ice_pattern_match_item ice_switch_pattern_perm[] = {
ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
{pattern_eth_ipv6_esp,
ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_udp_esp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv6_ah,
ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_udp_ah,
+ ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv6_l2tp,
ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv4_pfcp,
@@ -364,6 +372,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
uint16_t tunnel_valid = 0;
uint16_t pppoe_valid = 0;
uint16_t ipv6_valiad = 0;
+ uint16_t udp_valiad = 0;
for (item = pattern; item->type !=
@@ -642,6 +651,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
case RTE_FLOW_ITEM_TYPE_UDP:
udp_spec = item->spec;
udp_mask = item->mask;
+ udp_valiad = 1;
if (udp_spec && udp_mask) {
/* Check UDP mask and update input set*/
if (udp_mask->hdr.dgram_len ||
@@ -974,7 +984,9 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
"Invalid esp item");
return -ENOTSUP;
}
- if (ipv6_valiad)
+ if (ipv6_valiad && udp_valiad)
+ *tun_type = ICE_SW_TUN_PROFID_IPV6_NAT_T;
+ else if (ipv6_valiad)
*tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
break;
@@ -988,7 +1000,9 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
"Invalid ah item");
return -ENOTSUP;
}
- if (ipv6_valiad)
+ if (ipv6_valiad && udp_valiad)
+ *tun_type = ICE_SW_TUN_PROFID_IPV6_NAT_T;
+ else if (ipv6_valiad)
*tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
break;
@@ -1237,6 +1251,7 @@ ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
case ICE_SW_TUN_PROFID_IPV6_ESP:
case ICE_SW_TUN_PROFID_IPV6_AH:
case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
+ case ICE_SW_TUN_PROFID_IPV6_NAT_T:
case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v2 10/13] net/ice: add more flow support for permit mode
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 00/13] add switch filter support for intel DCF Wei Zhao
` (8 preceding siblings ...)
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 09/13] net/ice: add support for NAT-T Wei Zhao
@ 2020-04-02 6:46 ` Wei Zhao
2020-04-02 8:45 ` Zhang, Qi Z
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 11/13] net/ice: fix input set of VLAN item Wei Zhao
` (3 subsequent siblings)
13 siblings, 1 reply; 69+ messages in thread
From: Wei Zhao @ 2020-04-02 6:46 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, yuan.peng, nannan.lu, qi.fu, haiyue.wang, Wei Zhao
This patch adds permit mode switch filter support
for more flow patterns in PF-only mode.
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_switch_filter.c | 14 ++++++++++++++
1 file changed, 14 insertions(+)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 4248b8911..81d069e99 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -200,6 +200,8 @@ ice_pattern_match_item ice_switch_pattern_dist_os[] = {
static struct
ice_pattern_match_item ice_switch_pattern_perm[] = {
+ {pattern_ethertype,
+ ICE_SW_INSET_ETHER, ICE_INSET_NONE},
{pattern_ethertype_vlan,
ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
{pattern_eth_ipv4,
@@ -226,6 +228,18 @@ ice_pattern_match_item ice_switch_pattern_perm[] = {
ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
+ {pattern_eth_pppoed,
+ ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
+ {pattern_eth_vlan_pppoed,
+ ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
+ {pattern_eth_pppoes,
+ ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
+ {pattern_eth_vlan_pppoes,
+ ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
+ {pattern_eth_pppoes_proto,
+ ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
+ {pattern_eth_vlan_pppoes_proto,
+ ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
{pattern_eth_ipv6_esp,
ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv6_udp_esp,
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v2 11/13] net/ice: fix input set of VLAN item
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 00/13] add switch filter support for intel DCF Wei Zhao
` (9 preceding siblings ...)
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 10/13] net/ice: add more flow support for permit mode Wei Zhao
@ 2020-04-02 6:46 ` Wei Zhao
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 12/13] net/ice: enable flow redirect on switch Wei Zhao
` (2 subsequent siblings)
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-02 6:46 UTC (permalink / raw)
To: dev
Cc: qi.z.zhang, yuan.peng, nannan.lu, qi.fu, haiyue.wang, stable, Wei Zhao
The input set for the inner type of a VLAN item should
be ICE_INSET_ETHERTYPE, not ICE_INSET_VLAN_OUTER.
This MAC VLAN filter is also part of the DCF switch filter.
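For example (values illustrative), a rule that matches the ethertype
carried inside the VLAN tag now contributes ICE_INSET_ETHERTYPE to the
input set:

    /* Match IPv4 (0x0800) as the VLAN-encapsulated ethertype. */
    struct rte_flow_item_vlan vlan_spec = {
            .inner_type = RTE_BE16(0x0800),
    };
    struct rte_flow_item_vlan vlan_mask = {
            .inner_type = RTE_BE16(0xFFFF),
    };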
Cc: stable@dpdk.org
Fixes: 47d460d63233 ("net/ice: rework switch filter")
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_switch_filter.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 81d069e99..686f9c3e3 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -911,7 +911,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
vlan_spec->inner_type;
list[t].m_u.vlan_hdr.type =
vlan_mask->inner_type;
- input_set |= ICE_INSET_VLAN_OUTER;
+ input_set |= ICE_INSET_ETHERTYPE;
}
t++;
}
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v2 12/13] net/ice: enable flow redirect on switch
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 00/13] add switch filter support for intel DCF Wei Zhao
` (10 preceding siblings ...)
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 11/13] net/ice: fix input set of VLAN item Wei Zhao
@ 2020-04-02 6:46 ` Wei Zhao
2020-04-02 7:34 ` Wang, Haiyue
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 13/13] net/ice: redirect switch rule to new VSI Wei Zhao
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 00/13] add switch filter support for intel DCF Wei Zhao
13 siblings, 1 reply; 69+ messages in thread
From: Wei Zhao @ 2020-04-02 6:46 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, yuan.peng, nannan.lu, qi.fu, haiyue.wang, Beilei Xing
Enable flow redirect on the switch; currently only
VSI redirect is supported.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/ice/ice_switch_filter.c | 74 +++++++++++++++++++++++++++++
1 file changed, 74 insertions(+)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 686f9c3e3..61adb518c 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -1420,6 +1420,79 @@ ice_switch_query(struct ice_adapter *ad __rte_unused,
return -rte_errno;
}
+static int
+ice_switch_redirect(struct ice_adapter *ad,
+ struct rte_flow *flow,
+ struct ice_flow_redirect *rd)
+{
+ struct ice_rule_query_data *rdata = flow->rule;
+ struct ice_adv_fltr_mgmt_list_entry *list_itr;
+ struct ice_adv_lkup_elem *lkups_dp = NULL;
+ struct LIST_HEAD_TYPE *list_head;
+ struct ice_adv_rule_info rinfo;
+ struct ice_hw *hw = &ad->hw;
+ struct ice_switch_info *sw;
+ uint16_t lkups_cnt;
+ int ret;
+
+ sw = hw->switch_info;
+ if (!sw->recp_list[rdata->rid].recp_created)
+ return -EINVAL;
+
+ if (rd->type != ICE_FLOW_REDIRECT_VSI)
+ return -ENOTSUP;
+
+ list_head = &sw->recp_list[rdata->rid].filt_rules;
+ LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
+ list_entry) {
+ rinfo = list_itr->rule_info;
+ if (rinfo.fltr_rule_id == rdata->rule_id &&
+ rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
+ rinfo.sw_act.vsi_handle == rd->vsi_handle) {
+ lkups_cnt = list_itr->lkups_cnt;
+ lkups_dp = (struct ice_adv_lkup_elem *)
+ ice_memdup(hw, list_itr->lkups,
+ sizeof(*list_itr->lkups) *
+ lkups_cnt, ICE_NONDMA_TO_NONDMA);
+ if (!lkups_dp) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory.");
+ return -EINVAL;
+ }
+
+ break;
+ }
+ }
+
+ if (!lkups_dp)
+ return 0;
+
+ /* Remove the old rule */
+ ret = ice_rem_adv_rule(hw, list_itr->lkups,
+ lkups_cnt, &rinfo);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
+ rdata->rule_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Update VSI context */
+ hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
+
+ /* Replay the rule */
+ ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
+ &rinfo, rdata);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to replay the rule");
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ ice_free(hw, lkups_dp);
+ return ret;
+}
+
static int
ice_switch_init(struct ice_adapter *ad)
{
@@ -1465,6 +1538,7 @@ ice_flow_engine ice_switch_engine = {
.create = ice_switch_create,
.destroy = ice_switch_destroy,
.query_count = ice_switch_query,
+ .redirect = ice_switch_redirect,
.free = ice_switch_filter_rule_free,
.type = ICE_FLOW_ENGINE_SWITCH,
};
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v2 13/13] net/ice: redirect switch rule to new VSI
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 00/13] add switch filter support for intel DCF Wei Zhao
` (11 preceding siblings ...)
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 12/13] net/ice: enable flow redirect on switch Wei Zhao
@ 2020-04-02 6:46 ` Wei Zhao
2020-04-02 7:32 ` Wang, Haiyue
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 00/13] add switch filter support for intel DCF Wei Zhao
13 siblings, 1 reply; 69+ messages in thread
From: Wei Zhao @ 2020-04-02 6:46 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, yuan.peng, nannan.lu, qi.fu, haiyue.wang, Beilei Xing
After a VF reset, the VF's VSI number may change, so
any switch rule that forwards packets to the old
VSI number must be redirected to the new VSI
number.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/ice/ice_dcf_parent.c | 22 +++++++++++++++++++---
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index 37f0e2be2..e05b6b3e5 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -19,6 +19,8 @@ ice_dcf_update_vsi_ctx(struct ice_hw *hw, uint16_t vsi_handle,
uint16_t vsi_map)
{
struct ice_vsi_ctx *vsi_ctx;
+ bool first_update = false;
+ uint16_t new_vsi_num;
if (unlikely(vsi_handle >= ICE_MAX_VSI)) {
PMD_DRV_LOG(ERR, "Invalid vsi handle %u", vsi_handle);
@@ -35,11 +37,25 @@ ice_dcf_update_vsi_ctx(struct ice_hw *hw, uint16_t vsi_handle,
vsi_handle);
return;
}
+ hw->vsi_ctx[vsi_handle] = vsi_ctx;
+ first_update = true;
}
- vsi_ctx->vsi_num = (vsi_map & VIRTCHNL_DCF_VF_VSI_ID_M) >>
- VIRTCHNL_DCF_VF_VSI_ID_S;
- hw->vsi_ctx[vsi_handle] = vsi_ctx;
+ new_vsi_num = (vsi_map & VIRTCHNL_DCF_VF_VSI_ID_M) >>
+ VIRTCHNL_DCF_VF_VSI_ID_S;
+
+ /* Redirect rules if vsi mapping table changes. */
+ if (!first_update && vsi_ctx->vsi_num != new_vsi_num) {
+ struct ice_flow_redirect rd;
+
+ memset(&rd, 0, sizeof(struct ice_flow_redirect));
+ rd.type = ICE_FLOW_REDIRECT_VSI;
+ rd.vsi_handle = vsi_handle;
+ rd.new_vsi_num = new_vsi_num;
+ ice_flow_redirect((struct ice_adapter *)hw->back, &rd);
+ } else {
+ vsi_ctx->vsi_num = new_vsi_num;
+ }
PMD_DRV_LOG(DEBUG, "VF%u is assigned with vsi number %u",
vsi_handle, vsi_ctx->vsi_num);
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* Re: [dpdk-dev] [PATCH v2 13/13] net/ice: redirect switch rule to new VSI
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 13/13] net/ice: redirect switch rule to new VSI Wei Zhao
@ 2020-04-02 7:32 ` Wang, Haiyue
0 siblings, 0 replies; 69+ messages in thread
From: Wang, Haiyue @ 2020-04-02 7:32 UTC (permalink / raw)
To: Zhao1, Wei, dev; +Cc: Zhang, Qi Z, Peng, Yuan, Lu, Nannan, Fu, Qi, Xing, Beilei
> -----Original Message-----
> From: Zhao1, Wei <wei.zhao1@intel.com>
> Sent: Thursday, April 2, 2020 14:46
> To: dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Peng, Yuan <yuan.peng@intel.com>; Lu, Nannan
> <nannan.lu@intel.com>; Fu, Qi <qi.fu@intel.com>; Wang, Haiyue <haiyue.wang@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Subject: [PATCH v2 13/13] net/ice: redirect switch rule to new VSI
>
> After VF reset, VF's VSI number may be changed,
> the switch rule which forwards packet to the old
> VSI number should be redirected to the new VSI
> number.
>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> drivers/net/ice/ice_dcf_parent.c | 22 +++++++++++++++++++---
> 1 file changed, 19 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
> index 37f0e2be2..e05b6b3e5 100644
> --- a/drivers/net/ice/ice_dcf_parent.c
> +++ b/drivers/net/ice/ice_dcf_parent.c
> @@ -19,6 +19,8 @@ ice_dcf_update_vsi_ctx(struct ice_hw *hw, uint16_t vsi_handle,
> uint16_t vsi_map)
> {
> struct ice_vsi_ctx *vsi_ctx;
> + bool first_update = false;
> + uint16_t new_vsi_num;
>
> if (unlikely(vsi_handle >= ICE_MAX_VSI)) {
> PMD_DRV_LOG(ERR, "Invalid vsi handle %u", vsi_handle);
> @@ -35,11 +37,25 @@ ice_dcf_update_vsi_ctx(struct ice_hw *hw, uint16_t vsi_handle,
> vsi_handle);
> return;
> }
> + hw->vsi_ctx[vsi_handle] = vsi_ctx;
> + first_update = true;
> }
>
> - vsi_ctx->vsi_num = (vsi_map & VIRTCHNL_DCF_VF_VSI_ID_M) >>
> - VIRTCHNL_DCF_VF_VSI_ID_S;
> - hw->vsi_ctx[vsi_handle] = vsi_ctx;
> + new_vsi_num = (vsi_map & VIRTCHNL_DCF_VF_VSI_ID_M) >>
> + VIRTCHNL_DCF_VF_VSI_ID_S;
> +
> + /* Redirect rules if vsi mapping table changes. */
> + if (!first_update && vsi_ctx->vsi_num != new_vsi_num) {
> + struct ice_flow_redirect rd;
> +
> + memset(&rd, 0, sizeof(struct ice_flow_redirect));
> + rd.type = ICE_FLOW_REDIRECT_VSI;
> + rd.vsi_handle = vsi_handle;
> + rd.new_vsi_num = new_vsi_num;
> + ice_flow_redirect((struct ice_adapter *)hw->back, &rd);
> + } else {
> + vsi_ctx->vsi_num = new_vsi_num;
> + }
>
> PMD_DRV_LOG(DEBUG, "VF%u is assigned with vsi number %u",
> vsi_handle, vsi_ctx->vsi_num);
> --
> 2.19.1
Acked-by: Haiyue Wang <haiyue.wang@intel.com>
^ permalink raw reply [flat|nested] 69+ messages in thread
* Re: [dpdk-dev] [PATCH v2 12/13] net/ice: enable flow redirect on switch
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 12/13] net/ice: enable flow redirect on switch Wei Zhao
@ 2020-04-02 7:34 ` Wang, Haiyue
2020-04-02 7:38 ` Xing, Beilei
0 siblings, 1 reply; 69+ messages in thread
From: Wang, Haiyue @ 2020-04-02 7:34 UTC (permalink / raw)
To: Zhao1, Wei, dev; +Cc: Zhang, Qi Z, Peng, Yuan, Lu, Nannan, Fu, Qi, Xing, Beilei
> -----Original Message-----
> From: Zhao1, Wei <wei.zhao1@intel.com>
> Sent: Thursday, April 2, 2020 14:46
> To: dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Peng, Yuan <yuan.peng@intel.com>; Lu, Nannan
> <nannan.lu@intel.com>; Fu, Qi <qi.fu@intel.com>; Wang, Haiyue <haiyue.wang@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Subject: [PATCH v2 12/13] net/ice: enable flow redirect on switch
>
> Enable flow redirect on switch, currently only
> support VSI redirect.
>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> drivers/net/ice/ice_switch_filter.c | 74 +++++++++++++++++++++++++++++
> 1 file changed, 74 insertions(+)
>
> diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
> index 686f9c3e3..61adb518c 100644
> --- a/drivers/net/ice/ice_switch_filter.c
> +++ b/drivers/net/ice/ice_switch_filter.c
> @@ -1420,6 +1420,79 @@ ice_switch_query(struct ice_adapter *ad __rte_unused,
> return -rte_errno;
> }
>
> +static int
> +ice_switch_redirect(struct ice_adapter *ad,
> + struct rte_flow *flow,
> + struct ice_flow_redirect *rd)
> +{
> + struct ice_rule_query_data *rdata = flow->rule;
> + struct ice_adv_fltr_mgmt_list_entry *list_itr;
> + struct ice_adv_lkup_elem *lkups_dp = NULL;
> + struct LIST_HEAD_TYPE *list_head;
> + struct ice_adv_rule_info rinfo;
> + struct ice_hw *hw = &ad->hw;
> + struct ice_switch_info *sw;
> + uint16_t lkups_cnt;
> + int ret;
> +
> + sw = hw->switch_info;
> + if (!sw->recp_list[rdata->rid].recp_created)
> + return -EINVAL;
> +
> + if (rd->type != ICE_FLOW_REDIRECT_VSI)
> + return -ENOTSUP;
> +
> + list_head = &sw->recp_list[rdata->rid].filt_rules;
> + LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
> + list_entry) {
> + rinfo = list_itr->rule_info;
> + if (rinfo.fltr_rule_id == rdata->rule_id &&
> + rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
> + rinfo.sw_act.vsi_handle == rd->vsi_handle) {
> + lkups_cnt = list_itr->lkups_cnt;
> + lkups_dp = (struct ice_adv_lkup_elem *)
> + ice_memdup(hw, list_itr->lkups,
> + sizeof(*list_itr->lkups) *
> + lkups_cnt, ICE_NONDMA_TO_NONDMA);
> + if (!lkups_dp) {
> + PMD_DRV_LOG(ERR, "Failed to allocate memory.");
> + return -EINVAL;
> + }
> +
> + break;
> + }
> + }
> +
> + if (!lkups_dp)
> + return 0;
> +
> + /* Remove the old rule */
> + ret = ice_rem_adv_rule(hw, list_itr->lkups,
> + lkups_cnt, &rinfo);
> + if (ret) {
> + PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
> + rdata->rule_id);
> + ret = -EINVAL;
> + goto out;
> + }
> +
> + /* Update VSI context */
> + hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
> +
> + /* Replay the rule */
> + ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
> + &rinfo, rdata);
> + if (ret) {
> + PMD_DRV_LOG(ERR, "Failed to replay the rule");
> + ret = -EINVAL;
> + goto out;
This 'goto out' can be removed.
> + }
> +
> +out:
> + ice_free(hw, lkups_dp);
> + return ret;
> +}
> +
> static int
> ice_switch_init(struct ice_adapter *ad)
> {
> @@ -1465,6 +1538,7 @@ ice_flow_engine ice_switch_engine = {
> .create = ice_switch_create,
> .destroy = ice_switch_destroy,
> .query_count = ice_switch_query,
> + .redirect = ice_switch_redirect,
> .free = ice_switch_filter_rule_free,
> .type = ICE_FLOW_ENGINE_SWITCH,
> };
> --
> 2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* Re: [dpdk-dev] [PATCH v2 12/13] net/ice: enable flow redirect on switch
2020-04-02 7:34 ` Wang, Haiyue
@ 2020-04-02 7:38 ` Xing, Beilei
0 siblings, 0 replies; 69+ messages in thread
From: Xing, Beilei @ 2020-04-02 7:38 UTC (permalink / raw)
To: Wang, Haiyue, Zhao1, Wei, dev; +Cc: Zhang, Qi Z, Peng, Yuan, Lu, Nannan, Fu, Qi
> -----Original Message-----
> From: Wang, Haiyue <haiyue.wang@intel.com>
> Sent: Thursday, April 2, 2020 3:35 PM
> To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Peng, Yuan <yuan.peng@intel.com>;
> Lu, Nannan <nannan.lu@intel.com>; Fu, Qi <qi.fu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Subject: RE: [PATCH v2 12/13] net/ice: enable flow redirect on switch
>
> > -----Original Message-----
> > From: Zhao1, Wei <wei.zhao1@intel.com>
> > Sent: Thursday, April 2, 2020 14:46
> > To: dev@dpdk.org
> > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Peng, Yuan
> > <yuan.peng@intel.com>; Lu, Nannan <nannan.lu@intel.com>; Fu, Qi
> > <qi.fu@intel.com>; Wang, Haiyue <haiyue.wang@intel.com>; Xing, Beilei
> > <beilei.xing@intel.com>
> > Subject: [PATCH v2 12/13] net/ice: enable flow redirect on switch
> >
> > Enable flow redirect on switch, currently only support VSI redirect.
> >
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > ---
> > drivers/net/ice/ice_switch_filter.c | 74
> > +++++++++++++++++++++++++++++
> > 1 file changed, 74 insertions(+)
> >
> > diff --git a/drivers/net/ice/ice_switch_filter.c
> > b/drivers/net/ice/ice_switch_filter.c
> > index 686f9c3e3..61adb518c 100644
> > --- a/drivers/net/ice/ice_switch_filter.c
> > +++ b/drivers/net/ice/ice_switch_filter.c
> > @@ -1420,6 +1420,79 @@ ice_switch_query(struct ice_adapter *ad __rte_unused,
> > return -rte_errno;
> > }
> >
> > +static int
> > +ice_switch_redirect(struct ice_adapter *ad,
> > + struct rte_flow *flow,
> > + struct ice_flow_redirect *rd)
> > +{
> > +struct ice_rule_query_data *rdata = flow->rule;
> > +struct ice_adv_fltr_mgmt_list_entry *list_itr;
> > +struct ice_adv_lkup_elem *lkups_dp = NULL;
> > +struct LIST_HEAD_TYPE *list_head;
> > +struct ice_adv_rule_info rinfo;
> > +struct ice_hw *hw = &ad->hw;
> > +struct ice_switch_info *sw;
> > +uint16_t lkups_cnt;
> > +int ret;
> > +
> > +sw = hw->switch_info;
> > +if (!sw->recp_list[rdata->rid].recp_created)
> > +return -EINVAL;
> > +
> > +if (rd->type != ICE_FLOW_REDIRECT_VSI)
> > +return -ENOTSUP;
> > +
> > +list_head = &sw->recp_list[rdata->rid].filt_rules;
> > +LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
> > + list_entry) {
> > +rinfo = list_itr->rule_info;
> > +if (rinfo.fltr_rule_id == rdata->rule_id &&
> > + rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
> > + rinfo.sw_act.vsi_handle == rd->vsi_handle) {
> > +lkups_cnt = list_itr->lkups_cnt;
> > +lkups_dp = (struct ice_adv_lkup_elem *)
> > +ice_memdup(hw, list_itr->lkups,
> > + sizeof(*list_itr->lkups) *
> > + lkups_cnt, ICE_NONDMA_TO_NONDMA);
> > +if (!lkups_dp) {
> > +PMD_DRV_LOG(ERR, "Failed to allocate memory.");
> > +return -EINVAL;
> > +}
> > +
> > +break;
> > +}
> > +}
> > +
> > +if (!lkups_dp)
> > +return 0;
> > +
> > +/* Remove the old rule */
> > +ret = ice_rem_adv_rule(hw, list_itr->lkups,
> > + lkups_cnt, &rinfo);
> > +if (ret) {
> > +PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
> > + rdata->rule_id);
> > +ret = -EINVAL;
> > +goto out;
> > +}
> > +
> > +/* Update VSI context */
> > +hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
> > +
> > +/* Replay the rule */
> > +ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
> > + &rinfo, rdata);
> > +if (ret) {
> > +PMD_DRV_LOG(ERR, "Failed to replay the rule"); ret = -EINVAL; goto
> > +out;
>
> This 'goto out' can be removed.
Yes, will remove in next version. Thanks.
>
> > +}
> > +
> > +out:
> > +ice_free(hw, lkups_dp);
> > +return ret;
> > +}
> > +
> > static int
> > ice_switch_init(struct ice_adapter *ad)
> > {
> > @@ -1465,6 +1538,7 @@ ice_flow_engine ice_switch_engine = {
> > .create = ice_switch_create,
> > .destroy = ice_switch_destroy,
> > .query_count = ice_switch_query,
> > +.redirect = ice_switch_redirect,
> > .free = ice_switch_filter_rule_free,
> > .type = ICE_FLOW_ENGINE_SWITCH,
> > };
> > --
> > 2.19.1
>
^ permalink raw reply [flat|nested] 69+ messages in thread
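A minimal sketch of how the new .redirect hook might be driven after a VF reset (editor's illustration, not part of the patch; the helper name and flow-list bookkeeping are hypothetical, only the ice_flow_redirect fields visible in the patch are used):

/* Editor's sketch: replay one switch rule onto a VF's new VSI.
 * 'ad' and 'flow' come from the caller; the .redirect op is the
 * hook this patch adds to ice_switch_engine. */
static int
replay_rule_to_new_vsi(struct ice_adapter *ad, struct rte_flow *flow,
		       uint16_t vsi_handle, uint16_t new_vsi_num)
{
	struct ice_flow_redirect rd = {
		.type = ICE_FLOW_REDIRECT_VSI, /* only VSI redirect is supported */
		.vsi_handle = vsi_handle,      /* the VF's VSI handle */
		.new_vsi_num = new_vsi_num,    /* hardware VSI number after reset */
	};

	return ice_switch_engine.redirect(ad, flow, &rd);
}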
* Re: [dpdk-dev] [PATCH v2 06/13] net/ice: add action number check for swicth
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 06/13] net/ice: add action number check for swicth Wei Zhao
@ 2020-04-02 8:29 ` Zhang, Qi Z
2020-04-02 8:31 ` Zhao1, Wei
2020-04-03 1:49 ` Lu, Nannan
1 sibling, 1 reply; 69+ messages in thread
From: Zhang, Qi Z @ 2020-04-02 8:29 UTC (permalink / raw)
To: Zhao1, Wei, dev; +Cc: Peng, Yuan, Lu, Nannan, Fu, Qi, Wang, Haiyue, stable
> -----Original Message-----
> From: Zhao1, Wei <wei.zhao1@intel.com>
> Sent: Thursday, April 2, 2020 2:46 PM
> To: dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Peng, Yuan <yuan.peng@intel.com>;
> Lu, Nannan <nannan.lu@intel.com>; Fu, Qi <qi.fu@intel.com>; Wang, Haiyue
> <haiyue.wang@intel.com>; stable@dpdk.org; Zhao1, Wei
> <wei.zhao1@intel.com>
> Subject: [PATCH v2 06/13] net/ice: add action number check for swicth
>
> The action number can only be one for DCF or PF switch filter, not support
> large action.
There is no "large action" in rte_flow, maybe just "not support multiple actions"?
>
> Cc: stable@dpdk.org
> Fixes: 47d460d63233 ("net/ice: rework switch filter")
>
> Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> ---
> drivers/net/ice/ice_switch_filter.c | 48 +++++++++++++++++++++++++++++
> 1 file changed, 48 insertions(+)
>
> diff --git a/drivers/net/ice/ice_switch_filter.c
> b/drivers/net/ice/ice_switch_filter.c
> index d9bdf9637..cc48f22dd 100644
> --- a/drivers/net/ice/ice_switch_filter.c
> +++ b/drivers/net/ice/ice_switch_filter.c
> @@ -1073,6 +1073,46 @@ ice_switch_parse_action(struct ice_pf *pf,
> return -rte_errno;
> }
>
> +static int
> +ice_switch_check_action(const struct rte_flow_action *actions,
> + struct rte_flow_error *error)
> +{
> + const struct rte_flow_action *action;
> + enum rte_flow_action_type action_type;
> + uint16_t actions_num = 0;
> +
> + for (action = actions; action->type !=
> + RTE_FLOW_ACTION_TYPE_END; action++) {
> + action_type = action->type;
> + switch (action_type) {
> + case RTE_FLOW_ACTION_TYPE_VF:
> + case RTE_FLOW_ACTION_TYPE_RSS:
> + case RTE_FLOW_ACTION_TYPE_QUEUE:
> + case RTE_FLOW_ACTION_TYPE_DROP:
> + actions_num++;
> + break;
> + case RTE_FLOW_ACTION_TYPE_VOID:
> + continue;
> + default:
> + rte_flow_error_set(error,
> + EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
> + actions,
> + "Invalid action type");
> + return -rte_errno;
> + }
> + }
> +
> + if (actions_num > 1) {
> + rte_flow_error_set(error,
> + EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
> + actions,
> + "Invalid action number");
> + return -rte_errno;
> + }
> +
> + return 0;
> +}
> +
> static int
> ice_switch_parse_pattern_action(struct ice_adapter *ad,
> struct ice_pattern_match_item *array,
> @@ -1158,6 +1198,14 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
> goto error;
> }
>
> + ret = ice_switch_check_action(actions, error);
> + if (ret) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> + "Invalid input action number");
> + goto error;
> + }
> +
> if (ad->hw.dcf_enabled)
> ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
> else
> --
> 2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
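A minimal sketch of a rule that satisfies the new check, exactly one counted action before the END terminator (editor's illustration using the public rte_flow API; port and VF ids are made up):

#include <rte_flow.h>
#include <rte_byteorder.h>

/* One counted action (VF) followed by END passes the check; adding
 * e.g. a QUEUE action after VF would now fail with "Invalid action
 * number". VOID actions are skipped and END is not counted. */
static struct rte_flow *
create_single_action_rule(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_eth eth_spec = { .type = RTE_BE16(0x0800) };
	struct rte_flow_item_eth eth_mask = { .type = RTE_BE16(0xffff) };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_vf vf = { .id = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}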
* Re: [dpdk-dev] [PATCH v2 06/13] net/ice: add action number check for swicth
2020-04-02 8:29 ` Zhang, Qi Z
@ 2020-04-02 8:31 ` Zhao1, Wei
0 siblings, 0 replies; 69+ messages in thread
From: Zhao1, Wei @ 2020-04-02 8:31 UTC (permalink / raw)
To: Zhang, Qi Z, dev; +Cc: Peng, Yuan, Lu, Nannan, Fu, Qi, Wang, Haiyue, stable
Ok
> -----Original Message-----
> From: Zhang, Qi Z <qi.z.zhang@intel.com>
> Sent: Thursday, April 2, 2020 4:30 PM
> To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
> Cc: Peng, Yuan <yuan.peng@intel.com>; Lu, Nannan <nannan.lu@intel.com>;
> Fu, Qi <qi.fu@intel.com>; Wang, Haiyue <haiyue.wang@intel.com>;
> stable@dpdk.org
> Subject: RE: [PATCH v2 06/13] net/ice: add action number check for swicth
>
>
>
> > -----Original Message-----
> > From: Zhao1, Wei <wei.zhao1@intel.com>
> > Sent: Thursday, April 2, 2020 2:46 PM
> > To: dev@dpdk.org
> > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Peng, Yuan
> > <yuan.peng@intel.com>; Lu, Nannan <nannan.lu@intel.com>; Fu, Qi
> > <qi.fu@intel.com>; Wang, Haiyue <haiyue.wang@intel.com>;
> > stable@dpdk.org; Zhao1, Wei <wei.zhao1@intel.com>
> > Subject: [PATCH v2 06/13] net/ice: add action number check for swicth
> >
> > The action number can only be one for DCF or PF switch filter, not
> > support large action.
>
> There is no "large action" in rte_flow, maybe just "not support multiple
> actions"?
>
> >
> > Cc: stable@dpdk.org
> > Fixes: 47d460d63233 ("net/ice: rework switch filter")
> >
> > Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> > ---
> > drivers/net/ice/ice_switch_filter.c | 48
> > +++++++++++++++++++++++++++++
> > 1 file changed, 48 insertions(+)
> >
> > diff --git a/drivers/net/ice/ice_switch_filter.c
> > b/drivers/net/ice/ice_switch_filter.c
> > index d9bdf9637..cc48f22dd 100644
> > --- a/drivers/net/ice/ice_switch_filter.c
> > +++ b/drivers/net/ice/ice_switch_filter.c
> > @@ -1073,6 +1073,46 @@ ice_switch_parse_action(struct ice_pf *pf,
> > return -rte_errno;
> > }
> >
> > +static int
> > +ice_switch_check_action(const struct rte_flow_action *actions,
> > + struct rte_flow_error *error)
> > +{
> > + const struct rte_flow_action *action;
> > + enum rte_flow_action_type action_type;
> > + uint16_t actions_num = 0;
> > +
> > + for (action = actions; action->type !=
> > + RTE_FLOW_ACTION_TYPE_END; action++) {
> > + action_type = action->type;
> > + switch (action_type) {
> > + case RTE_FLOW_ACTION_TYPE_VF:
> > + case RTE_FLOW_ACTION_TYPE_RSS:
> > + case RTE_FLOW_ACTION_TYPE_QUEUE:
> > + case RTE_FLOW_ACTION_TYPE_DROP:
> > + actions_num++;
> > + break;
> > + case RTE_FLOW_ACTION_TYPE_VOID:
> > + continue;
> > + default:
> > + rte_flow_error_set(error,
> > + EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
> > + actions,
> > + "Invalid action type");
> > + return -rte_errno;
> > + }
> > + }
> > +
> > + if (actions_num > 1) {
> > + rte_flow_error_set(error,
> > + EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
> > + actions,
> > + "Invalid action number");
> > + return -rte_errno;
> > + }
> > +
> > + return 0;
> > +}
> > +
> > static int
> > ice_switch_parse_pattern_action(struct ice_adapter *ad,
> > struct ice_pattern_match_item *array,
> > @@ -1158,6 +1198,14 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
> > goto error;
> > }
> >
> > + ret = ice_switch_check_action(actions, error);
> > + if (ret) {
> > + rte_flow_error_set(error, EINVAL,
> > + RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> > + "Invalid input action number");
> > + goto error;
> > + }
> > +
> > if (ad->hw.dcf_enabled)
> > ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
> > else
> > --
> > 2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* Re: [dpdk-dev] [PATCH v2 09/13] net/ice: add support for NAT-T
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 09/13] net/ice: add support for NAT-T Wei Zhao
@ 2020-04-02 8:45 ` Zhang, Qi Z
2020-04-02 23:37 ` Zhao1, Wei
0 siblings, 1 reply; 69+ messages in thread
From: Zhang, Qi Z @ 2020-04-02 8:45 UTC (permalink / raw)
To: Zhao1, Wei, dev; +Cc: Peng, Yuan, Lu, Nannan, Fu, Qi, Wang, Haiyue
> -----Original Message-----
> From: Zhao1, Wei <wei.zhao1@intel.com>
> Sent: Thursday, April 2, 2020 2:46 PM
> To: dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Peng, Yuan <yuan.peng@intel.com>;
> Lu, Nannan <nannan.lu@intel.com>; Fu, Qi <qi.fu@intel.com>; Wang, Haiyue
> <haiyue.wang@intel.com>; Zhao1, Wei <wei.zhao1@intel.com>
> Subject: [PATCH v2 09/13] net/ice: add support for NAT-T
Better change to "add support for IPv6 NAT-T", since the patch is only for IPv6
>
> This patch add switch filter support for NAT-T packets, it enable swicth filter to
> direct ipv6 packets with NAT-T payload to specific action.
>
> Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> ---
> drivers/net/ice/ice_generic_flow.c | 14 ++++++++++++++
> drivers/net/ice/ice_generic_flow.h | 2 ++
> drivers/net/ice/ice_switch_filter.c | 19 +++++++++++++++++--
> 3 files changed, 33 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/net/ice/ice_generic_flow.c
> b/drivers/net/ice/ice_generic_flow.c
> index 04dcaba08..3365aeb86 100644
> --- a/drivers/net/ice/ice_generic_flow.c
> +++ b/drivers/net/ice/ice_generic_flow.c
> @@ -1394,6 +1394,20 @@ enum rte_flow_item_type pattern_eth_ipv6_ah[] =
> {
> RTE_FLOW_ITEM_TYPE_AH,
> RTE_FLOW_ITEM_TYPE_END,
> };
> +enum rte_flow_item_type pattern_eth_ipv6_udp_esp[] = {
> + RTE_FLOW_ITEM_TYPE_ETH,
> + RTE_FLOW_ITEM_TYPE_IPV6,
> + RTE_FLOW_ITEM_TYPE_UDP,
> + RTE_FLOW_ITEM_TYPE_ESP,
> + RTE_FLOW_ITEM_TYPE_END,
> +};
> +enum rte_flow_item_type pattern_eth_ipv6_udp_ah[] = {
> + RTE_FLOW_ITEM_TYPE_ETH,
> + RTE_FLOW_ITEM_TYPE_IPV6,
> + RTE_FLOW_ITEM_TYPE_UDP,
> + RTE_FLOW_ITEM_TYPE_AH,
> + RTE_FLOW_ITEM_TYPE_END,
> +};
> enum rte_flow_item_type pattern_eth_ipv6_l2tp[] = {
> RTE_FLOW_ITEM_TYPE_ETH,
> RTE_FLOW_ITEM_TYPE_IPV6,
> diff --git a/drivers/net/ice/ice_generic_flow.h
> b/drivers/net/ice/ice_generic_flow.h
> index 65cd64c7f..25badf192 100644
> --- a/drivers/net/ice/ice_generic_flow.h
> +++ b/drivers/net/ice/ice_generic_flow.h
> @@ -393,9 +393,11 @@ extern enum rte_flow_item_type
> pattern_eth_qinq_pppoes_ipv6_icmp6[];
>
> /* ESP */
> extern enum rte_flow_item_type pattern_eth_ipv6_esp[];
> +extern enum rte_flow_item_type pattern_eth_ipv6_udp_esp[];
>
> /* AH */
> extern enum rte_flow_item_type pattern_eth_ipv6_ah[];
> +extern enum rte_flow_item_type pattern_eth_ipv6_udp_ah[];
>
> /* L2TP */
> extern enum rte_flow_item_type pattern_eth_ipv6_l2tp[]; diff --git
> a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
> index 9b4b9346c..4248b8911 100644
> --- a/drivers/net/ice/ice_switch_filter.c
> +++ b/drivers/net/ice/ice_switch_filter.c
> @@ -150,8 +150,12 @@ ice_pattern_match_item
> ice_switch_pattern_dist_comms[] = {
> ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
> {pattern_eth_ipv6_esp,
> ICE_INSET_NONE, ICE_INSET_NONE},
> + {pattern_eth_ipv6_udp_esp,
> + ICE_INSET_NONE, ICE_INSET_NONE},
> {pattern_eth_ipv6_ah,
> ICE_INSET_NONE, ICE_INSET_NONE},
> + {pattern_eth_ipv6_udp_ah,
> + ICE_INSET_NONE, ICE_INSET_NONE},
> {pattern_eth_ipv6_l2tp,
> ICE_INSET_NONE, ICE_INSET_NONE},
> {pattern_eth_ipv4_pfcp,
> @@ -224,8 +228,12 @@ ice_pattern_match_item ice_switch_pattern_perm[]
> = {
> ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
> {pattern_eth_ipv6_esp,
> ICE_INSET_NONE, ICE_INSET_NONE},
> + {pattern_eth_ipv6_udp_esp,
> + ICE_INSET_NONE, ICE_INSET_NONE},
> {pattern_eth_ipv6_ah,
> ICE_INSET_NONE, ICE_INSET_NONE},
> + {pattern_eth_ipv6_udp_ah,
> + ICE_INSET_NONE, ICE_INSET_NONE},
> {pattern_eth_ipv6_l2tp,
> ICE_INSET_NONE, ICE_INSET_NONE},
> {pattern_eth_ipv4_pfcp,
> @@ -364,6 +372,7 @@ ice_switch_inset_get(const struct rte_flow_item
> pattern[],
> uint16_t tunnel_valid = 0;
> uint16_t pppoe_valid = 0;
> uint16_t ipv6_valiad = 0;
> + uint16_t udp_valiad = 0;
>
>
> for (item = pattern; item->type !=
> @@ -642,6 +651,7 @@ ice_switch_inset_get(const struct rte_flow_item
> pattern[],
> case RTE_FLOW_ITEM_TYPE_UDP:
> udp_spec = item->spec;
> udp_mask = item->mask;
> + udp_valiad = 1;
> if (udp_spec && udp_mask) {
> /* Check UDP mask and update input set*/
> if (udp_mask->hdr.dgram_len ||
> @@ -974,7 +984,9 @@ ice_switch_inset_get(const struct rte_flow_item
> pattern[],
> "Invalid esp item");
> return -ENOTSUP;
> }
> - if (ipv6_valiad)
> + if (ipv6_valiad && udp_valiad)
> + *tun_type = ICE_SW_TUN_PROFID_IPV6_NAT_T;
> + else if (ipv6_valiad)
> *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
> break;
>
> @@ -988,7 +1000,9 @@ ice_switch_inset_get(const struct rte_flow_item
> pattern[],
> "Invalid ah item");
> return -ENOTSUP;
> }
> - if (ipv6_valiad)
> + if (ipv6_valiad && udp_valiad)
> + *tun_type = ICE_SW_TUN_PROFID_IPV6_NAT_T;
> + else if (ipv6_valiad)
> *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
> break;
>
> @@ -1237,6 +1251,7 @@ ice_is_profile_rule(enum ice_sw_tunnel_type
> tun_type)
> case ICE_SW_TUN_PROFID_IPV6_ESP:
> case ICE_SW_TUN_PROFID_IPV6_AH:
> case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
> + case ICE_SW_TUN_PROFID_IPV6_NAT_T:
> case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
> case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
> case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
> --
> 2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
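A minimal sketch of the item sequence this patch maps to ICE_SW_TUN_PROFID_IPV6_NAT_T (editor's illustration, not from the patch):

/* IPv6/UDP-encapsulated ESP, i.e. NAT-T. Without the UDP item the same
 * ESP item still selects ICE_SW_TUN_PROFID_IPV6_ESP; an AH item in
 * place of ESP behaves the same way per the hunks above. */
static const struct rte_flow_item nat_t_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV6 }, /* sets ipv6_valiad */
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },  /* sets udp_valiad */
	{ .type = RTE_FLOW_ITEM_TYPE_ESP },  /* ipv6 + udp selects NAT-T */
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};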
* Re: [dpdk-dev] [PATCH v2 10/13] net/ice: add more flow support for permit mode
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 10/13] net/ice: add more flow support for permit mode Wei Zhao
@ 2020-04-02 8:45 ` Zhang, Qi Z
2020-04-02 9:41 ` Zhao1, Wei
0 siblings, 1 reply; 69+ messages in thread
From: Zhang, Qi Z @ 2020-04-02 8:45 UTC (permalink / raw)
To: Zhao1, Wei, dev; +Cc: Peng, Yuan, Lu, Nannan, Fu, Qi, Wang, Haiyue
> -----Original Message-----
> From: Zhao1, Wei <wei.zhao1@intel.com>
> Sent: Thursday, April 2, 2020 2:46 PM
> To: dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Peng, Yuan <yuan.peng@intel.com>;
> Lu, Nannan <nannan.lu@intel.com>; Fu, Qi <qi.fu@intel.com>; Wang, Haiyue
> <haiyue.wang@intel.com>; Zhao1, Wei <wei.zhao1@intel.com>
> Subject: [PATCH v2 10/13] net/ice: add more flow support for permit mode
>
It should be "permission stage" but not "permit mode"
> This patch add switch filter permit mode support for more flow pattern in pf
> only mode.
>
> Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> ---
> drivers/net/ice/ice_switch_filter.c | 14 ++++++++++++++
> 1 file changed, 14 insertions(+)
>
> diff --git a/drivers/net/ice/ice_switch_filter.c
> b/drivers/net/ice/ice_switch_filter.c
> index 4248b8911..81d069e99 100644
> --- a/drivers/net/ice/ice_switch_filter.c
> +++ b/drivers/net/ice/ice_switch_filter.c
> @@ -200,6 +200,8 @@ ice_pattern_match_item ice_switch_pattern_dist_os[]
> = {
>
> static struct
> ice_pattern_match_item ice_switch_pattern_perm[] = {
> + {pattern_ethertype,
> + ICE_SW_INSET_ETHER, ICE_INSET_NONE},
> {pattern_ethertype_vlan,
> ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
> {pattern_eth_ipv4,
> @@ -226,6 +228,18 @@ ice_pattern_match_item ice_switch_pattern_perm[]
> = {
> ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
> {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
> ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
> + {pattern_eth_pppoed,
> + ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
> + {pattern_eth_vlan_pppoed,
> + ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
> + {pattern_eth_pppoes,
> + ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
> + {pattern_eth_vlan_pppoes,
> + ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
> + {pattern_eth_pppoes_proto,
> + ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
> + {pattern_eth_vlan_pppoes_proto,
> + ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
> {pattern_eth_ipv6_esp,
> ICE_INSET_NONE, ICE_INSET_NONE},
> {pattern_eth_ipv6_udp_esp,
> --
> 2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* Re: [dpdk-dev] [PATCH v2 04/13] net/ice: add support for MAC VLAN rule
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 04/13] net/ice: add support for MAC VLAN rule Wei Zhao
@ 2020-04-02 9:21 ` Lu, Nannan
0 siblings, 0 replies; 69+ messages in thread
From: Lu, Nannan @ 2020-04-02 9:21 UTC (permalink / raw)
To: Zhao1, Wei, dev; +Cc: Zhang, Qi Z, Peng, Yuan, Fu, Qi, Wang, Haiyue
Tested-by: Lu, Nannan <nannan.lu@intel.com>
-----Original Message-----
From: Zhao1, Wei
Sent: Thursday, April 2, 2020 2:46 PM
To: dev@dpdk.org
Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Peng, Yuan <yuan.peng@intel.com>; Lu, Nannan <nannan.lu@intel.com>; Fu, Qi <qi.fu@intel.com>; Wang, Haiyue <haiyue.wang@intel.com>; Zhao1, Wei <wei.zhao1@intel.com>
Subject: [PATCH v2 04/13] net/ice: add support for MAC VLAN rule
This patch adds support for MAC VLAN rules; it enables the switch filter to direct packets based on MAC address and VLAN id.
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_switch_filter.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 4edaea3f5..ed02d9805 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -29,6 +29,9 @@
#define ICE_SW_INSET_ETHER ( \
ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
+#define ICE_SW_INSET_MAC_VLAN ( \
+ ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
+ ICE_INSET_VLAN_OUTER)
#define ICE_SW_INSET_MAC_IPV4 ( \
ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
@@ -107,6 +110,8 @@ static struct
ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
{pattern_ethertype,
ICE_SW_INSET_ETHER, ICE_INSET_NONE},
+ {pattern_ethertype_vlan,
+ ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
{pattern_eth_ipv4,
ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
{pattern_eth_ipv4_udp,
@@ -149,6 +154,8 @@ static struct
ice_pattern_match_item ice_switch_pattern_dist_os[] = {
{pattern_ethertype,
ICE_SW_INSET_ETHER, ICE_INSET_NONE},
+ {pattern_ethertype_vlan,
+ ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
{pattern_eth_arp,
ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv4,
@@ -179,6 +186,8 @@ ice_pattern_match_item ice_switch_pattern_dist_os[] = {
static struct
ice_pattern_match_item ice_switch_pattern_perm[] = {
+ {pattern_ethertype_vlan,
+ ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
{pattern_eth_ipv4,
ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
{pattern_eth_ipv4_udp,
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
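A minimal sketch of an ethertype_vlan pattern that exercises the new ICE_SW_INSET_MAC_VLAN input set (editor's illustration; the MAC address and VLAN id are made up):

#include <rte_flow.h>
#include <rte_byteorder.h>

/* Match destination MAC 00:11:22:33:44:55 plus outer VLAN id 100;
 * the 0x0fff TCI mask selects only the VLAN id bits. */
static struct rte_flow_item_eth eth_spec = {
	.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
};
static struct rte_flow_item_eth eth_mask = {
	.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
};
static struct rte_flow_item_vlan vlan_spec = { .tci = RTE_BE16(100) };
static struct rte_flow_item_vlan vlan_mask = { .tci = RTE_BE16(0x0fff) };

static const struct rte_flow_item mac_vlan_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH,
	  .spec = &eth_spec, .mask = &eth_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
	  .spec = &vlan_spec, .mask = &vlan_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};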
* Re: [dpdk-dev] [PATCH v2 02/13] net/ice: support for more PPPoE input set
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 02/13] net/ice: support for more PPPoE input set Wei Zhao
@ 2020-04-02 9:31 ` Lu, Nannan
0 siblings, 0 replies; 69+ messages in thread
From: Lu, Nannan @ 2020-04-02 9:31 UTC (permalink / raw)
To: Zhao1, Wei, dev; +Cc: Zhang, Qi Z, Peng, Yuan, Fu, Qi, Wang, Haiyue
PPPOE session id
Tested-by: Lu, Nannan <nannan.lu@intel.com>
-----Original Message-----
From: Zhao1, Wei
Sent: Thursday, April 2, 2020 2:46 PM
To: dev@dpdk.org
Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Peng, Yuan <yuan.peng@intel.com>; Lu, Nannan <nannan.lu@intel.com>; Fu, Qi <qi.fu@intel.com>; Wang, Haiyue <haiyue.wang@intel.com>; Zhao1, Wei <wei.zhao1@intel.com>
Subject: [PATCH v2 02/13] net/ice: support for more PPPoE input set
This patch adds more support for PPPoE packets; it enables the switch filter to direct PPPoE packets based on session id and PPP protocol type.
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_generic_flow.c | 13 +++++
drivers/net/ice/ice_generic_flow.h | 9 ++++
drivers/net/ice/ice_switch_filter.c | 82 +++++++++++++++++++++++++++--
3 files changed, 99 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index c0420797e..0fdc7e617 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -1122,12 +1122,25 @@ enum rte_flow_item_type pattern_eth_pppoes[] = {
RTE_FLOW_ITEM_TYPE_PPPOES,
RTE_FLOW_ITEM_TYPE_END,
};
+enum rte_flow_item_type pattern_eth_pppoes_proto[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_PPPOES,
+ RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID,
+ RTE_FLOW_ITEM_TYPE_END,
+};
enum rte_flow_item_type pattern_eth_vlan_pppoes[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_VLAN,
RTE_FLOW_ITEM_TYPE_PPPOES,
RTE_FLOW_ITEM_TYPE_END,
};
+enum rte_flow_item_type pattern_eth_vlan_pppoes_proto[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_PPPOES,
+ RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID,
+ RTE_FLOW_ITEM_TYPE_END,
+};
enum rte_flow_item_type pattern_eth_qinq_pppoes[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_VLAN,
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
index ede6ec824..3361ecbd9 100644
--- a/drivers/net/ice/ice_generic_flow.h
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -30,6 +30,7 @@
#define ICE_PROT_VXLAN (1ULL << 19)
#define ICE_PROT_NVGRE (1ULL << 20)
#define ICE_PROT_GTPU (1ULL << 21)
+#define ICE_PROT_PPPOE_S (1ULL << 22)
/* field */
@@ -49,6 +50,8 @@
#define ICE_NVGRE_TNI (1ULL << 50)
#define ICE_GTPU_TEID (1ULL << 49)
#define ICE_GTPU_QFI (1ULL << 48)
+#define ICE_PPPOE_SESSION (1ULL << 47)
+#define ICE_PPPOE_PROTO (1ULL << 46)
/* input set */
@@ -177,6 +180,10 @@
(ICE_PROT_GTPU | ICE_GTPU_TEID)
#define ICE_INSET_GTPU_QFI \
(ICE_PROT_GTPU | ICE_GTPU_QFI)
+#define ICE_INSET_PPPOE_SESSION \
+ (ICE_PROT_PPPOE_S | ICE_PPPOE_SESSION)
+#define ICE_INSET_PPPOE_PROTO \
+ (ICE_PROT_PPPOE_S | ICE_PPPOE_PROTO)
/* empty pattern */
extern enum rte_flow_item_type pattern_empty[];
@@ -349,7 +356,9 @@ extern enum rte_flow_item_type pattern_eth_pppoed[];
extern enum rte_flow_item_type pattern_eth_vlan_pppoed[];
extern enum rte_flow_item_type pattern_eth_qinq_pppoed[];
extern enum rte_flow_item_type pattern_eth_pppoes[];
+extern enum rte_flow_item_type pattern_eth_pppoes_proto[];
extern enum rte_flow_item_type pattern_eth_vlan_pppoes[];
+extern enum rte_flow_item_type pattern_eth_vlan_pppoes_proto[];
extern enum rte_flow_item_type pattern_eth_qinq_pppoes[];
extern enum rte_flow_item_type pattern_eth_pppoes_ipv4[];
extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4[];
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 4db8f1471..add66e683 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -87,7 +87,11 @@
ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_MAC_PPPOE ( \
ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
- ICE_INSET_DMAC | ICE_INSET_ETHERTYPE)
+ ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
+#define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
+ ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
+ ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
+ ICE_INSET_PPPOE_PROTO)
struct sw_meta {
struct ice_adv_lkup_elem *list;
@@ -135,6 +139,10 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
{pattern_eth_vlan_pppoes,
ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
+ {pattern_eth_pppoes_proto,
+ ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
+ {pattern_eth_vlan_pppoes_proto,
+ ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
};
static struct
@@ -316,12 +324,15 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
+ const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
+ *pppoe_proto_mask;
uint8_t ipv6_addr_mask[16] = {
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
uint64_t input_set = ICE_INSET_NONE;
uint16_t j, t = 0;
uint16_t tunnel_valid = 0;
+ uint16_t pppoe_valid = 0;
for (item = pattern; item->type !=
@@ -885,14 +896,75 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
pppoe_mask = item->mask;
/* Check if PPPoE item is used to describe protocol.
* If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
*/
- if (pppoe_spec || pppoe_mask) {
+ if ((!pppoe_spec && pppoe_mask) ||
+ (pppoe_spec && !pppoe_mask)) {
rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid pppoe item");
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid pppoe item");
return 0;
}
+ if (pppoe_spec && pppoe_mask) {
+ /* Check pppoe mask and update input set */
+ if (pppoe_mask->length ||
+ pppoe_mask->code ||
+ pppoe_mask->version_type) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid pppoe mask");
+ return 0;
+ }
+ list[t].type = ICE_PPPOE;
+ if (pppoe_mask->session_id == UINT16_MAX) {
+ list[t].h_u.pppoe_hdr.session_id =
+ pppoe_spec->session_id;
+ list[t].m_u.pppoe_hdr.session_id =
+ UINT16_MAX;
+ input_set |= ICE_INSET_PPPOE_SESSION;
+ }
+ t++;
+ pppoe_valid = 1;
+ } else if (!pppoe_spec && !pppoe_mask) {
+ list[t].type = ICE_PPPOE;
+ }
+
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
+ pppoe_proto_spec = item->spec;
+ pppoe_proto_mask = item->mask;
+ /* Check if PPPoE optional proto_id item
+ * is used to describe protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
+ */
+ if ((!pppoe_proto_spec && pppoe_proto_mask) ||
+ (pppoe_proto_spec && !pppoe_proto_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid pppoe proto item");
+ return 0;
+ }
+ if (pppoe_proto_spec && pppoe_proto_mask) {
+ if (pppoe_valid)
+ t--;
+ list[t].type = ICE_PPPOE;
+ if (pppoe_proto_mask->proto_id == UINT16_MAX) {
+ list[t].h_u.pppoe_hdr.ppp_prot_id =
+ pppoe_proto_spec->proto_id;
+ list[t].m_u.pppoe_hdr.ppp_prot_id =
+ UINT16_MAX;
+ input_set |= ICE_INSET_PPPOE_PROTO;
+ }
+ t++;
+ } else if (!pppoe_proto_spec && !pppoe_proto_mask) {
+ list[t].type = ICE_PPPOE;
+ }
+
break;
case RTE_FLOW_ITEM_TYPE_VOID:
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* Re: [dpdk-dev] [PATCH v2 10/13] net/ice: add more flow support for permit mode
2020-04-02 8:45 ` Zhang, Qi Z
@ 2020-04-02 9:41 ` Zhao1, Wei
0 siblings, 0 replies; 69+ messages in thread
From: Zhao1, Wei @ 2020-04-02 9:41 UTC (permalink / raw)
To: Zhang, Qi Z, dev; +Cc: Peng, Yuan, Lu, Nannan, Fu, Qi, Wang, Haiyue
Ok
> -----Original Message-----
> From: Zhang, Qi Z <qi.z.zhang@intel.com>
> Sent: Thursday, April 2, 2020 4:46 PM
> To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
> Cc: Peng, Yuan <yuan.peng@intel.com>; Lu, Nannan <nannan.lu@intel.com>;
> Fu, Qi <qi.fu@intel.com>; Wang, Haiyue <haiyue.wang@intel.com>
> Subject: RE: [PATCH v2 10/13] net/ice: add more flow support for permit mode
>
>
>
> > -----Original Message-----
> > From: Zhao1, Wei <wei.zhao1@intel.com>
> > Sent: Thursday, April 2, 2020 2:46 PM
> > To: dev@dpdk.org
> > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Peng, Yuan
> > <yuan.peng@intel.com>; Lu, Nannan <nannan.lu@intel.com>; Fu, Qi
> > <qi.fu@intel.com>; Wang, Haiyue <haiyue.wang@intel.com>; Zhao1, Wei
> > <wei.zhao1@intel.com>
> > Subject: [PATCH v2 10/13] net/ice: add more flow support for permit
> > mode
> >
> It should be "permission stage" but not "permit mode"
>
> > This patch add switch filter permit mode support for more flow pattern
> > in pf only mode.
> >
> > Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> > ---
> > drivers/net/ice/ice_switch_filter.c | 14 ++++++++++++++
> > 1 file changed, 14 insertions(+)
> >
> > diff --git a/drivers/net/ice/ice_switch_filter.c
> > b/drivers/net/ice/ice_switch_filter.c
> > index 4248b8911..81d069e99 100644
> > --- a/drivers/net/ice/ice_switch_filter.c
> > +++ b/drivers/net/ice/ice_switch_filter.c
> > @@ -200,6 +200,8 @@ ice_pattern_match_item
> > ice_switch_pattern_dist_os[] = {
> >
> > static struct
> > ice_pattern_match_item ice_switch_pattern_perm[] = {
> > + {pattern_ethertype,
> > + ICE_SW_INSET_ETHER, ICE_INSET_NONE},
> > {pattern_ethertype_vlan,
> > ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
> > {pattern_eth_ipv4,
> > @@ -226,6 +228,18 @@ ice_pattern_match_item
> ice_switch_pattern_perm[]
> > = {
> > ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
> > {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
> > ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
> > + {pattern_eth_pppoed,
> > + ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
> > + {pattern_eth_vlan_pppoed,
> > + ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
> > + {pattern_eth_pppoes,
> > + ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
> > + {pattern_eth_vlan_pppoes,
> > + ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
> > + {pattern_eth_pppoes_proto,
> > + ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
> > + {pattern_eth_vlan_pppoes_proto,
> > + ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
> > {pattern_eth_ipv6_esp,
> > ICE_INSET_NONE, ICE_INSET_NONE},
> > {pattern_eth_ipv6_udp_esp,
> > --
> > 2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* Re: [dpdk-dev] [PATCH v2 09/13] net/ice: add support for NAT-T
2020-04-02 8:45 ` Zhang, Qi Z
@ 2020-04-02 23:37 ` Zhao1, Wei
0 siblings, 0 replies; 69+ messages in thread
From: Zhao1, Wei @ 2020-04-02 23:37 UTC (permalink / raw)
To: Zhang, Qi Z, dev; +Cc: Peng, Yuan, Lu, Nannan, Fu, Qi, Wang, Haiyue
Ok.
> -----Original Message-----
> From: Zhang, Qi Z <qi.z.zhang@intel.com>
> Sent: Thursday, April 2, 2020 4:45 PM
> To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
> Cc: Peng, Yuan <yuan.peng@intel.com>; Lu, Nannan <nannan.lu@intel.com>;
> Fu, Qi <qi.fu@intel.com>; Wang, Haiyue <haiyue.wang@intel.com>
> Subject: RE: [PATCH v2 09/13] net/ice: add support for NAT-T
>
>
>
> > -----Original Message-----
> > From: Zhao1, Wei <wei.zhao1@intel.com>
> > Sent: Thursday, April 2, 2020 2:46 PM
> > To: dev@dpdk.org
> > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Peng, Yuan
> > <yuan.peng@intel.com>; Lu, Nannan <nannan.lu@intel.com>; Fu, Qi
> > <qi.fu@intel.com>; Wang, Haiyue <haiyue.wang@intel.com>; Zhao1, Wei
> > <wei.zhao1@intel.com>
> > Subject: [PATCH v2 09/13] net/ice: add support for NAT-T
>
> Better change to "add support for IPv6 NAT-T", since the patch is only for IPv6
>
> >
> > This patch add switch filter support for NAT-T packets, it enable
> > swicth filter to direct ipv6 packets with NAT-T payload to specific action.
> >
> > Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> > ---
> > drivers/net/ice/ice_generic_flow.c | 14 ++++++++++++++
> > drivers/net/ice/ice_generic_flow.h | 2 ++
> > drivers/net/ice/ice_switch_filter.c | 19 +++++++++++++++++--
> > 3 files changed, 33 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/net/ice/ice_generic_flow.c
> > b/drivers/net/ice/ice_generic_flow.c
> > index 04dcaba08..3365aeb86 100644
> > --- a/drivers/net/ice/ice_generic_flow.c
> > +++ b/drivers/net/ice/ice_generic_flow.c
> > @@ -1394,6 +1394,20 @@ enum rte_flow_item_type pattern_eth_ipv6_ah[]
> =
> > {
> > RTE_FLOW_ITEM_TYPE_AH,
> > RTE_FLOW_ITEM_TYPE_END,
> > };
> > +enum rte_flow_item_type pattern_eth_ipv6_udp_esp[] = {
> > + RTE_FLOW_ITEM_TYPE_ETH,
> > + RTE_FLOW_ITEM_TYPE_IPV6,
> > + RTE_FLOW_ITEM_TYPE_UDP,
> > + RTE_FLOW_ITEM_TYPE_ESP,
> > + RTE_FLOW_ITEM_TYPE_END,
> > +};
> > +enum rte_flow_item_type pattern_eth_ipv6_udp_ah[] = {
> > + RTE_FLOW_ITEM_TYPE_ETH,
> > + RTE_FLOW_ITEM_TYPE_IPV6,
> > + RTE_FLOW_ITEM_TYPE_UDP,
> > + RTE_FLOW_ITEM_TYPE_AH,
> > + RTE_FLOW_ITEM_TYPE_END,
> > +};
> > enum rte_flow_item_type pattern_eth_ipv6_l2tp[] = {
> > RTE_FLOW_ITEM_TYPE_ETH,
> > RTE_FLOW_ITEM_TYPE_IPV6,
> > diff --git a/drivers/net/ice/ice_generic_flow.h
> > b/drivers/net/ice/ice_generic_flow.h
> > index 65cd64c7f..25badf192 100644
> > --- a/drivers/net/ice/ice_generic_flow.h
> > +++ b/drivers/net/ice/ice_generic_flow.h
> > @@ -393,9 +393,11 @@ extern enum rte_flow_item_type
> > pattern_eth_qinq_pppoes_ipv6_icmp6[];
> >
> > /* ESP */
> > extern enum rte_flow_item_type pattern_eth_ipv6_esp[];
> > +extern enum rte_flow_item_type pattern_eth_ipv6_udp_esp[];
> >
> > /* AH */
> > extern enum rte_flow_item_type pattern_eth_ipv6_ah[];
> > +extern enum rte_flow_item_type pattern_eth_ipv6_udp_ah[];
> >
> > /* L2TP */
> > extern enum rte_flow_item_type pattern_eth_ipv6_l2tp[]; diff --git
> > a/drivers/net/ice/ice_switch_filter.c
> > b/drivers/net/ice/ice_switch_filter.c
> > index 9b4b9346c..4248b8911 100644
> > --- a/drivers/net/ice/ice_switch_filter.c
> > +++ b/drivers/net/ice/ice_switch_filter.c
> > @@ -150,8 +150,12 @@ ice_pattern_match_item
> > ice_switch_pattern_dist_comms[] = {
> > ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
> > {pattern_eth_ipv6_esp,
> > ICE_INSET_NONE, ICE_INSET_NONE},
> > + {pattern_eth_ipv6_udp_esp,
> > + ICE_INSET_NONE, ICE_INSET_NONE},
> > {pattern_eth_ipv6_ah,
> > ICE_INSET_NONE, ICE_INSET_NONE},
> > + {pattern_eth_ipv6_udp_ah,
> > + ICE_INSET_NONE, ICE_INSET_NONE},
> > {pattern_eth_ipv6_l2tp,
> > ICE_INSET_NONE, ICE_INSET_NONE},
> > {pattern_eth_ipv4_pfcp,
> > @@ -224,8 +228,12 @@ ice_pattern_match_item
> ice_switch_pattern_perm[]
> > = {
> > ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
> > {pattern_eth_ipv6_esp,
> > ICE_INSET_NONE, ICE_INSET_NONE},
> > + {pattern_eth_ipv6_udp_esp,
> > + ICE_INSET_NONE, ICE_INSET_NONE},
> > {pattern_eth_ipv6_ah,
> > ICE_INSET_NONE, ICE_INSET_NONE},
> > + {pattern_eth_ipv6_udp_ah,
> > + ICE_INSET_NONE, ICE_INSET_NONE},
> > {pattern_eth_ipv6_l2tp,
> > ICE_INSET_NONE, ICE_INSET_NONE},
> > {pattern_eth_ipv4_pfcp,
> > @@ -364,6 +372,7 @@ ice_switch_inset_get(const struct rte_flow_item
> > pattern[],
> > uint16_t tunnel_valid = 0;
> > uint16_t pppoe_valid = 0;
> > uint16_t ipv6_valiad = 0;
> > + uint16_t udp_valiad = 0;
> >
> >
> > for (item = pattern; item->type !=
> > @@ -642,6 +651,7 @@ ice_switch_inset_get(const struct rte_flow_item
> > pattern[],
> > case RTE_FLOW_ITEM_TYPE_UDP:
> > udp_spec = item->spec;
> > udp_mask = item->mask;
> > + udp_valiad = 1;
> > if (udp_spec && udp_mask) {
> > /* Check UDP mask and update input set*/
> > if (udp_mask->hdr.dgram_len ||
> > @@ -974,7 +984,9 @@ ice_switch_inset_get(const struct rte_flow_item
> > pattern[],
> > "Invalid esp item");
> > return -ENOTSUP;
> > }
> > - if (ipv6_valiad)
> > + if (ipv6_valiad && udp_valiad)
> > + *tun_type = ICE_SW_TUN_PROFID_IPV6_NAT_T;
> > + else if (ipv6_valiad)
> > *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
> > break;
> >
> > @@ -988,7 +1000,9 @@ ice_switch_inset_get(const struct rte_flow_item
> > pattern[],
> > "Invalid ah item");
> > return -ENOTSUP;
> > }
> > - if (ipv6_valiad)
> > + if (ipv6_valiad && udp_valiad)
> > + *tun_type = ICE_SW_TUN_PROFID_IPV6_NAT_T;
> > + else if (ipv6_valiad)
> > *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
> > break;
> >
> > @@ -1237,6 +1251,7 @@ ice_is_profile_rule(enum ice_sw_tunnel_type
> > tun_type)
> > case ICE_SW_TUN_PROFID_IPV6_ESP:
> > case ICE_SW_TUN_PROFID_IPV6_AH:
> > case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
> > + case ICE_SW_TUN_PROFID_IPV6_NAT_T:
> > case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
> > case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
> > case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
> > --
> > 2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* Re: [dpdk-dev] [PATCH v2 06/13] net/ice: add action number check for swicth
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 06/13] net/ice: add action number check for swicth Wei Zhao
2020-04-02 8:29 ` Zhang, Qi Z
@ 2020-04-03 1:49 ` Lu, Nannan
1 sibling, 0 replies; 69+ messages in thread
From: Lu, Nannan @ 2020-04-03 1:49 UTC (permalink / raw)
To: Zhao1, Wei, dev; +Cc: Zhang, Qi Z, Peng, Yuan, Fu, Qi, Wang, Haiyue, stable
Tested-by: Lu, Nannan <nannan.lu@intel.com>
-----Original Message-----
From: Zhao1, Wei
Sent: Thursday, April 2, 2020 2:46 PM
To: dev@dpdk.org
Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Peng, Yuan <yuan.peng@intel.com>; Lu, Nannan <nannan.lu@intel.com>; Fu, Qi <qi.fu@intel.com>; Wang, Haiyue <haiyue.wang@intel.com>; stable@dpdk.org; Zhao1, Wei <wei.zhao1@intel.com>
Subject: [PATCH v2 06/13] net/ice: add action number check for swicth
The action number can only be one for DCF or PF switch filter, not support large action.
Cc: stable@dpdk.org
Fixes: 47d460d63233 ("net/ice: rework switch filter")
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_switch_filter.c | 48 +++++++++++++++++++++++++++++
1 file changed, 48 insertions(+)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index d9bdf9637..cc48f22dd 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -1073,6 +1073,46 @@ ice_switch_parse_action(struct ice_pf *pf,
return -rte_errno;
}
+static int
+ice_switch_check_action(const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action *action;
+ enum rte_flow_action_type action_type;
+ uint16_t actions_num = 0;
+
+ for (action = actions; action->type !=
+ RTE_FLOW_ACTION_TYPE_END; action++) {
+ action_type = action->type;
+ switch (action_type) {
+ case RTE_FLOW_ACTION_TYPE_VF:
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ actions_num++;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ continue;
+ default:
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "Invalid action type");
+ return -rte_errno;
+ }
+ }
+
+ if (actions_num > 1) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "Invalid action number");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
static int
ice_switch_parse_pattern_action(struct ice_adapter *ad,
struct ice_pattern_match_item *array,
@@ -1158,6 +1198,14 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
goto error;
}
+ ret = ice_switch_check_action(actions, error);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Invalid input action number");
+ goto error;
+ }
+
if (ad->hw.dcf_enabled)
ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
else
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v3 00/13] add switch filter support for intel DCF
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 00/13]add switch filter support for intel DCF Wei Zhao
` (12 preceding siblings ...)
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 13/13] net/ice: redirect switch rule to new VSI Wei Zhao
@ 2020-04-03 2:43 ` Wei Zhao
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 01/13] net/ice: enable switch flow on DCF Wei Zhao
` (13 more replies)
13 siblings, 14 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-03 2:43 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, nannan.lu, qi.fu, yuan.peng
A DCF (Device Config Function) framework has been added for Intel devices;
this patch set adds switch filter support for it, and also fixes
bugs which block this feature.
This patchset is based on:
[1] https://patchwork.dpdk.org/cover/66480/ : add Intel DCF PMD support
Depends-on: series-8859
v2:
-add switch filter support for AH/ESP/PFCP packet
-fix some patch check warning
-add flow redirect on switch patch
v3:
-update commit log
-update in redirect on switch patch
Beilei Xing (2):
net/ice: enable flow redirect on switch
net/ice: redirect switch rule to new VSI
Wei Zhao (11):
net/ice: enable switch flow on DCF
net/ice: support for more PPPoE input set
net/ice: change swicth parser to support flexible mask
net/ice: add support for MAC VLAN rule
net/ice: change default tunnle type
net/ice: add action number check for swicth
net/ice: add support for ESP/AH/L2TP
net/ice: add support for PFCP
net/ice: add support for IPv6 NAT-T
net/ice: add more flow support for permit stage
net/ice: fix input set of VLAN item
doc/guides/rel_notes/release_20_05.rst | 2 +-
drivers/net/ice/ice_dcf_ethdev.c | 10 +-
drivers/net/ice/ice_dcf_parent.c | 30 +-
drivers/net/ice/ice_fdir_filter.c | 6 +
drivers/net/ice/ice_generic_flow.c | 61 +++
drivers/net/ice/ice_generic_flow.h | 26 +
drivers/net/ice/ice_hash.c | 6 +
drivers/net/ice/ice_switch_filter.c | 730 +++++++++++++++++++------
8 files changed, 684 insertions(+), 187 deletions(-)
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v3 01/13] net/ice: enable switch flow on DCF
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 00/13] add switch filter support for intel DCF Wei Zhao
@ 2020-04-03 2:43 ` Wei Zhao
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 02/13] net/ice: support for more PPPoE input set Wei Zhao
` (12 subsequent siblings)
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-03 2:43 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, nannan.lu, qi.fu, yuan.peng, Wei Zhao
DCF on CVL is a control plane VF which takes the responsibility to
configure all the PF/global resources. This patch adds support for DCF
to program forwarding rules that direct packets to VFs.
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
doc/guides/rel_notes/release_20_05.rst | 2 +-
drivers/net/ice/ice_dcf_ethdev.c | 10 +++++--
drivers/net/ice/ice_dcf_parent.c | 8 ++++++
drivers/net/ice/ice_fdir_filter.c | 6 ++++
drivers/net/ice/ice_hash.c | 6 ++++
drivers/net/ice/ice_switch_filter.c | 39 +++++++++++++++++++++++++-
6 files changed, 67 insertions(+), 4 deletions(-)
diff --git a/doc/guides/rel_notes/release_20_05.rst b/doc/guides/rel_notes/release_20_05.rst
index 9bc647284..bde7e47fb 100644
--- a/doc/guides/rel_notes/release_20_05.rst
+++ b/doc/guides/rel_notes/release_20_05.rst
@@ -68,7 +68,7 @@ New Features
Updated the Intel ice driver with new features and improvements, including:
* Added support for DCF (Device Config Function) feature.
-
+ * Added switch filter support for intel DCF.
Removed Items
-------------
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index af94caeff..e5ba1a61f 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -115,8 +115,8 @@ ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
static int
ice_dcf_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
- __rte_unused enum rte_filter_op filter_op,
- __rte_unused void *arg)
+ enum rte_filter_op filter_op,
+ void *arg)
{
int ret = 0;
@@ -124,6 +124,12 @@ ice_dcf_dev_filter_ctrl(struct rte_eth_dev *dev,
return -EINVAL;
switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &ice_flow_ops;
+ break;
+
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index ff08292a1..37f0e2be2 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -9,6 +9,7 @@
#include <rte_spinlock.h>
#include "ice_dcf_ethdev.h"
+#include "ice_generic_flow.h"
#define ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL 100000 /* us */
static rte_spinlock_t vsi_update_lock = RTE_SPINLOCK_INITIALIZER;
@@ -321,6 +322,12 @@ ice_dcf_init_parent_adapter(struct rte_eth_dev *eth_dev)
}
parent_adapter->active_pkg_type = ice_load_pkg_type(parent_hw);
+ err = ice_flow_init(parent_adapter);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Failed to initialize flow");
+ goto uninit_hw;
+ }
+
ice_dcf_update_vf_vsi_map(parent_hw, hw->num_vfs, hw->vf_vsi_map);
mac = (const struct rte_ether_addr *)hw->avf.mac.addr;
@@ -347,5 +354,6 @@ ice_dcf_uninit_parent_adapter(struct rte_eth_dev *eth_dev)
eth_dev->data->mac_addrs = NULL;
+ ice_flow_uninit(parent_adapter);
ice_dcf_uninit_parent_hw(parent_hw);
}
diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index a082a13df..1a85d6cc1 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -1061,6 +1061,9 @@ ice_fdir_init(struct ice_adapter *ad)
struct ice_flow_parser *parser;
int ret;
+ if (ad->hw.dcf_enabled)
+ return 0;
+
ret = ice_fdir_setup(pf);
if (ret)
return ret;
@@ -1081,6 +1084,9 @@ ice_fdir_uninit(struct ice_adapter *ad)
struct ice_pf *pf = &ad->pf;
struct ice_flow_parser *parser;
+ if (ad->hw.dcf_enabled)
+ return;
+
if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
parser = &ice_fdir_parser_comms;
else
diff --git a/drivers/net/ice/ice_hash.c b/drivers/net/ice/ice_hash.c
index 0fdd4d68d..72c8ddc9a 100644
--- a/drivers/net/ice/ice_hash.c
+++ b/drivers/net/ice/ice_hash.c
@@ -243,6 +243,9 @@ ice_hash_init(struct ice_adapter *ad)
{
struct ice_flow_parser *parser = NULL;
+ if (ad->hw.dcf_enabled)
+ return 0;
+
if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
parser = &ice_hash_parser_os;
else if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
@@ -565,6 +568,9 @@ ice_hash_destroy(struct ice_adapter *ad,
static void
ice_hash_uninit(struct ice_adapter *ad)
{
+ if (ad->hw.dcf_enabled)
+ return;
+
if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
ice_unregister_parser(&ice_hash_parser_os, ad);
else if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 66dc158ef..4db8f1471 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -913,6 +913,39 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
return 0;
}
+static int
+ice_switch_parse_dcf_action(const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct ice_adv_rule_info *rule_info)
+{
+ const struct rte_flow_action_vf *act_vf;
+ const struct rte_flow_action *action;
+ enum rte_flow_action_type action_type;
+
+ for (action = actions; action->type !=
+ RTE_FLOW_ACTION_TYPE_END; action++) {
+ action_type = action->type;
+ switch (action_type) {
+ case RTE_FLOW_ACTION_TYPE_VF:
+ rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
+ act_vf = action->conf;
+ rule_info->sw_act.vsi_handle = act_vf->id;
+ break;
+ default:
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "Invalid action type or queue number");
+ return -rte_errno;
+ }
+ }
+
+ rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
+ rule_info->rx = 1;
+ rule_info->priority = 5;
+
+ return 0;
+}
static int
ice_switch_parse_action(struct ice_pf *pf,
@@ -1081,7 +1114,11 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
goto error;
}
- ret = ice_switch_parse_action(pf, actions, error, &rule_info);
+ if (ad->hw.dcf_enabled)
+ ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
+ else
+ ret = ice_switch_parse_action(pf, actions, error, &rule_info);
+
if (ret) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
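A minimal sketch of the action list a DCF application would pass so that ice_switch_parse_dcf_action() programs an ICE_FWD_TO_VSI rule (editor's illustration; the VF id is made up):

#include <rte_flow.h>

/* The parser sets sw_act.fltr_act = ICE_FWD_TO_VSI and uses act_vf->id
 * as the VSI handle of the target VF; any other action type is
 * rejected with "Invalid action type or queue number". */
static struct rte_flow_action_vf act_vf = { .id = 2 };
static const struct rte_flow_action dcf_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &act_vf },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};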
* [dpdk-dev] [PATCH v3 02/13] net/ice: support for more PPPoE input set
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 00/13] add switch filter support for intel DCF Wei Zhao
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 01/13] net/ice: enable switch flow on DCF Wei Zhao
@ 2020-04-03 2:43 ` Wei Zhao
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 03/13] net/ice: change swicth parser to support flexible mask Wei Zhao
` (11 subsequent siblings)
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-03 2:43 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, nannan.lu, qi.fu, yuan.peng, Wei Zhao
This patch adds more support for PPPoE packets;
it enables the switch filter to direct PPPoE packets based on
session id and PPP protocol type.
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_generic_flow.c | 13 +++++
drivers/net/ice/ice_generic_flow.h | 9 ++++
drivers/net/ice/ice_switch_filter.c | 82 +++++++++++++++++++++++++++--
3 files changed, 99 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index c0420797e..0fdc7e617 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -1122,12 +1122,25 @@ enum rte_flow_item_type pattern_eth_pppoes[] = {
RTE_FLOW_ITEM_TYPE_PPPOES,
RTE_FLOW_ITEM_TYPE_END,
};
+enum rte_flow_item_type pattern_eth_pppoes_proto[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_PPPOES,
+ RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID,
+ RTE_FLOW_ITEM_TYPE_END,
+};
enum rte_flow_item_type pattern_eth_vlan_pppoes[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_VLAN,
RTE_FLOW_ITEM_TYPE_PPPOES,
RTE_FLOW_ITEM_TYPE_END,
};
+enum rte_flow_item_type pattern_eth_vlan_pppoes_proto[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_PPPOES,
+ RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID,
+ RTE_FLOW_ITEM_TYPE_END,
+};
enum rte_flow_item_type pattern_eth_qinq_pppoes[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_VLAN,
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
index ede6ec824..3361ecbd9 100644
--- a/drivers/net/ice/ice_generic_flow.h
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -30,6 +30,7 @@
#define ICE_PROT_VXLAN (1ULL << 19)
#define ICE_PROT_NVGRE (1ULL << 20)
#define ICE_PROT_GTPU (1ULL << 21)
+#define ICE_PROT_PPPOE_S (1ULL << 22)
/* field */
@@ -49,6 +50,8 @@
#define ICE_NVGRE_TNI (1ULL << 50)
#define ICE_GTPU_TEID (1ULL << 49)
#define ICE_GTPU_QFI (1ULL << 48)
+#define ICE_PPPOE_SESSION (1ULL << 47)
+#define ICE_PPPOE_PROTO (1ULL << 46)
/* input set */
@@ -177,6 +180,10 @@
(ICE_PROT_GTPU | ICE_GTPU_TEID)
#define ICE_INSET_GTPU_QFI \
(ICE_PROT_GTPU | ICE_GTPU_QFI)
+#define ICE_INSET_PPPOE_SESSION \
+ (ICE_PROT_PPPOE_S | ICE_PPPOE_SESSION)
+#define ICE_INSET_PPPOE_PROTO \
+ (ICE_PROT_PPPOE_S | ICE_PPPOE_PROTO)
/* empty pattern */
extern enum rte_flow_item_type pattern_empty[];
@@ -349,7 +356,9 @@ extern enum rte_flow_item_type pattern_eth_pppoed[];
extern enum rte_flow_item_type pattern_eth_vlan_pppoed[];
extern enum rte_flow_item_type pattern_eth_qinq_pppoed[];
extern enum rte_flow_item_type pattern_eth_pppoes[];
+extern enum rte_flow_item_type pattern_eth_pppoes_proto[];
extern enum rte_flow_item_type pattern_eth_vlan_pppoes[];
+extern enum rte_flow_item_type pattern_eth_vlan_pppoes_proto[];
extern enum rte_flow_item_type pattern_eth_qinq_pppoes[];
extern enum rte_flow_item_type pattern_eth_pppoes_ipv4[];
extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4[];
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 4db8f1471..add66e683 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -87,7 +87,11 @@
ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_MAC_PPPOE ( \
ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
- ICE_INSET_DMAC | ICE_INSET_ETHERTYPE)
+ ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
+#define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
+ ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
+ ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
+ ICE_INSET_PPPOE_PROTO)
struct sw_meta {
struct ice_adv_lkup_elem *list;
@@ -135,6 +139,10 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
{pattern_eth_vlan_pppoes,
ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
+ {pattern_eth_pppoes_proto,
+ ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
+ {pattern_eth_vlan_pppoes_proto,
+ ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
};
static struct
@@ -316,12 +324,15 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
+ const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
+ *pppoe_proto_mask;
uint8_t ipv6_addr_mask[16] = {
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
uint64_t input_set = ICE_INSET_NONE;
uint16_t j, t = 0;
uint16_t tunnel_valid = 0;
+ uint16_t pppoe_valid = 0;
for (item = pattern; item->type !=
@@ -885,14 +896,75 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
pppoe_mask = item->mask;
/* Check if PPPoE item is used to describe protocol.
* If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
*/
- if (pppoe_spec || pppoe_mask) {
+ if ((!pppoe_spec && pppoe_mask) ||
+ (pppoe_spec && !pppoe_mask)) {
rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid pppoe item");
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid pppoe item");
return 0;
}
+ if (pppoe_spec && pppoe_mask) {
+ /* Check pppoe mask and update input set */
+ if (pppoe_mask->length ||
+ pppoe_mask->code ||
+ pppoe_mask->version_type) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid pppoe mask");
+ return 0;
+ }
+ list[t].type = ICE_PPPOE;
+ if (pppoe_mask->session_id == UINT16_MAX) {
+ list[t].h_u.pppoe_hdr.session_id =
+ pppoe_spec->session_id;
+ list[t].m_u.pppoe_hdr.session_id =
+ UINT16_MAX;
+ input_set |= ICE_INSET_PPPOE_SESSION;
+ }
+ t++;
+ pppoe_valid = 1;
+ } else if (!pppoe_spec && !pppoe_mask) {
+ list[t].type = ICE_PPPOE;
+ }
+
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
+ pppoe_proto_spec = item->spec;
+ pppoe_proto_mask = item->mask;
+ /* Check if PPPoE optional proto_id item
+ * is used to describe protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
+ */
+ if ((!pppoe_proto_spec && pppoe_proto_mask) ||
+ (pppoe_proto_spec && !pppoe_proto_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid pppoe proto item");
+ return 0;
+ }
+ if (pppoe_proto_spec && pppoe_proto_mask) {
+ if (pppoe_valid)
+ t--;
+ list[t].type = ICE_PPPOE;
+ if (pppoe_proto_mask->proto_id == UINT16_MAX) {
+ list[t].h_u.pppoe_hdr.ppp_prot_id =
+ pppoe_proto_spec->proto_id;
+ list[t].m_u.pppoe_hdr.ppp_prot_id =
+ UINT16_MAX;
+ input_set |= ICE_INSET_PPPOE_PROTO;
+ }
+ t++;
+ } else if (!pppoe_proto_spec && !pppoe_proto_mask) {
+ list[t].type = ICE_PPPOE;
+ }
+
break;
case RTE_FLOW_ITEM_TYPE_VOID:
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
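A minimal sketch of the items for the new pattern_eth_pppoes_proto (editor's illustration; the session id and protocol id values are made up). Note the parser only folds fully masked (0xffff) session_id/proto_id fields into the input set:

#include <rte_flow.h>
#include <rte_byteorder.h>

/* Match PPPoE session id 1 carrying IPv4 (PPP protocol id 0x0021). */
static struct rte_flow_item_pppoe pppoe_spec = {
	.session_id = RTE_BE16(1),
};
static struct rte_flow_item_pppoe pppoe_mask = {
	.session_id = RTE_BE16(0xffff),
};
static struct rte_flow_item_pppoe_proto_id proto_spec = {
	.proto_id = RTE_BE16(0x0021),
};
static struct rte_flow_item_pppoe_proto_id proto_mask = {
	.proto_id = RTE_BE16(0xffff),
};

static const struct rte_flow_item pppoes_proto_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_PPPOES,
	  .spec = &pppoe_spec, .mask = &pppoe_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID,
	  .spec = &proto_spec, .mask = &proto_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};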
* [dpdk-dev] [PATCH v3 03/13] net/ice: change swicth parser to support flexible mask
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 00/13] add switch filter support for intel DCF Wei Zhao
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 01/13] net/ice: enable switch flow on DCF Wei Zhao
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 02/13] net/ice: support for more PPPoE input set Wei Zhao
@ 2020-04-03 2:43 ` Wei Zhao
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 04/13] net/ice: add support for MAC VLAN rule Wei Zhao
` (10 subsequent siblings)
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-03 2:43 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, nannan.lu, qi.fu, yuan.peng, Wei Zhao
DCF needs to support flexible mask configuration, that is, some
input set masks may not be the all-ones 0xFFFF type. For example,
to direct L2/IP multicast packets the mask for the source IP may be
0xF0000000; this patch enables the switch filter parser to handle such masks.
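As a minimal illustration (editor's sketch using the standard rte_flow IPv4 item; not part of this patch):

/* Match packets whose IPv4 source falls in 224.0.0.0/4 by masking
 * only the top nibble of the address instead of all 32 bits. */
static struct rte_flow_item_ipv4 ip_spec = {
	.hdr.src_addr = RTE_BE32(RTE_IPV4(224, 0, 0, 0)),
};
static struct rte_flow_item_ipv4 ip_mask = {
	.hdr.src_addr = RTE_BE32(0xf0000000),
};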
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_switch_filter.c | 318 ++++++++++++----------------
1 file changed, 140 insertions(+), 178 deletions(-)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index add66e683..4edaea3f5 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -326,9 +326,6 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
*pppoe_proto_mask;
- uint8_t ipv6_addr_mask[16] = {
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
uint64_t input_set = ICE_INSET_NONE;
uint16_t j, t = 0;
uint16_t tunnel_valid = 0;
@@ -351,19 +348,31 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
eth_spec = item->spec;
eth_mask = item->mask;
if (eth_spec && eth_mask) {
- if (tunnel_valid &&
- rte_is_broadcast_ether_addr(&eth_mask->src))
- input_set |= ICE_INSET_TUN_SMAC;
- else if (
- rte_is_broadcast_ether_addr(&eth_mask->src))
- input_set |= ICE_INSET_SMAC;
- if (tunnel_valid &&
- rte_is_broadcast_ether_addr(&eth_mask->dst))
- input_set |= ICE_INSET_TUN_DMAC;
- else if (
- rte_is_broadcast_ether_addr(&eth_mask->dst))
- input_set |= ICE_INSET_DMAC;
- input_set |= ICE_INSET_DMAC;
- if (eth_mask->type == RTE_BE16(0xffff))
+ const uint8_t *a = eth_mask->src.addr_bytes;
+ const uint8_t *b = eth_mask->dst.addr_bytes;
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+ if (a[j] && tunnel_valid) {
+ input_set |=
+ ICE_INSET_TUN_SMAC;
+ break;
+ } else if (a[j]) {
+ input_set |=
+ ICE_INSET_SMAC;
+ break;
+ }
+ }
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+ if (b[j] && tunnel_valid) {
+ input_set |=
+ ICE_INSET_TUN_DMAC;
+ break;
+ } else if (b[j]) {
+ input_set |=
+ ICE_INSET_DMAC;
+ break;
+ }
+ }
+ if (eth_mask->type)
input_set |= ICE_INSET_ETHERTYPE;
list[t].type = (tunnel_valid == 0) ?
ICE_MAC_OFOS : ICE_MAC_IL;
@@ -373,16 +382,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
h = &list[t].h_u.eth_hdr;
m = &list[t].m_u.eth_hdr;
for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
- if (eth_mask->src.addr_bytes[j] ==
- UINT8_MAX) {
+ if (eth_mask->src.addr_bytes[j]) {
h->src_addr[j] =
eth_spec->src.addr_bytes[j];
m->src_addr[j] =
eth_mask->src.addr_bytes[j];
i = 1;
}
- if (eth_mask->dst.addr_bytes[j] ==
- UINT8_MAX) {
+ if (eth_mask->dst.addr_bytes[j]) {
h->dst_addr[j] =
eth_spec->dst.addr_bytes[j];
m->dst_addr[j] =
@@ -392,17 +399,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (i)
t++;
- if (eth_mask->type == UINT16_MAX) {
+ if (eth_mask->type) {
list[t].type = ICE_ETYPE_OL;
list[t].h_u.ethertype.ethtype_id =
eth_spec->type;
list[t].m_u.ethertype.ethtype_id =
- UINT16_MAX;
+ eth_mask->type;
t++;
}
- } else if (!eth_spec && !eth_mask) {
- list[t].type = (tun_type == ICE_NON_TUN) ?
- ICE_MAC_OFOS : ICE_MAC_IL;
}
break;
@@ -423,81 +427,68 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (tunnel_valid) {
- if (ipv4_mask->hdr.type_of_service ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.type_of_service)
input_set |=
ICE_INSET_TUN_IPV4_TOS;
- if (ipv4_mask->hdr.src_addr ==
- UINT32_MAX)
+ if (ipv4_mask->hdr.src_addr)
input_set |=
ICE_INSET_TUN_IPV4_SRC;
- if (ipv4_mask->hdr.dst_addr ==
- UINT32_MAX)
+ if (ipv4_mask->hdr.dst_addr)
input_set |=
ICE_INSET_TUN_IPV4_DST;
- if (ipv4_mask->hdr.time_to_live ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.time_to_live)
input_set |=
ICE_INSET_TUN_IPV4_TTL;
- if (ipv4_mask->hdr.next_proto_id ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.next_proto_id)
input_set |=
ICE_INSET_TUN_IPV4_PROTO;
} else {
- if (ipv4_mask->hdr.src_addr ==
- UINT32_MAX)
+ if (ipv4_mask->hdr.src_addr)
input_set |= ICE_INSET_IPV4_SRC;
- if (ipv4_mask->hdr.dst_addr ==
- UINT32_MAX)
+ if (ipv4_mask->hdr.dst_addr)
input_set |= ICE_INSET_IPV4_DST;
- if (ipv4_mask->hdr.time_to_live ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.time_to_live)
input_set |= ICE_INSET_IPV4_TTL;
- if (ipv4_mask->hdr.next_proto_id ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.next_proto_id)
input_set |=
ICE_INSET_IPV4_PROTO;
- if (ipv4_mask->hdr.type_of_service ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.type_of_service)
input_set |=
ICE_INSET_IPV4_TOS;
}
list[t].type = (tunnel_valid == 0) ?
ICE_IPV4_OFOS : ICE_IPV4_IL;
- if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+ if (ipv4_mask->hdr.src_addr) {
list[t].h_u.ipv4_hdr.src_addr =
ipv4_spec->hdr.src_addr;
list[t].m_u.ipv4_hdr.src_addr =
- UINT32_MAX;
+ ipv4_mask->hdr.src_addr;
}
- if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+ if (ipv4_mask->hdr.dst_addr) {
list[t].h_u.ipv4_hdr.dst_addr =
ipv4_spec->hdr.dst_addr;
list[t].m_u.ipv4_hdr.dst_addr =
- UINT32_MAX;
+ ipv4_mask->hdr.dst_addr;
}
- if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+ if (ipv4_mask->hdr.time_to_live) {
list[t].h_u.ipv4_hdr.time_to_live =
ipv4_spec->hdr.time_to_live;
list[t].m_u.ipv4_hdr.time_to_live =
- UINT8_MAX;
+ ipv4_mask->hdr.time_to_live;
}
- if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+ if (ipv4_mask->hdr.next_proto_id) {
list[t].h_u.ipv4_hdr.protocol =
ipv4_spec->hdr.next_proto_id;
list[t].m_u.ipv4_hdr.protocol =
- UINT8_MAX;
+ ipv4_mask->hdr.next_proto_id;
}
- if (ipv4_mask->hdr.type_of_service ==
- UINT8_MAX) {
+ if (ipv4_mask->hdr.type_of_service) {
list[t].h_u.ipv4_hdr.tos =
ipv4_spec->hdr.type_of_service;
- list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
+ list[t].m_u.ipv4_hdr.tos =
+ ipv4_mask->hdr.type_of_service;
}
t++;
- } else if (!ipv4_spec && !ipv4_mask) {
- list[t].type = (tunnel_valid == 0) ?
- ICE_IPV4_OFOS : ICE_IPV4_IL;
}
break;
@@ -513,54 +504,53 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
return 0;
}
- if (tunnel_valid) {
- if (!memcmp(ipv6_mask->hdr.src_addr,
- ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.src_addr)))
- input_set |=
- ICE_INSET_TUN_IPV6_SRC;
- if (!memcmp(ipv6_mask->hdr.dst_addr,
- ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.dst_addr)))
+ for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
+ if (ipv6_mask->hdr.src_addr[j] &&
+ tunnel_valid) {
input_set |=
- ICE_INSET_TUN_IPV6_DST;
- if (ipv6_mask->hdr.proto == UINT8_MAX)
+ ICE_INSET_TUN_IPV6_SRC;
+ break;
+ } else if (ipv6_mask->hdr.src_addr[j]) {
+ input_set |= ICE_INSET_IPV6_SRC;
+ break;
+ }
+ }
+ for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
+ if (ipv6_mask->hdr.dst_addr[j] &&
+ tunnel_valid) {
input_set |=
+ ICE_INSET_TUN_IPV6_DST;
+ break;
+ } else if (ipv6_mask->hdr.dst_addr[j]) {
+ input_set |= ICE_INSET_IPV6_DST;
+ break;
+ }
+ }
+ if (ipv6_mask->hdr.proto &&
+ tunnel_valid)
+ input_set |=
ICE_INSET_TUN_IPV6_NEXT_HDR;
- if (ipv6_mask->hdr.hop_limits ==
- UINT8_MAX)
- input_set |=
+ else if (ipv6_mask->hdr.proto)
+ input_set |=
+ ICE_INSET_IPV6_NEXT_HDR;
+ if (ipv6_mask->hdr.hop_limits &&
+ tunnel_valid)
+ input_set |=
ICE_INSET_TUN_IPV6_HOP_LIMIT;
- if ((ipv6_mask->hdr.vtc_flow &
+ else if (ipv6_mask->hdr.hop_limits)
+ input_set |=
+ ICE_INSET_IPV6_HOP_LIMIT;
+ if ((ipv6_mask->hdr.vtc_flow &
rte_cpu_to_be_32
- (RTE_IPV6_HDR_TC_MASK))
- == rte_cpu_to_be_32
- (RTE_IPV6_HDR_TC_MASK))
- input_set |=
+ (RTE_IPV6_HDR_TC_MASK)) &&
+ tunnel_valid)
+ input_set |=
ICE_INSET_TUN_IPV6_TC;
- } else {
- if (!memcmp(ipv6_mask->hdr.src_addr,
- ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.src_addr)))
- input_set |= ICE_INSET_IPV6_SRC;
- if (!memcmp(ipv6_mask->hdr.dst_addr,
- ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.dst_addr)))
- input_set |= ICE_INSET_IPV6_DST;
- if (ipv6_mask->hdr.proto == UINT8_MAX)
- input_set |=
- ICE_INSET_IPV6_NEXT_HDR;
- if (ipv6_mask->hdr.hop_limits ==
- UINT8_MAX)
- input_set |=
- ICE_INSET_IPV6_HOP_LIMIT;
- if ((ipv6_mask->hdr.vtc_flow &
+ else if (ipv6_mask->hdr.vtc_flow &
rte_cpu_to_be_32
(RTE_IPV6_HDR_TC_MASK))
- == rte_cpu_to_be_32
- (RTE_IPV6_HDR_TC_MASK))
- input_set |= ICE_INSET_IPV6_TC;
- }
+ input_set |= ICE_INSET_IPV6_TC;
+
list[t].type = (tunnel_valid == 0) ?
ICE_IPV6_OFOS : ICE_IPV6_IL;
struct ice_ipv6_hdr *f;
@@ -568,35 +558,33 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
f = &list[t].h_u.ipv6_hdr;
s = &list[t].m_u.ipv6_hdr;
for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
- if (ipv6_mask->hdr.src_addr[j] ==
- UINT8_MAX) {
+ if (ipv6_mask->hdr.src_addr[j]) {
f->src_addr[j] =
ipv6_spec->hdr.src_addr[j];
s->src_addr[j] =
ipv6_mask->hdr.src_addr[j];
}
- if (ipv6_mask->hdr.dst_addr[j] ==
- UINT8_MAX) {
+ if (ipv6_mask->hdr.dst_addr[j]) {
f->dst_addr[j] =
ipv6_spec->hdr.dst_addr[j];
s->dst_addr[j] =
ipv6_mask->hdr.dst_addr[j];
}
}
- if (ipv6_mask->hdr.proto == UINT8_MAX) {
+ if (ipv6_mask->hdr.proto) {
f->next_hdr =
ipv6_spec->hdr.proto;
- s->next_hdr = UINT8_MAX;
+ s->next_hdr =
+ ipv6_mask->hdr.proto;
}
- if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+ if (ipv6_mask->hdr.hop_limits) {
f->hop_limit =
ipv6_spec->hdr.hop_limits;
- s->hop_limit = UINT8_MAX;
+ s->hop_limit =
+ ipv6_mask->hdr.hop_limits;
}
- if ((ipv6_mask->hdr.vtc_flow &
+ if (ipv6_mask->hdr.vtc_flow &
rte_cpu_to_be_32
- (RTE_IPV6_HDR_TC_MASK))
- == rte_cpu_to_be_32
(RTE_IPV6_HDR_TC_MASK)) {
struct ice_le_ver_tc_flow vtf;
vtf.u.fld.version = 0;
@@ -606,13 +594,13 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
RTE_IPV6_HDR_TC_MASK) >>
RTE_IPV6_HDR_TC_SHIFT;
f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
- vtf.u.fld.tc = UINT8_MAX;
+ vtf.u.fld.tc = (rte_be_to_cpu_32
+ (ipv6_mask->hdr.vtc_flow) &
+ RTE_IPV6_HDR_TC_MASK) >>
+ RTE_IPV6_HDR_TC_SHIFT;
s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
}
t++;
- } else if (!ipv6_spec && !ipv6_mask) {
- list[t].type = (tun_type == ICE_NON_TUN) ?
- ICE_IPV4_OFOS : ICE_IPV4_IL;
}
break;
@@ -631,21 +619,17 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (tunnel_valid) {
- if (udp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (udp_mask->hdr.src_port)
input_set |=
ICE_INSET_TUN_UDP_SRC_PORT;
- if (udp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (udp_mask->hdr.dst_port)
input_set |=
ICE_INSET_TUN_UDP_DST_PORT;
} else {
- if (udp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (udp_mask->hdr.src_port)
input_set |=
ICE_INSET_UDP_SRC_PORT;
- if (udp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (udp_mask->hdr.dst_port)
input_set |=
ICE_INSET_UDP_DST_PORT;
}
@@ -654,21 +638,19 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
list[t].type = ICE_UDP_OF;
else
list[t].type = ICE_UDP_ILOS;
- if (udp_mask->hdr.src_port == UINT16_MAX) {
+ if (udp_mask->hdr.src_port) {
list[t].h_u.l4_hdr.src_port =
udp_spec->hdr.src_port;
list[t].m_u.l4_hdr.src_port =
udp_mask->hdr.src_port;
}
- if (udp_mask->hdr.dst_port == UINT16_MAX) {
+ if (udp_mask->hdr.dst_port) {
list[t].h_u.l4_hdr.dst_port =
udp_spec->hdr.dst_port;
list[t].m_u.l4_hdr.dst_port =
udp_mask->hdr.dst_port;
}
t++;
- } else if (!udp_spec && !udp_mask) {
- list[t].type = ICE_UDP_ILOS;
}
break;
@@ -692,40 +674,34 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (tunnel_valid) {
- if (tcp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (tcp_mask->hdr.src_port)
input_set |=
ICE_INSET_TUN_TCP_SRC_PORT;
- if (tcp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (tcp_mask->hdr.dst_port)
input_set |=
ICE_INSET_TUN_TCP_DST_PORT;
} else {
- if (tcp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (tcp_mask->hdr.src_port)
input_set |=
ICE_INSET_TCP_SRC_PORT;
- if (tcp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (tcp_mask->hdr.dst_port)
input_set |=
ICE_INSET_TCP_DST_PORT;
}
list[t].type = ICE_TCP_IL;
- if (tcp_mask->hdr.src_port == UINT16_MAX) {
+ if (tcp_mask->hdr.src_port) {
list[t].h_u.l4_hdr.src_port =
tcp_spec->hdr.src_port;
list[t].m_u.l4_hdr.src_port =
tcp_mask->hdr.src_port;
}
- if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+ if (tcp_mask->hdr.dst_port) {
list[t].h_u.l4_hdr.dst_port =
tcp_spec->hdr.dst_port;
list[t].m_u.l4_hdr.dst_port =
tcp_mask->hdr.dst_port;
}
t++;
- } else if (!tcp_spec && !tcp_mask) {
- list[t].type = ICE_TCP_IL;
}
break;
@@ -743,40 +719,34 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (tunnel_valid) {
- if (sctp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (sctp_mask->hdr.src_port)
input_set |=
ICE_INSET_TUN_SCTP_SRC_PORT;
- if (sctp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (sctp_mask->hdr.dst_port)
input_set |=
ICE_INSET_TUN_SCTP_DST_PORT;
} else {
- if (sctp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (sctp_mask->hdr.src_port)
input_set |=
ICE_INSET_SCTP_SRC_PORT;
- if (sctp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (sctp_mask->hdr.dst_port)
input_set |=
ICE_INSET_SCTP_DST_PORT;
}
list[t].type = ICE_SCTP_IL;
- if (sctp_mask->hdr.src_port == UINT16_MAX) {
+ if (sctp_mask->hdr.src_port) {
list[t].h_u.sctp_hdr.src_port =
sctp_spec->hdr.src_port;
list[t].m_u.sctp_hdr.src_port =
sctp_mask->hdr.src_port;
}
- if (sctp_mask->hdr.dst_port == UINT16_MAX) {
+ if (sctp_mask->hdr.dst_port) {
list[t].h_u.sctp_hdr.dst_port =
sctp_spec->hdr.dst_port;
list[t].m_u.sctp_hdr.dst_port =
sctp_mask->hdr.dst_port;
}
t++;
- } else if (!sctp_spec && !sctp_mask) {
- list[t].type = ICE_SCTP_IL;
}
break;
@@ -799,21 +769,21 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
tunnel_valid = 1;
if (vxlan_spec && vxlan_mask) {
list[t].type = ICE_VXLAN;
- if (vxlan_mask->vni[0] == UINT8_MAX &&
- vxlan_mask->vni[1] == UINT8_MAX &&
- vxlan_mask->vni[2] == UINT8_MAX) {
+ if (vxlan_mask->vni[0] ||
+ vxlan_mask->vni[1] ||
+ vxlan_mask->vni[2]) {
list[t].h_u.tnl_hdr.vni =
(vxlan_spec->vni[2] << 16) |
(vxlan_spec->vni[1] << 8) |
vxlan_spec->vni[0];
list[t].m_u.tnl_hdr.vni =
- UINT32_MAX;
+ (vxlan_mask->vni[2] << 16) |
+ (vxlan_mask->vni[1] << 8) |
+ vxlan_mask->vni[0];
input_set |=
ICE_INSET_TUN_VXLAN_VNI;
}
t++;
- } else if (!vxlan_spec && !vxlan_mask) {
- list[t].type = ICE_VXLAN;
}
break;
@@ -835,21 +805,21 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
tunnel_valid = 1;
if (nvgre_spec && nvgre_mask) {
list[t].type = ICE_NVGRE;
- if (nvgre_mask->tni[0] == UINT8_MAX &&
- nvgre_mask->tni[1] == UINT8_MAX &&
- nvgre_mask->tni[2] == UINT8_MAX) {
+ if (nvgre_mask->tni[0] ||
+ nvgre_mask->tni[1] ||
+ nvgre_mask->tni[2]) {
list[t].h_u.nvgre_hdr.tni_flow =
(nvgre_spec->tni[2] << 16) |
(nvgre_spec->tni[1] << 8) |
nvgre_spec->tni[0];
list[t].m_u.nvgre_hdr.tni_flow =
- UINT32_MAX;
+ (nvgre_mask->tni[2] << 16) |
+ (nvgre_mask->tni[1] << 8) |
+ nvgre_mask->tni[0];
input_set |=
ICE_INSET_TUN_NVGRE_TNI;
}
t++;
- } else if (!nvgre_spec && !nvgre_mask) {
- list[t].type = ICE_NVGRE;
}
break;
@@ -870,23 +840,21 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (vlan_spec && vlan_mask) {
list[t].type = ICE_VLAN_OFOS;
- if (vlan_mask->tci == UINT16_MAX) {
+ if (vlan_mask->tci) {
list[t].h_u.vlan_hdr.vlan =
vlan_spec->tci;
list[t].m_u.vlan_hdr.vlan =
- UINT16_MAX;
+ vlan_mask->tci;
input_set |= ICE_INSET_VLAN_OUTER;
}
- if (vlan_mask->inner_type == UINT16_MAX) {
+ if (vlan_mask->inner_type) {
list[t].h_u.vlan_hdr.type =
vlan_spec->inner_type;
list[t].m_u.vlan_hdr.type =
- UINT16_MAX;
+ vlan_mask->inner_type;
input_set |= ICE_INSET_VLAN_OUTER;
}
t++;
- } else if (!vlan_spec && !vlan_mask) {
- list[t].type = ICE_VLAN_OFOS;
}
break;
@@ -918,19 +886,16 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
return 0;
}
list[t].type = ICE_PPPOE;
- if (pppoe_mask->session_id == UINT16_MAX) {
+ if (pppoe_mask->session_id) {
list[t].h_u.pppoe_hdr.session_id =
pppoe_spec->session_id;
list[t].m_u.pppoe_hdr.session_id =
- UINT16_MAX;
+ pppoe_mask->session_id;
input_set |= ICE_INSET_PPPOE_SESSION;
}
t++;
pppoe_valid = 1;
- } else if (!pppoe_spec && !pppoe_mask) {
- list[t].type = ICE_PPPOE;
}
-
break;
case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
@@ -953,18 +918,15 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
if (pppoe_valid)
t--;
list[t].type = ICE_PPPOE;
- if (pppoe_proto_mask->proto_id == UINT16_MAX) {
+ if (pppoe_proto_mask->proto_id) {
list[t].h_u.pppoe_hdr.ppp_prot_id =
pppoe_proto_spec->proto_id;
list[t].m_u.pppoe_hdr.ppp_prot_id =
- UINT16_MAX;
+ pppoe_proto_mask->proto_id;
input_set |= ICE_INSET_PPPOE_PROTO;
}
t++;
- } else if (!pppoe_proto_spec && !pppoe_proto_mask) {
- list[t].type = ICE_PPPOE;
}
-
break;
case RTE_FLOW_ITEM_TYPE_VOID:
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v3 04/13] net/ice: add support for MAC VLAN rule
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 00/13] add switch filter support for intel DCF Wei Zhao
` (2 preceding siblings ...)
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 03/13] net/ice: change swicth parser to support flexible mask Wei Zhao
@ 2020-04-03 2:43 ` Wei Zhao
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 05/13] net/ice: change default tunnle type Wei Zhao
` (9 subsequent siblings)
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-03 2:43 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, nannan.lu, qi.fu, yuan.peng, Wei Zhao
This patch adds support for MAC VLAN rules:
it enables the switch filter to direct packets based on
MAC address and VLAN id.
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
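A usage sketch for the new pattern (illustrative rte_flow snippet, not
part of the patch; the MAC address and VLAN id are assumptions):

#include <rte_flow.h>
#include <rte_byteorder.h>
#include <rte_ether.h>

/* Direct packets for one destination MAC on VLAN 100. This is covered
 * by the pattern_ethertype_vlan entry and the ICE_SW_INSET_MAC_VLAN
 * input set added here; a partial TCI mask such as 0x0FFF (VLAN id
 * bits only) is also accepted thanks to the flexible-mask parser. */
static const struct rte_flow_item_eth eth_spec = {
	.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
};
static const struct rte_flow_item_eth eth_mask = {
	.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
};
static const struct rte_flow_item_vlan vlan_spec = {
	.tci = RTE_BE16(100),
};
static const struct rte_flow_item_vlan vlan_mask = {
	.tci = RTE_BE16(0x0FFF),
};

static const struct rte_flow_item mac_vlan_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH,
	  .spec = &eth_spec, .mask = &eth_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
	  .spec = &vlan_spec, .mask = &vlan_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};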
drivers/net/ice/ice_switch_filter.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 4edaea3f5..ed02d9805 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -29,6 +29,9 @@
#define ICE_SW_INSET_ETHER ( \
ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
+#define ICE_SW_INSET_MAC_VLAN ( \
+ ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
+ ICE_INSET_VLAN_OUTER)
#define ICE_SW_INSET_MAC_IPV4 ( \
ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
@@ -107,6 +110,8 @@ static struct
ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
{pattern_ethertype,
ICE_SW_INSET_ETHER, ICE_INSET_NONE},
+ {pattern_ethertype_vlan,
+ ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
{pattern_eth_ipv4,
ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
{pattern_eth_ipv4_udp,
@@ -149,6 +154,8 @@ static struct
ice_pattern_match_item ice_switch_pattern_dist_os[] = {
{pattern_ethertype,
ICE_SW_INSET_ETHER, ICE_INSET_NONE},
+ {pattern_ethertype_vlan,
+ ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
{pattern_eth_arp,
ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv4,
@@ -179,6 +186,8 @@ ice_pattern_match_item ice_switch_pattern_dist_os[] = {
static struct
ice_pattern_match_item ice_switch_pattern_perm[] = {
+ {pattern_ethertype_vlan,
+ ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
{pattern_eth_ipv4,
ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
{pattern_eth_ipv4_udp,
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v3 05/13] net/ice: change default tunnle type
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 00/13] add switch filter support for intel DCF Wei Zhao
` (3 preceding siblings ...)
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 04/13] net/ice: add support for MAC VLAN rule Wei Zhao
@ 2020-04-03 2:43 ` Wei Zhao
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 06/13] net/ice: add action number check for swicth Wei Zhao
` (8 subsequent siblings)
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-03 2:43 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, nannan.lu, qi.fu, yuan.peng, stable, Wei Zhao
The default tunnel type for the switch filter is changed to
the new definition ICE_SW_TUN_AND_NON_TUN so that a rule
will apply to more packet types.
Cc: stable@dpdk.org
Fixes: 47d460d63233 ("net/ice: rework switch filter")
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_switch_filter.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index ed02d9805..d9bdf9637 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -1091,7 +1091,8 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
uint16_t lkups_num = 0;
const struct rte_flow_item *item = pattern;
uint16_t item_num = 0;
- enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
+ enum ice_sw_tunnel_type tun_type =
+ ICE_SW_TUN_AND_NON_TUN;
struct ice_pattern_match_item *pattern_match_item = NULL;
for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v3 06/13] net/ice: add action number check for swicth
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 00/13] add switch filter support for intel DCF Wei Zhao
` (4 preceding siblings ...)
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 05/13] net/ice: change default tunnle type Wei Zhao
@ 2020-04-03 2:43 ` Wei Zhao
2020-04-03 3:15 ` Zhang, Qi Z
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 07/13] net/ice: add support for ESP/AH/L2TP Wei Zhao
` (7 subsequent siblings)
13 siblings, 1 reply; 69+ messages in thread
From: Wei Zhao @ 2020-04-03 2:43 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, nannan.lu, qi.fu, yuan.peng, stable, Wei Zhao
The action number can only be one for the DCF or PF
switch filter; multiple actions are not supported.
Cc: stable@dpdk.org
Fixes: 47d460d63233 ("net/ice: rework switch filter")
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
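To illustrate the new check (hypothetical action lists, not part of
the patch):

#include <rte_flow.h>

/* Exactly one of VF/RSS/QUEUE/DROP is accepted; VOID is skipped. */
static const struct rte_flow_action_queue queue = { .index = 3 };

static const struct rte_flow_action one_action[] = {	/* accepted */
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};

static const struct rte_flow_action two_actions[] = {	/* now rejected */
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
	{ .type = RTE_FLOW_ACTION_TYPE_DROP },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};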
drivers/net/ice/ice_switch_filter.c | 48 +++++++++++++++++++++++++++++
1 file changed, 48 insertions(+)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index d9bdf9637..cc48f22dd 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -1073,6 +1073,46 @@ ice_switch_parse_action(struct ice_pf *pf,
return -rte_errno;
}
+static int
+ice_switch_check_action(const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action *action;
+ enum rte_flow_action_type action_type;
+ uint16_t actions_num = 0;
+
+ for (action = actions; action->type !=
+ RTE_FLOW_ACTION_TYPE_END; action++) {
+ action_type = action->type;
+ switch (action_type) {
+ case RTE_FLOW_ACTION_TYPE_VF:
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ actions_num++;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ continue;
+ default:
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "Invalid action type");
+ return -rte_errno;
+ }
+ }
+
+ if (actions_num > 1) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "Invalid action number");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
static int
ice_switch_parse_pattern_action(struct ice_adapter *ad,
struct ice_pattern_match_item *array,
@@ -1158,6 +1198,14 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
goto error;
}
+ ret = ice_switch_check_action(actions, error);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Invalid input action number");
+ goto error;
+ }
+
if (ad->hw.dcf_enabled)
ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
else
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v3 07/13] net/ice: add support for ESP/AH/L2TP
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 00/13] add switch filter support for intel DCF Wei Zhao
` (5 preceding siblings ...)
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 06/13] net/ice: add action number check for swicth Wei Zhao
@ 2020-04-03 2:43 ` Wei Zhao
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 08/13] net/ice: add support for PFCP Wei Zhao
` (6 subsequent siblings)
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-03 2:43 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, nannan.lu, qi.fu, yuan.peng, Wei Zhao
This patch adds support for ESP/AH/L2TP packets:
it enables the switch filter to direct IPv6 packets with an
ESP/AH/L2TP payload to a specific action.
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
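A usage sketch (illustrative rte_flow pattern, not part of the patch):

#include <rte_flow.h>

/* ESP over IPv6. The ESP item deliberately carries no spec/mask: the
 * parser below rejects any, since this item only selects the
 * ICE_SW_TUN_PROFID_IPV6_ESP profile rather than matching fields.
 * AH and L2TPv3 rules look the same with the last item swapped. */
static const struct rte_flow_item ipv6_esp_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV6 },
	{ .type = RTE_FLOW_ITEM_TYPE_ESP },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};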
drivers/net/ice/ice_generic_flow.c | 19 +++++++
drivers/net/ice/ice_generic_flow.h | 9 +++
drivers/net/ice/ice_switch_filter.c | 87 +++++++++++++++++++++++++++--
3 files changed, 109 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 0fdc7e617..189ef6c4a 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -1382,6 +1382,25 @@ enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6_icmp6[] = {
RTE_FLOW_ITEM_TYPE_ICMP6,
RTE_FLOW_ITEM_TYPE_END,
};
+enum rte_flow_item_type pattern_eth_ipv6_esp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_ESP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+enum rte_flow_item_type pattern_eth_ipv6_ah[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_AH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+enum rte_flow_item_type pattern_eth_ipv6_l2tp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
typedef struct ice_flow_engine * (*parse_engine_t)(struct ice_adapter *ad,
struct rte_flow *flow,
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
index 3361ecbd9..006fd00b3 100644
--- a/drivers/net/ice/ice_generic_flow.h
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -391,6 +391,15 @@ extern enum rte_flow_item_type pattern_eth_pppoes_ipv6_icmp6[];
extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv6_icmp6[];
extern enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6_icmp6[];
+/* ESP */
+extern enum rte_flow_item_type pattern_eth_ipv6_esp[];
+
+/* AH */
+extern enum rte_flow_item_type pattern_eth_ipv6_ah[];
+
+/* L2TP */
+extern enum rte_flow_item_type pattern_eth_ipv6_l2tp[];
+
struct ice_adapter;
extern const struct rte_flow_ops ice_flow_ops;
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index cc48f22dd..9c87a16dd 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -148,6 +148,12 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
{pattern_eth_vlan_pppoes_proto,
ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
+ {pattern_eth_ipv6_esp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_ah,
+ ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_l2tp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
};
static struct
@@ -212,6 +218,12 @@ ice_pattern_match_item ice_switch_pattern_perm[] = {
ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
+ {pattern_eth_ipv6_esp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_ah,
+ ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_l2tp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
};
static int
@@ -319,7 +331,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
struct rte_flow_error *error,
struct ice_adv_lkup_elem *list,
uint16_t *lkups_num,
- enum ice_sw_tunnel_type tun_type)
+ enum ice_sw_tunnel_type *tun_type)
{
const struct rte_flow_item *item = pattern;
enum rte_flow_item_type item_type;
@@ -335,10 +347,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
*pppoe_proto_mask;
+ const struct rte_flow_item_esp *esp_spec, *esp_mask;
+ const struct rte_flow_item_ah *ah_spec, *ah_mask;
+ const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
uint64_t input_set = ICE_INSET_NONE;
uint16_t j, t = 0;
uint16_t tunnel_valid = 0;
uint16_t pppoe_valid = 0;
+ uint16_t ipv6_valiad = 0;
for (item = pattern; item->type !=
@@ -504,6 +520,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
case RTE_FLOW_ITEM_TYPE_IPV6:
ipv6_spec = item->spec;
ipv6_mask = item->mask;
+ ipv6_valiad = 1;
if (ipv6_spec && ipv6_mask) {
if (ipv6_mask->hdr.payload_len) {
rte_flow_error_set(error, EINVAL,
@@ -642,7 +659,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
input_set |=
ICE_INSET_UDP_DST_PORT;
}
- if (tun_type == ICE_SW_TUN_VXLAN &&
+ if (*tun_type == ICE_SW_TUN_VXLAN &&
tunnel_valid == 0)
list[t].type = ICE_UDP_OF;
else
@@ -938,6 +955,48 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
break;
+ case RTE_FLOW_ITEM_TYPE_ESP:
+ esp_spec = item->spec;
+ esp_mask = item->mask;
+ if (esp_spec || esp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid esp item");
+ return -ENOTSUP;
+ }
+ if (ipv6_valiad)
+ *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_AH:
+ ah_spec = item->spec;
+ ah_mask = item->mask;
+ if (ah_spec || ah_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ah item");
+ return -ENOTSUP;
+ }
+ if (ipv6_valiad)
+ *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
+ l2tp_spec = item->spec;
+ l2tp_mask = item->mask;
+ if (l2tp_spec || l2tp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid l2tp item");
+ return -ENOTSUP;
+ }
+ if (ipv6_valiad)
+ *tun_type = ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
+ break;
+
case RTE_FLOW_ITEM_TYPE_VOID:
break;
@@ -1113,6 +1172,21 @@ ice_switch_check_action(const struct rte_flow_action *actions,
return 0;
}
+static bool
+ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
+{
+ switch (tun_type) {
+ case ICE_SW_TUN_PROFID_IPV6_ESP:
+ case ICE_SW_TUN_PROFID_IPV6_AH:
+ case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
static int
ice_switch_parse_pattern_action(struct ice_adapter *ad,
struct ice_pattern_match_item *array,
@@ -1168,8 +1242,6 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
return -rte_errno;
}
- rule_info.tun_type = tun_type;
-
sw_meta_ptr =
rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
if (!sw_meta_ptr) {
@@ -1189,8 +1261,9 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
}
inputset = ice_switch_inset_get
- (pattern, error, list, &lkups_num, tun_type);
- if (!inputset || (inputset & ~pattern_match_item->input_set_mask)) {
+ (pattern, error, list, &lkups_num, &tun_type);
+ if ((!inputset && !ice_is_profile_rule(tun_type)) ||
+ (inputset & ~pattern_match_item->input_set_mask)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
pattern,
@@ -1198,6 +1271,8 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
goto error;
}
+ rule_info.tun_type = tun_type;
+
ret = ice_switch_check_action(actions, error);
if (ret) {
rte_flow_error_set(error, EINVAL,
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v3 08/13] net/ice: add support for PFCP
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 00/13] add switch filter support for intel DCF Wei Zhao
` (6 preceding siblings ...)
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 07/13] net/ice: add support for ESP/AH/L2TP Wei Zhao
@ 2020-04-03 2:43 ` Wei Zhao
2020-04-03 3:16 ` Zhang, Qi Z
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 09/13] net/ice: add support for IPv6 NAT-T Wei Zhao
` (5 subsequent siblings)
13 siblings, 1 reply; 69+ messages in thread
From: Wei Zhao @ 2020-04-03 2:43 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, nannan.lu, qi.fu, yuan.peng, Wei Zhao
This patch adds switch filter support for PFCP packets:
it enables the switch filter to direct IPv4/IPv6 packets with
a PFCP session or node payload to a specific action.
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
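A usage sketch (illustrative rte_flow pattern, not part of the patch):

#include <rte_flow.h>

/* PFCP session traffic over IPv4. Only the S field may be matched:
 * s_field == 1 selects the PFCP-session profile, s_field == 0 the
 * PFCP-node profile, as implemented in the parser below. */
static const struct rte_flow_item_pfcp pfcp_spec = { .s_field = 0x01 };
static const struct rte_flow_item_pfcp pfcp_mask = { .s_field = 0x01 };

static const struct rte_flow_item ipv4_pfcp_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
	{ .type = RTE_FLOW_ITEM_TYPE_PFCP,
	  .spec = &pfcp_spec, .mask = &pfcp_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};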
drivers/net/ice/ice_generic_flow.c | 15 +++++++
drivers/net/ice/ice_generic_flow.h | 6 +++
drivers/net/ice/ice_switch_filter.c | 62 +++++++++++++++++++++++++++++
3 files changed, 83 insertions(+)
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 189ef6c4a..04dcaba08 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -1400,6 +1400,21 @@ enum rte_flow_item_type pattern_eth_ipv6_l2tp[] = {
RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
RTE_FLOW_ITEM_TYPE_END,
};
+enum rte_flow_item_type pattern_eth_ipv4_pfcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_PFCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+enum rte_flow_item_type pattern_eth_ipv6_pfcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_PFCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
typedef struct ice_flow_engine * (*parse_engine_t)(struct ice_adapter *ad,
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
index 006fd00b3..65cd64c7f 100644
--- a/drivers/net/ice/ice_generic_flow.h
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -400,6 +400,12 @@ extern enum rte_flow_item_type pattern_eth_ipv6_ah[];
/* L2TP */
extern enum rte_flow_item_type pattern_eth_ipv6_l2tp[];
+/* PFCP */
+extern enum rte_flow_item_type pattern_eth_ipv4_pfcp[];
+extern enum rte_flow_item_type pattern_eth_ipv6_pfcp[];
+
+
+
struct ice_adapter;
extern const struct rte_flow_ops ice_flow_ops;
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 9c87a16dd..9b4b9346c 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -154,6 +154,10 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv6_l2tp,
ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv4_pfcp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_pfcp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
};
static struct
@@ -224,6 +228,10 @@ ice_pattern_match_item ice_switch_pattern_perm[] = {
ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv6_l2tp,
ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv4_pfcp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_pfcp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
};
static int
@@ -350,6 +358,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
const struct rte_flow_item_esp *esp_spec, *esp_mask;
const struct rte_flow_item_ah *ah_spec, *ah_mask;
const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
+ const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
uint64_t input_set = ICE_INSET_NONE;
uint16_t j, t = 0;
uint16_t tunnel_valid = 0;
@@ -996,6 +1005,55 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
if (ipv6_valiad)
*tun_type = ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
break;
+ case RTE_FLOW_ITEM_TYPE_PFCP:
+ pfcp_spec = item->spec;
+ pfcp_mask = item->mask;
+ /* Check if PFCP item is used to describe protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
+ */
+ if ((!pfcp_spec && pfcp_mask) ||
+ (pfcp_spec && !pfcp_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid PFCP item");
+ return -ENOTSUP;
+ }
+ if (pfcp_spec && pfcp_mask) {
+ /* Check pfcp mask and update input set */
+ if (pfcp_mask->msg_type ||
+ pfcp_mask->msg_len ||
+ pfcp_mask->seid) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid pfcp mask");
+ return -ENOTSUP;
+ }
+ if (pfcp_mask->s_field &&
+ pfcp_spec->s_field == 0x01 &&
+ ipv6_valiad)
+ *tun_type =
+ ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
+ else if (pfcp_mask->s_field &&
+ pfcp_spec->s_field == 0x01)
+ *tun_type =
+ ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
+ else if (pfcp_mask->s_field &&
+ !pfcp_spec->s_field &&
+ ipv6_valiad)
+ *tun_type =
+ ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
+ else if (pfcp_mask->s_field &&
+ !pfcp_spec->s_field)
+ *tun_type =
+ ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
+ else
+ return -ENOTSUP;
+ }
+ break;
+
case RTE_FLOW_ITEM_TYPE_VOID:
break;
@@ -1179,6 +1237,10 @@ ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
case ICE_SW_TUN_PROFID_IPV6_ESP:
case ICE_SW_TUN_PROFID_IPV6_AH:
case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
+ case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
+ case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
+ case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
+ case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
return true;
default:
break;
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v3 09/13] net/ice: add support for IPv6 NAT-T
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 00/13] add switch filter support for intel DCF Wei Zhao
` (7 preceding siblings ...)
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 08/13] net/ice: add support for PFCP Wei Zhao
@ 2020-04-03 2:43 ` Wei Zhao
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 10/13] net/ice: add more flow support for permit stage Wei Zhao
` (4 subsequent siblings)
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-03 2:43 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, nannan.lu, qi.fu, yuan.peng, Wei Zhao
This patch adds switch filter support for IPv6 NAT-T packets:
it enables the switch filter to direct IPv6 packets with a
NAT-T payload to a specific action.
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
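A usage sketch (illustrative rte_flow pattern, not part of the patch):

#include <rte_flow.h>

/* IPv6 NAT-T: ESP encapsulated in UDP. The extra UDP item makes the
 * parser pick ICE_SW_TUN_PROFID_IPV6_NAT_T instead of the plain
 * IPv6-ESP (or IPv6-AH) profile. */
static const struct rte_flow_item ipv6_natt_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV6 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
	{ .type = RTE_FLOW_ITEM_TYPE_ESP },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};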
drivers/net/ice/ice_generic_flow.c | 14 ++++++++++++++
drivers/net/ice/ice_generic_flow.h | 2 ++
drivers/net/ice/ice_switch_filter.c | 19 +++++++++++++++++--
3 files changed, 33 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 04dcaba08..3365aeb86 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -1394,6 +1394,20 @@ enum rte_flow_item_type pattern_eth_ipv6_ah[] = {
RTE_FLOW_ITEM_TYPE_AH,
RTE_FLOW_ITEM_TYPE_END,
};
+enum rte_flow_item_type pattern_eth_ipv6_udp_esp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_ESP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+enum rte_flow_item_type pattern_eth_ipv6_udp_ah[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_AH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
enum rte_flow_item_type pattern_eth_ipv6_l2tp[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV6,
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
index 65cd64c7f..25badf192 100644
--- a/drivers/net/ice/ice_generic_flow.h
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -393,9 +393,11 @@ extern enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6_icmp6[];
/* ESP */
extern enum rte_flow_item_type pattern_eth_ipv6_esp[];
+extern enum rte_flow_item_type pattern_eth_ipv6_udp_esp[];
/* AH */
extern enum rte_flow_item_type pattern_eth_ipv6_ah[];
+extern enum rte_flow_item_type pattern_eth_ipv6_udp_ah[];
/* L2TP */
extern enum rte_flow_item_type pattern_eth_ipv6_l2tp[];
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 9b4b9346c..4248b8911 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -150,8 +150,12 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
{pattern_eth_ipv6_esp,
ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_udp_esp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv6_ah,
ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_udp_ah,
+ ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv6_l2tp,
ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv4_pfcp,
@@ -224,8 +228,12 @@ ice_pattern_match_item ice_switch_pattern_perm[] = {
ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
{pattern_eth_ipv6_esp,
ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_udp_esp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv6_ah,
ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_udp_ah,
+ ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv6_l2tp,
ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv4_pfcp,
@@ -364,6 +372,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
uint16_t tunnel_valid = 0;
uint16_t pppoe_valid = 0;
uint16_t ipv6_valiad = 0;
+ uint16_t udp_valiad = 0;
for (item = pattern; item->type !=
@@ -642,6 +651,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
case RTE_FLOW_ITEM_TYPE_UDP:
udp_spec = item->spec;
udp_mask = item->mask;
+ udp_valiad = 1;
if (udp_spec && udp_mask) {
/* Check UDP mask and update input set*/
if (udp_mask->hdr.dgram_len ||
@@ -974,7 +984,9 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
"Invalid esp item");
return -ENOTSUP;
}
- if (ipv6_valiad)
+ if (ipv6_valiad && udp_valiad)
+ *tun_type = ICE_SW_TUN_PROFID_IPV6_NAT_T;
+ else if (ipv6_valiad)
*tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
break;
@@ -988,7 +1000,9 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
"Invalid ah item");
return -ENOTSUP;
}
- if (ipv6_valiad)
+ if (ipv6_valiad && udp_valiad)
+ *tun_type = ICE_SW_TUN_PROFID_IPV6_NAT_T;
+ else if (ipv6_valiad)
*tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
break;
@@ -1237,6 +1251,7 @@ ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
case ICE_SW_TUN_PROFID_IPV6_ESP:
case ICE_SW_TUN_PROFID_IPV6_AH:
case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
+ case ICE_SW_TUN_PROFID_IPV6_NAT_T:
case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v3 10/13] net/ice: add more flow support for permit stage
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 00/13] add switch filter support for intel DCF Wei Zhao
` (8 preceding siblings ...)
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 09/13] net/ice: add support for IPv6 NAT-T Wei Zhao
@ 2020-04-03 2:43 ` Wei Zhao
2020-04-03 3:20 ` Zhang, Qi Z
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 11/13] net/ice: fix input set of VLAN item Wei Zhao
` (3 subsequent siblings)
13 siblings, 1 reply; 69+ messages in thread
From: Wei Zhao @ 2020-04-03 2:43 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, nannan.lu, qi.fu, yuan.peng, Wei Zhao
This patch adds permit stage support in the switch filter
for more flow patterns in PF-only pipeline mode.
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_switch_filter.c | 14 ++++++++++++++
1 file changed, 14 insertions(+)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 4248b8911..81d069e99 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -200,6 +200,8 @@ ice_pattern_match_item ice_switch_pattern_dist_os[] = {
static struct
ice_pattern_match_item ice_switch_pattern_perm[] = {
+ {pattern_ethertype,
+ ICE_SW_INSET_ETHER, ICE_INSET_NONE},
{pattern_ethertype_vlan,
ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
{pattern_eth_ipv4,
@@ -226,6 +228,18 @@ ice_pattern_match_item ice_switch_pattern_perm[] = {
ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
+ {pattern_eth_pppoed,
+ ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
+ {pattern_eth_vlan_pppoed,
+ ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
+ {pattern_eth_pppoes,
+ ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
+ {pattern_eth_vlan_pppoes,
+ ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
+ {pattern_eth_pppoes_proto,
+ ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
+ {pattern_eth_vlan_pppoes_proto,
+ ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
{pattern_eth_ipv6_esp,
ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv6_udp_esp,
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v3 11/13] net/ice: fix input set of VLAN item
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 00/13] add switch filter support for intel DCF Wei Zhao
` (9 preceding siblings ...)
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 10/13] net/ice: add more flow support for permit stage Wei Zhao
@ 2020-04-03 2:43 ` Wei Zhao
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 12/13] net/ice: enable flow redirect on switch Wei Zhao
` (2 subsequent siblings)
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-03 2:43 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, nannan.lu, qi.fu, yuan.peng, stable, Wei Zhao
The input set for the inner type of a VLAN item should
be ICE_INSET_ETHERTYPE, not ICE_INSET_VLAN_OUTER.
This MAC VLAN filter is also part of the DCF switch filter.
Cc: stable@dpdk.org
Fixes: 47d460d63233 ("net/ice: rework switch filter")
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
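To illustrate the fix (hypothetical item, not part of the patch): a
VLAN item matching the inner EtherType is now accounted as
ICE_INSET_ETHERTYPE, which ICE_SW_INSET_MAC_VLAN already contains, so
the reported input set reflects an EtherType match instead of
duplicating the outer-VLAN bit.

#include <rte_flow.h>
#include <rte_byteorder.h>
#include <rte_ether.h>

/* Match VLAN frames whose inner EtherType is IPv4. */
static const struct rte_flow_item_vlan vlan_spec = {
	.inner_type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
};
static const struct rte_flow_item_vlan vlan_mask = {
	.inner_type = RTE_BE16(0xFFFF),
};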
drivers/net/ice/ice_switch_filter.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 81d069e99..686f9c3e3 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -911,7 +911,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
vlan_spec->inner_type;
list[t].m_u.vlan_hdr.type =
vlan_mask->inner_type;
- input_set |= ICE_INSET_VLAN_OUTER;
+ input_set |= ICE_INSET_ETHERTYPE;
}
t++;
}
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v3 12/13] net/ice: enable flow redirect on switch
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 00/13] add switch filter support for intel DCF Wei Zhao
` (10 preceding siblings ...)
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 11/13] net/ice: fix input set of VLAN item Wei Zhao
@ 2020-04-03 2:43 ` Wei Zhao
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 13/13] net/ice: redirect switch rule to new VSI Wei Zhao
2020-04-03 4:45 ` [dpdk-dev] [PATCH v4 00/13] add switch filter support for intel DCF Wei Zhao
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-03 2:43 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, nannan.lu, qi.fu, yuan.peng, Beilei Xing
Enable flow redirect on the switch; currently only
VSI redirect is supported.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
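A hypothetical caller sketch (mirrors the use in patch 13/13; assumes
the declarations from ice_generic_flow.h):

#include "ice_generic_flow.h"

/* Redirect every switch rule bound to a VSI handle after its VSI
 * number changed, via the new .redirect engine hook. */
static void
redirect_vsi(struct ice_adapter *ad, uint16_t vsi_handle,
	     uint16_t new_vsi_num)
{
	struct ice_flow_redirect rd = {
		.type = ICE_FLOW_REDIRECT_VSI,
		.vsi_handle = vsi_handle,
		.new_vsi_num = new_vsi_num,
	};

	ice_flow_redirect(ad, &rd);
}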
drivers/net/ice/ice_switch_filter.c | 73 +++++++++++++++++++++++++++++
1 file changed, 73 insertions(+)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 686f9c3e3..55a5618a7 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -1420,6 +1420,78 @@ ice_switch_query(struct ice_adapter *ad __rte_unused,
return -rte_errno;
}
+static int
+ice_switch_redirect(struct ice_adapter *ad,
+ struct rte_flow *flow,
+ struct ice_flow_redirect *rd)
+{
+ struct ice_rule_query_data *rdata = flow->rule;
+ struct ice_adv_fltr_mgmt_list_entry *list_itr;
+ struct ice_adv_lkup_elem *lkups_dp = NULL;
+ struct LIST_HEAD_TYPE *list_head;
+ struct ice_adv_rule_info rinfo;
+ struct ice_hw *hw = &ad->hw;
+ struct ice_switch_info *sw;
+ uint16_t lkups_cnt;
+ int ret;
+
+ sw = hw->switch_info;
+ if (!sw->recp_list[rdata->rid].recp_created)
+ return -EINVAL;
+
+ if (rd->type != ICE_FLOW_REDIRECT_VSI)
+ return -ENOTSUP;
+
+ list_head = &sw->recp_list[rdata->rid].filt_rules;
+ LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
+ list_entry) {
+ rinfo = list_itr->rule_info;
+ if (rinfo.fltr_rule_id == rdata->rule_id &&
+ rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
+ rinfo.sw_act.vsi_handle == rd->vsi_handle) {
+ lkups_cnt = list_itr->lkups_cnt;
+ lkups_dp = (struct ice_adv_lkup_elem *)
+ ice_memdup(hw, list_itr->lkups,
+ sizeof(*list_itr->lkups) *
+ lkups_cnt, ICE_NONDMA_TO_NONDMA);
+ if (!lkups_dp) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory.");
+ return -EINVAL;
+ }
+
+ break;
+ }
+ }
+
+ if (!lkups_dp)
+ return 0;
+
+ /* Remove the old rule */
+ ret = ice_rem_adv_rule(hw, list_itr->lkups,
+ lkups_cnt, &rinfo);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
+ rdata->rule_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Update VSI context */
+ hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
+
+ /* Replay the rule */
+ ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
+ &rinfo, rdata);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to replay the rule");
+ ret = -EINVAL;
+ }
+
+out:
+ ice_free(hw, lkups_dp);
+ return ret;
+}
+
static int
ice_switch_init(struct ice_adapter *ad)
{
@@ -1465,6 +1537,7 @@ ice_flow_engine ice_switch_engine = {
.create = ice_switch_create,
.destroy = ice_switch_destroy,
.query_count = ice_switch_query,
+ .redirect = ice_switch_redirect,
.free = ice_switch_filter_rule_free,
.type = ICE_FLOW_ENGINE_SWITCH,
};
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v3 13/13] net/ice: redirect switch rule to new VSI
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 00/13] add switch filter support for intel DCF Wei Zhao
` (11 preceding siblings ...)
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 12/13] net/ice: enable flow redirect on switch Wei Zhao
@ 2020-04-03 2:43 ` Wei Zhao
2020-04-03 4:45 ` [dpdk-dev] [PATCH v4 00/13] add switch filter support for intel DCF Wei Zhao
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-03 2:43 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, nannan.lu, qi.fu, yuan.peng, Beilei Xing
After a VF reset, the VF's VSI number may change, so a
switch rule which forwards packets to the old VSI number
should be redirected to the new VSI number.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/ice/ice_dcf_parent.c | 22 +++++++++++++++++++---
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index 37f0e2be2..e05b6b3e5 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -19,6 +19,8 @@ ice_dcf_update_vsi_ctx(struct ice_hw *hw, uint16_t vsi_handle,
uint16_t vsi_map)
{
struct ice_vsi_ctx *vsi_ctx;
+ bool first_update = false;
+ uint16_t new_vsi_num;
if (unlikely(vsi_handle >= ICE_MAX_VSI)) {
PMD_DRV_LOG(ERR, "Invalid vsi handle %u", vsi_handle);
@@ -35,11 +37,25 @@ ice_dcf_update_vsi_ctx(struct ice_hw *hw, uint16_t vsi_handle,
vsi_handle);
return;
}
+ hw->vsi_ctx[vsi_handle] = vsi_ctx;
+ first_update = true;
}
- vsi_ctx->vsi_num = (vsi_map & VIRTCHNL_DCF_VF_VSI_ID_M) >>
- VIRTCHNL_DCF_VF_VSI_ID_S;
- hw->vsi_ctx[vsi_handle] = vsi_ctx;
+ new_vsi_num = (vsi_map & VIRTCHNL_DCF_VF_VSI_ID_M) >>
+ VIRTCHNL_DCF_VF_VSI_ID_S;
+
+ /* Redirect rules if vsi mapping table changes. */
+ if (!first_update && vsi_ctx->vsi_num != new_vsi_num) {
+ struct ice_flow_redirect rd;
+
+ memset(&rd, 0, sizeof(struct ice_flow_redirect));
+ rd.type = ICE_FLOW_REDIRECT_VSI;
+ rd.vsi_handle = vsi_handle;
+ rd.new_vsi_num = new_vsi_num;
+ ice_flow_redirect((struct ice_adapter *)hw->back, &rd);
+ } else {
+ vsi_ctx->vsi_num = new_vsi_num;
+ }
PMD_DRV_LOG(DEBUG, "VF%u is assigned with vsi number %u",
vsi_handle, vsi_ctx->vsi_num);
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* Re: [dpdk-dev] [PATCH v3 06/13] net/ice: add action number check for swicth
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 06/13] net/ice: add action number check for swicth Wei Zhao
@ 2020-04-03 3:15 ` Zhang, Qi Z
0 siblings, 0 replies; 69+ messages in thread
From: Zhang, Qi Z @ 2020-04-03 3:15 UTC (permalink / raw)
To: Zhao1, Wei, dev; +Cc: Lu, Nannan, Fu, Qi, Peng, Yuan, stable
> -----Original Message-----
> From: Zhao1, Wei <wei.zhao1@intel.com>
> Sent: Friday, April 3, 2020 10:44 AM
> To: dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Lu, Nannan <nannan.lu@intel.com>;
> Fu, Qi <qi.fu@intel.com>; Peng, Yuan <yuan.peng@intel.com>;
> stable@dpdk.org; Zhao1, Wei <wei.zhao1@intel.com>
> Subject: [PATCH v3 06/13] net/ice: add action number check for swicth
>
> The action number can only be one for DCF or PF switch filter, not support
> not support multiple actions.
Not support multiple actions
>
> Cc: stable@dpdk.org
> Fixes: 47d460d63233 ("net/ice: rework switch filter")
>
> Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> ---
> drivers/net/ice/ice_switch_filter.c | 48
> +++++++++++++++++++++++++++++
> 1 file changed, 48 insertions(+)
>
> diff --git a/drivers/net/ice/ice_switch_filter.c
> b/drivers/net/ice/ice_switch_filter.c
> index d9bdf9637..cc48f22dd 100644
> --- a/drivers/net/ice/ice_switch_filter.c
> +++ b/drivers/net/ice/ice_switch_filter.c
> @@ -1073,6 +1073,46 @@ ice_switch_parse_action(struct ice_pf *pf,
> return -rte_errno;
> }
>
> +static int
> +ice_switch_check_action(const struct rte_flow_action *actions,
> + struct rte_flow_error *error)
> +{
> + const struct rte_flow_action *action;
> + enum rte_flow_action_type action_type;
> + uint16_t actions_num = 0;
> +
> + for (action = actions; action->type !=
> + RTE_FLOW_ACTION_TYPE_END; action++) {
> + action_type = action->type;
> + switch (action_type) {
> + case RTE_FLOW_ACTION_TYPE_VF:
> + case RTE_FLOW_ACTION_TYPE_RSS:
> + case RTE_FLOW_ACTION_TYPE_QUEUE:
> + case RTE_FLOW_ACTION_TYPE_DROP:
> + actions_num++;
> + break;
> + case RTE_FLOW_ACTION_TYPE_VOID:
> + continue;
> + default:
> + rte_flow_error_set(error,
> + EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
> + actions,
> + "Invalid action type");
> + return -rte_errno;
> + }
> + }
> +
> + if (actions_num > 1) {
> + rte_flow_error_set(error,
> + EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
> + actions,
> + "Invalid action number");
> + return -rte_errno;
> + }
> +
> + return 0;
> +}
> +
> static int
> ice_switch_parse_pattern_action(struct ice_adapter *ad,
> struct ice_pattern_match_item *array, @@ -1158,6 +1198,14 @@
> ice_switch_parse_pattern_action(struct ice_adapter *ad,
> goto error;
> }
>
> + ret = ice_switch_check_action(actions, error);
> + if (ret) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> + "Invalid input action number");
> + goto error;
> + }
> +
> if (ad->hw.dcf_enabled)
> ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
> else
> --
> 2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* Re: [dpdk-dev] [PATCH v3 08/13] net/ice: add support for PFCP
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 08/13] net/ice: add support for PFCP Wei Zhao
@ 2020-04-03 3:16 ` Zhang, Qi Z
2020-04-03 3:18 ` Zhao1, Wei
0 siblings, 1 reply; 69+ messages in thread
From: Zhang, Qi Z @ 2020-04-03 3:16 UTC (permalink / raw)
To: Zhao1, Wei, dev; +Cc: Lu, Nannan, Fu, Qi, Peng, Yuan
> -----Original Message-----
> From: Zhao1, Wei <wei.zhao1@intel.com>
> Sent: Friday, April 3, 2020 10:44 AM
> To: dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Lu, Nannan <nannan.lu@intel.com>;
> Fu, Qi <qi.fu@intel.com>; Peng, Yuan <yuan.peng@intel.com>; Zhao1, Wei
> <wei.zhao1@intel.com>
> Subject: [PATCH v3 08/13] net/ice: add support for PFCP
>
> This patch add switch filter support for PFCP packets, it enable swicth filter
> to direct ipv4/ipv6 packets with PFCP session or node payload to specific
> action.
>
> Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> ---
> drivers/net/ice/ice_generic_flow.c | 15 +++++++
> drivers/net/ice/ice_generic_flow.h | 6 +++
> drivers/net/ice/ice_switch_filter.c | 62 +++++++++++++++++++++++++++++
> 3 files changed, 83 insertions(+)
>
> diff --git a/drivers/net/ice/ice_generic_flow.c
> b/drivers/net/ice/ice_generic_flow.c
> index 189ef6c4a..04dcaba08 100644
> --- a/drivers/net/ice/ice_generic_flow.c
> +++ b/drivers/net/ice/ice_generic_flow.c
> @@ -1400,6 +1400,21 @@ enum rte_flow_item_type
> pattern_eth_ipv6_l2tp[] = {
> RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
> RTE_FLOW_ITEM_TYPE_END,
> };
> +enum rte_flow_item_type pattern_eth_ipv4_pfcp[] = {
> + RTE_FLOW_ITEM_TYPE_ETH,
> + RTE_FLOW_ITEM_TYPE_IPV4,
> + RTE_FLOW_ITEM_TYPE_UDP,
> + RTE_FLOW_ITEM_TYPE_PFCP,
> + RTE_FLOW_ITEM_TYPE_END,
> +};
> +enum rte_flow_item_type pattern_eth_ipv6_pfcp[] = {
> + RTE_FLOW_ITEM_TYPE_ETH,
> + RTE_FLOW_ITEM_TYPE_IPV6,
> + RTE_FLOW_ITEM_TYPE_UDP,
> + RTE_FLOW_ITEM_TYPE_PFCP,
> + RTE_FLOW_ITEM_TYPE_END,
> +};
> +
>
>
> typedef struct ice_flow_engine * (*parse_engine_t)(struct ice_adapter *ad,
> diff --git a/drivers/net/ice/ice_generic_flow.h
> b/drivers/net/ice/ice_generic_flow.h
> index 006fd00b3..65cd64c7f 100644
> --- a/drivers/net/ice/ice_generic_flow.h
> +++ b/drivers/net/ice/ice_generic_flow.h
> @@ -400,6 +400,12 @@ extern enum rte_flow_item_type
> pattern_eth_ipv6_ah[];
> /* L2TP */
> extern enum rte_flow_item_type pattern_eth_ipv6_l2tp[];
>
> +/* PFCP */
> +extern enum rte_flow_item_type pattern_eth_ipv4_pfcp[]; extern enum
> +rte_flow_item_type pattern_eth_ipv6_pfcp[];
> +
> +
> +
Remove dummy empty lines
> struct ice_adapter;
>
> extern const struct rte_flow_ops ice_flow_ops; diff --git
> a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
> index 9c87a16dd..9b4b9346c 100644
> --- a/drivers/net/ice/ice_switch_filter.c
> +++ b/drivers/net/ice/ice_switch_filter.c
> @@ -154,6 +154,10 @@ ice_pattern_match_item
> ice_switch_pattern_dist_comms[] = {
> ICE_INSET_NONE, ICE_INSET_NONE},
> {pattern_eth_ipv6_l2tp,
> ICE_INSET_NONE, ICE_INSET_NONE},
> + {pattern_eth_ipv4_pfcp,
> + ICE_INSET_NONE, ICE_INSET_NONE},
> + {pattern_eth_ipv6_pfcp,
> + ICE_INSET_NONE, ICE_INSET_NONE},
> };
>
> static struct
> @@ -224,6 +228,10 @@ ice_pattern_match_item
> ice_switch_pattern_perm[] = {
> ICE_INSET_NONE, ICE_INSET_NONE},
> {pattern_eth_ipv6_l2tp,
> ICE_INSET_NONE, ICE_INSET_NONE},
> + {pattern_eth_ipv4_pfcp,
> + ICE_INSET_NONE, ICE_INSET_NONE},
> + {pattern_eth_ipv6_pfcp,
> + ICE_INSET_NONE, ICE_INSET_NONE},
> };
>
> static int
> @@ -350,6 +358,7 @@ ice_switch_inset_get(const struct rte_flow_item
> pattern[],
> const struct rte_flow_item_esp *esp_spec, *esp_mask;
> const struct rte_flow_item_ah *ah_spec, *ah_mask;
> const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
> + const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
> uint64_t input_set = ICE_INSET_NONE;
> uint16_t j, t = 0;
> uint16_t tunnel_valid = 0;
> @@ -996,6 +1005,55 @@ ice_switch_inset_get(const struct rte_flow_item
> pattern[],
> if (ipv6_valiad)
> *tun_type = ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
> break;
> + case RTE_FLOW_ITEM_TYPE_PFCP:
> + pfcp_spec = item->spec;
> + pfcp_mask = item->mask;
> + /* Check if PFCP item is used to describe protocol.
> + * If yes, both spec and mask should be NULL.
> + * If no, both spec and mask shouldn't be NULL.
> + */
> + if ((!pfcp_spec && pfcp_mask) ||
> + (pfcp_spec && !pfcp_mask)) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ITEM,
> + item,
> + "Invalid PFCP item");
> + return -ENOTSUP;
> + }
> + if (pfcp_spec && pfcp_mask) {
> + /* Check pfcp mask and update input set */
> + if (pfcp_mask->msg_type ||
> + pfcp_mask->msg_len ||
> + pfcp_mask->seid) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ITEM,
> + item,
> + "Invalid pfcp mask");
> + return -ENOTSUP;
> + }
> + if (pfcp_mask->s_field &&
> + pfcp_spec->s_field == 0x01 &&
> + ipv6_valiad)
> + *tun_type =
> + ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
> + else if (pfcp_mask->s_field &&
> + pfcp_spec->s_field == 0x01)
> + *tun_type =
> + ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
> + else if (pfcp_mask->s_field &&
> + !pfcp_spec->s_field &&
> + ipv6_valiad)
> + *tun_type =
> + ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
> + else if (pfcp_mask->s_field &&
> + !pfcp_spec->s_field)
> + *tun_type =
> + ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
> + else
> + return -ENOTSUP;
> + }
> + break;
> +
>
> case RTE_FLOW_ITEM_TYPE_VOID:
> break;
> @@ -1179,6 +1237,10 @@ ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
> case ICE_SW_TUN_PROFID_IPV6_ESP:
> case ICE_SW_TUN_PROFID_IPV6_AH:
> case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
> + case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
> + case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
> + case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
> + case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
> return true;
> default:
> break;
> --
> 2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* Re: [dpdk-dev] [PATCH v3 08/13] net/ice: add support for PFCP
2020-04-03 3:16 ` Zhang, Qi Z
@ 2020-04-03 3:18 ` Zhao1, Wei
0 siblings, 0 replies; 69+ messages in thread
From: Zhao1, Wei @ 2020-04-03 3:18 UTC (permalink / raw)
To: Zhang, Qi Z, dev; +Cc: Lu, Nannan, Fu, Qi, Peng, Yuan
Ok
> -----Original Message-----
> From: Zhang, Qi Z <qi.z.zhang@intel.com>
> Sent: Friday, April 3, 2020 11:17 AM
> To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
> Cc: Lu, Nannan <nannan.lu@intel.com>; Fu, Qi <qi.fu@intel.com>; Peng, Yuan
> <yuan.peng@intel.com>
> Subject: RE: [PATCH v3 08/13] net/ice: add support for PFCP
>
>
>
> > -----Original Message-----
> > From: Zhao1, Wei <wei.zhao1@intel.com>
> > Sent: Friday, April 3, 2020 10:44 AM
> > To: dev@dpdk.org
> > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Lu, Nannan
> > <nannan.lu@intel.com>; Fu, Qi <qi.fu@intel.com>; Peng, Yuan
> > <yuan.peng@intel.com>; Zhao1, Wei <wei.zhao1@intel.com>
> > Subject: [PATCH v3 08/13] net/ice: add support for PFCP
> >
> > This patch adds switch filter support for PFCP packets; it enables the
> > switch filter to direct IPv4/IPv6 packets with a PFCP session or node
> > payload to a specific action.
> >
> > Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> > ---
> > drivers/net/ice/ice_generic_flow.c | 15 +++++++
> > drivers/net/ice/ice_generic_flow.h | 6 +++
> > drivers/net/ice/ice_switch_filter.c | 62 +++++++++++++++++++++++++++++
> > 3 files changed, 83 insertions(+)
> >
> > diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
> > index 189ef6c4a..04dcaba08 100644
> > --- a/drivers/net/ice/ice_generic_flow.c
> > +++ b/drivers/net/ice/ice_generic_flow.c
> > @@ -1400,6 +1400,21 @@ enum rte_flow_item_type pattern_eth_ipv6_l2tp[] = {
> > RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
> > RTE_FLOW_ITEM_TYPE_END,
> > };
> > +enum rte_flow_item_type pattern_eth_ipv4_pfcp[] = {
> > + RTE_FLOW_ITEM_TYPE_ETH,
> > + RTE_FLOW_ITEM_TYPE_IPV4,
> > + RTE_FLOW_ITEM_TYPE_UDP,
> > + RTE_FLOW_ITEM_TYPE_PFCP,
> > + RTE_FLOW_ITEM_TYPE_END,
> > +};
> > +enum rte_flow_item_type pattern_eth_ipv6_pfcp[] = {
> > + RTE_FLOW_ITEM_TYPE_ETH,
> > + RTE_FLOW_ITEM_TYPE_IPV6,
> > + RTE_FLOW_ITEM_TYPE_UDP,
> > + RTE_FLOW_ITEM_TYPE_PFCP,
> > + RTE_FLOW_ITEM_TYPE_END,
> > +};
> > +
> >
> >
> > typedef struct ice_flow_engine * (*parse_engine_t)(struct ice_adapter *ad,
> > diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
> > index 006fd00b3..65cd64c7f 100644
> > --- a/drivers/net/ice/ice_generic_flow.h
> > +++ b/drivers/net/ice/ice_generic_flow.h
> > @@ -400,6 +400,12 @@ extern enum rte_flow_item_type pattern_eth_ipv6_ah[];
> > /* L2TP */
> > extern enum rte_flow_item_type pattern_eth_ipv6_l2tp[];
> >
> > +/* PFCP */
> > +extern enum rte_flow_item_type pattern_eth_ipv4_pfcp[];
> > +extern enum rte_flow_item_type pattern_eth_ipv6_pfcp[];
> > +
> > +
> > +
>
> Remove dummy empty lines
>
> > struct ice_adapter;
> >
> > extern const struct rte_flow_ops ice_flow_ops;
> >
> > diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
> > index 9c87a16dd..9b4b9346c 100644
> > --- a/drivers/net/ice/ice_switch_filter.c
> > +++ b/drivers/net/ice/ice_switch_filter.c
> > @@ -154,6 +154,10 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
> > ICE_INSET_NONE, ICE_INSET_NONE},
> > {pattern_eth_ipv6_l2tp,
> > ICE_INSET_NONE, ICE_INSET_NONE},
> > + {pattern_eth_ipv4_pfcp,
> > + ICE_INSET_NONE, ICE_INSET_NONE},
> > + {pattern_eth_ipv6_pfcp,
> > + ICE_INSET_NONE, ICE_INSET_NONE},
> > };
> >
> > static struct
> > @@ -224,6 +228,10 @@ ice_pattern_match_item ice_switch_pattern_perm[] = {
> > ICE_INSET_NONE, ICE_INSET_NONE},
> > {pattern_eth_ipv6_l2tp,
> > ICE_INSET_NONE, ICE_INSET_NONE},
> > + {pattern_eth_ipv4_pfcp,
> > + ICE_INSET_NONE, ICE_INSET_NONE},
> > + {pattern_eth_ipv6_pfcp,
> > + ICE_INSET_NONE, ICE_INSET_NONE},
> > };
> >
> > static int
> > @@ -350,6 +358,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
> > const struct rte_flow_item_esp *esp_spec, *esp_mask;
> > const struct rte_flow_item_ah *ah_spec, *ah_mask;
> > const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
> > + const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
> > uint64_t input_set = ICE_INSET_NONE;
> > uint16_t j, t = 0;
> > uint16_t tunnel_valid = 0;
> > @@ -996,6 +1005,55 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
> > if (ipv6_valiad)
> > *tun_type = ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
> > break;
> > + case RTE_FLOW_ITEM_TYPE_PFCP:
> > + pfcp_spec = item->spec;
> > + pfcp_mask = item->mask;
> > + /* Check if PFCP item is used to describe protocol.
> > + * If yes, both spec and mask should be NULL.
> > + * If no, both spec and mask shouldn't be NULL.
> > + */
> > + if ((!pfcp_spec && pfcp_mask) ||
> > + (pfcp_spec && !pfcp_mask)) {
> > + rte_flow_error_set(error, EINVAL,
> > + RTE_FLOW_ERROR_TYPE_ITEM,
> > + item,
> > + "Invalid PFCP item");
> > + return -ENOTSUP;
> > + }
> > + if (pfcp_spec && pfcp_mask) {
> > + /* Check pfcp mask and update input set */
> > + if (pfcp_mask->msg_type ||
> > + pfcp_mask->msg_len ||
> > + pfcp_mask->seid) {
> > + rte_flow_error_set(error, EINVAL,
> > + RTE_FLOW_ERROR_TYPE_ITEM,
> > + item,
> > + "Invalid pfcp mask");
> > + return -ENOTSUP;
> > + }
> > + if (pfcp_mask->s_field &&
> > + pfcp_spec->s_field == 0x01 &&
> > + ipv6_valiad)
> > + *tun_type =
> > + ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
> > + else if (pfcp_mask->s_field &&
> > + pfcp_spec->s_field == 0x01)
> > + *tun_type =
> > + ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
> > + else if (pfcp_mask->s_field &&
> > + !pfcp_spec->s_field &&
> > + ipv6_valiad)
> > + *tun_type =
> > + ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
> > + else if (pfcp_mask->s_field &&
> > + !pfcp_spec->s_field)
> > + *tun_type =
> > + ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
> > + else
> > + return -ENOTSUP;
> > + }
> > + break;
> > +
> >
> > case RTE_FLOW_ITEM_TYPE_VOID:
> > break;
> > @@ -1179,6 +1237,10 @@ ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
> > case ICE_SW_TUN_PROFID_IPV6_ESP:
> > case ICE_SW_TUN_PROFID_IPV6_AH:
> > case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
> > + case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
> > + case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
> > + case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
> > + case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
> > return true;
> > default:
> > break;
> > --
> > 2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* Re: [dpdk-dev] [PATCH v3 10/13] net/ice: add more flow support for permit stage
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 10/13] net/ice: add more flow support for permit stage Wei Zhao
@ 2020-04-03 3:20 ` Zhang, Qi Z
0 siblings, 0 replies; 69+ messages in thread
From: Zhang, Qi Z @ 2020-04-03 3:20 UTC (permalink / raw)
To: Zhao1, Wei, dev; +Cc: Lu, Nannan, Fu, Qi, Peng, Yuan
> -----Original Message-----
> From: Zhao1, Wei <wei.zhao1@intel.com>
> Sent: Friday, April 3, 2020 10:44 AM
> To: dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Lu, Nannan <nannan.lu@intel.com>;
> Fu, Qi <qi.fu@intel.com>; Peng, Yuan <yuan.peng@intel.com>; Zhao1, Wei
> <wei.zhao1@intel.com>
> Subject: [PATCH v3 10/13] net/ice: add more flow support for permit stage
>
> This patch adds switch filter permit stage support for more flow patterns
> in PF only pipeline mode.
It should be "permission stage".
>
> Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> ---
> drivers/net/ice/ice_switch_filter.c | 14 ++++++++++++++
> 1 file changed, 14 insertions(+)
>
> diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
> index 4248b8911..81d069e99 100644
> --- a/drivers/net/ice/ice_switch_filter.c
> +++ b/drivers/net/ice/ice_switch_filter.c
> @@ -200,6 +200,8 @@ ice_pattern_match_item ice_switch_pattern_dist_os[] = {
>
> static struct
> ice_pattern_match_item ice_switch_pattern_perm[] = {
> + {pattern_ethertype,
> + ICE_SW_INSET_ETHER, ICE_INSET_NONE},
> {pattern_ethertype_vlan,
> ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
> {pattern_eth_ipv4,
> @@ -226,6 +228,18 @@ ice_pattern_match_item ice_switch_pattern_perm[] = {
> ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
> {pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
> ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
> + {pattern_eth_pppoed,
> + ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
> + {pattern_eth_vlan_pppoed,
> + ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
> + {pattern_eth_pppoes,
> + ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
> + {pattern_eth_vlan_pppoes,
> + ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
> + {pattern_eth_pppoes_proto,
> + ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
> + {pattern_eth_vlan_pppoes_proto,
> + ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
> {pattern_eth_ipv6_esp,
> ICE_INSET_NONE, ICE_INSET_NONE},
> {pattern_eth_ipv6_udp_esp,
> --
> 2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v4 00/13] add switch filter support for intel DCF
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 00/13] add switch filter support for intel DCF Wei Zhao
` (12 preceding siblings ...)
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 13/13] net/ice: redirect switch rule to new VSI Wei Zhao
@ 2020-04-03 4:45 ` Wei Zhao
2020-04-03 4:45 ` [dpdk-dev] [PATCH v4 01/13] net/ice: enable switch flow on DCF Wei Zhao
` (13 more replies)
13 siblings, 14 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-03 4:45 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, nannan.lu, qi.fu, yuan.peng
A DCF (Device Config Function) framework has been added for Intel devices;
this patch set adds switch filter support for it, and it also fixes
bugs which block this feature.
This patchset is based on:
[1] https://patchwork.dpdk.org/cover/66480/ : add Intel DCF PMD support
Depends-on: series-8859
v2:
-add switch filter support for AH/ESP/PFCP packets
-fix some patch check warnings
-add flow redirect on switch patch
v3:
-update commit log
-update flow redirect on switch patch
v4:
-update as per comments
Beilei Xing (2):
net/ice: enable flow redirect on switch
net/ice: redirect switch rule to new VSI
Wei Zhao (11):
net/ice: enable switch flow on DCF
net/ice: support for more PPPoE input set
net/ice: change switch parser to support flexible mask
net/ice: add support for MAC VLAN rule
net/ice: change default tunnel type
net/ice: add action number check for switch
net/ice: add support for ESP/AH/L2TP
net/ice: add support for PFCP
net/ice: add support for IPv6 NAT-T
net/ice: add more flow support for permission stage
net/ice: fix input set of VLAN item
doc/guides/rel_notes/release_20_05.rst | 2 +-
drivers/net/ice/ice_dcf_ethdev.c | 10 +-
drivers/net/ice/ice_dcf_parent.c | 30 +-
drivers/net/ice/ice_fdir_filter.c | 6 +
drivers/net/ice/ice_generic_flow.c | 61 +++
drivers/net/ice/ice_generic_flow.h | 24 +
drivers/net/ice/ice_hash.c | 6 +
drivers/net/ice/ice_switch_filter.c | 730 +++++++++++++++++++------
8 files changed, 682 insertions(+), 187 deletions(-)
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v4 01/13] net/ice: enable switch flow on DCF
2020-04-03 4:45 ` [dpdk-dev] [PATCH v4 00/13] add switch filter support for intel DCF Wei Zhao
@ 2020-04-03 4:45 ` Wei Zhao
2020-04-03 4:45 ` [dpdk-dev] [PATCH v4 02/13] net/ice: support for more PPPoE input set Wei Zhao
` (12 subsequent siblings)
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-03 4:45 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, nannan.lu, qi.fu, yuan.peng, Wei Zhao
DCF on CVL is a control plane VF which takes the responsibility to
configure all the PF/global resources; this patch adds support for DCF
to program forwarding rules that direct packets to VFs.
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
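As a usage sketch (not part of this patch; the helper name, port id, VF id
and matched address are illustrative placeholders): once the DCF port
exposes ice_flow_ops, a rule forwarding matched packets to a VF can be
created through the generic flow API roughly as follows.

#include <rte_flow.h>
#include <rte_ip.h>

/* Hypothetical helper: on the DCF port, forward IPv4 packets for one
 * destination address to the VF given by vf_id. */
static struct rte_flow *
dcf_fwd_to_vf(uint16_t dcf_port, uint32_t vf_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.dst_addr = RTE_BE32(0xFFFFFFFF),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_vf vf = { .id = vf_id };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(dcf_port, &attr, pattern, actions, err);
}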
---
doc/guides/rel_notes/release_20_05.rst | 2 +-
drivers/net/ice/ice_dcf_ethdev.c | 10 +++++--
drivers/net/ice/ice_dcf_parent.c | 8 ++++++
drivers/net/ice/ice_fdir_filter.c | 6 ++++
drivers/net/ice/ice_hash.c | 6 ++++
drivers/net/ice/ice_switch_filter.c | 39 +++++++++++++++++++++++++-
6 files changed, 67 insertions(+), 4 deletions(-)
diff --git a/doc/guides/rel_notes/release_20_05.rst b/doc/guides/rel_notes/release_20_05.rst
index 9bc647284..bde7e47fb 100644
--- a/doc/guides/rel_notes/release_20_05.rst
+++ b/doc/guides/rel_notes/release_20_05.rst
@@ -68,7 +68,7 @@ New Features
Updated the Intel ice driver with new features and improvements, including:
* Added support for DCF (Device Config Function) feature.
-
+ * Added switch filter support for intel DCF.
Removed Items
-------------
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index af94caeff..e5ba1a61f 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -115,8 +115,8 @@ ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
static int
ice_dcf_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
- __rte_unused enum rte_filter_op filter_op,
- __rte_unused void *arg)
+ enum rte_filter_op filter_op,
+ void *arg)
{
int ret = 0;
@@ -124,6 +124,12 @@ ice_dcf_dev_filter_ctrl(struct rte_eth_dev *dev,
return -EINVAL;
switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &ice_flow_ops;
+ break;
+
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index ff08292a1..37f0e2be2 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -9,6 +9,7 @@
#include <rte_spinlock.h>
#include "ice_dcf_ethdev.h"
+#include "ice_generic_flow.h"
#define ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL 100000 /* us */
static rte_spinlock_t vsi_update_lock = RTE_SPINLOCK_INITIALIZER;
@@ -321,6 +322,12 @@ ice_dcf_init_parent_adapter(struct rte_eth_dev *eth_dev)
}
parent_adapter->active_pkg_type = ice_load_pkg_type(parent_hw);
+ err = ice_flow_init(parent_adapter);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Failed to initialize flow");
+ goto uninit_hw;
+ }
+
ice_dcf_update_vf_vsi_map(parent_hw, hw->num_vfs, hw->vf_vsi_map);
mac = (const struct rte_ether_addr *)hw->avf.mac.addr;
@@ -347,5 +354,6 @@ ice_dcf_uninit_parent_adapter(struct rte_eth_dev *eth_dev)
eth_dev->data->mac_addrs = NULL;
+ ice_flow_uninit(parent_adapter);
ice_dcf_uninit_parent_hw(parent_hw);
}
diff --git a/drivers/net/ice/ice_fdir_filter.c b/drivers/net/ice/ice_fdir_filter.c
index a082a13df..1a85d6cc1 100644
--- a/drivers/net/ice/ice_fdir_filter.c
+++ b/drivers/net/ice/ice_fdir_filter.c
@@ -1061,6 +1061,9 @@ ice_fdir_init(struct ice_adapter *ad)
struct ice_flow_parser *parser;
int ret;
+ if (ad->hw.dcf_enabled)
+ return 0;
+
ret = ice_fdir_setup(pf);
if (ret)
return ret;
@@ -1081,6 +1084,9 @@ ice_fdir_uninit(struct ice_adapter *ad)
struct ice_pf *pf = &ad->pf;
struct ice_flow_parser *parser;
+ if (ad->hw.dcf_enabled)
+ return;
+
if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
parser = &ice_fdir_parser_comms;
else
diff --git a/drivers/net/ice/ice_hash.c b/drivers/net/ice/ice_hash.c
index 0fdd4d68d..72c8ddc9a 100644
--- a/drivers/net/ice/ice_hash.c
+++ b/drivers/net/ice/ice_hash.c
@@ -243,6 +243,9 @@ ice_hash_init(struct ice_adapter *ad)
{
struct ice_flow_parser *parser = NULL;
+ if (ad->hw.dcf_enabled)
+ return 0;
+
if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
parser = &ice_hash_parser_os;
else if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
@@ -565,6 +568,9 @@ ice_hash_destroy(struct ice_adapter *ad,
static void
ice_hash_uninit(struct ice_adapter *ad)
{
+ if (ad->hw.dcf_enabled)
+ return;
+
if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
ice_unregister_parser(&ice_hash_parser_os, ad);
else if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 66dc158ef..4db8f1471 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -913,6 +913,39 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
return 0;
}
+static int
+ice_switch_parse_dcf_action(const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct ice_adv_rule_info *rule_info)
+{
+ const struct rte_flow_action_vf *act_vf;
+ const struct rte_flow_action *action;
+ enum rte_flow_action_type action_type;
+
+ for (action = actions; action->type !=
+ RTE_FLOW_ACTION_TYPE_END; action++) {
+ action_type = action->type;
+ switch (action_type) {
+ case RTE_FLOW_ACTION_TYPE_VF:
+ rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
+ act_vf = action->conf;
+ rule_info->sw_act.vsi_handle = act_vf->id;
+ break;
+ default:
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "Invalid action type or queue number");
+ return -rte_errno;
+ }
+ }
+
+ rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
+ rule_info->rx = 1;
+ rule_info->priority = 5;
+
+ return 0;
+}
static int
ice_switch_parse_action(struct ice_pf *pf,
@@ -1081,7 +1114,11 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
goto error;
}
- ret = ice_switch_parse_action(pf, actions, error, &rule_info);
+ if (ad->hw.dcf_enabled)
+ ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
+ else
+ ret = ice_switch_parse_action(pf, actions, error, &rule_info);
+
if (ret) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v4 02/13] net/ice: support for more PPPoE input set
2020-04-03 4:45 ` [dpdk-dev] [PATCH v4 00/13] add switch filter support for intel DCF Wei Zhao
2020-04-03 4:45 ` [dpdk-dev] [PATCH v4 01/13] net/ice: enable switch flow on DCF Wei Zhao
@ 2020-04-03 4:45 ` Wei Zhao
2020-04-03 4:45 ` [dpdk-dev] [PATCH v4 03/13] net/ice: change switch parser to support flexible mask Wei Zhao
` (11 subsequent siblings)
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-03 4:45 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, nannan.lu, qi.fu, yuan.peng, Wei Zhao
This patch adds more support for PPPoE packets;
it enables the switch filter to direct PPPoE packets based on
session ID and PPP protocol type.
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
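As a usage sketch (the session id and protocol value are illustrative):
with this input set, a pattern can pin down one PPPoE session carrying
IPv4 (PPP protocol 0x0021) like below.

	/* Match PPPoE session 1 whose payload is IPv4. */
	struct rte_flow_item_pppoe pppoe_spec = {
		.session_id = RTE_BE16(1),
	};
	struct rte_flow_item_pppoe pppoe_mask = {
		.session_id = RTE_BE16(0xffff),
	};
	struct rte_flow_item_pppoe_proto_id proto_spec = {
		.proto_id = RTE_BE16(0x0021),
	};
	struct rte_flow_item_pppoe_proto_id proto_mask = {
		.proto_id = RTE_BE16(0xffff),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_PPPOES,
		  .spec = &pppoe_spec, .mask = &pppoe_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID,
		  .spec = &proto_spec, .mask = &proto_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};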
---
drivers/net/ice/ice_generic_flow.c | 13 +++++
drivers/net/ice/ice_generic_flow.h | 9 ++++
drivers/net/ice/ice_switch_filter.c | 82 +++++++++++++++++++++++++++--
3 files changed, 99 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index c0420797e..0fdc7e617 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -1122,12 +1122,25 @@ enum rte_flow_item_type pattern_eth_pppoes[] = {
RTE_FLOW_ITEM_TYPE_PPPOES,
RTE_FLOW_ITEM_TYPE_END,
};
+enum rte_flow_item_type pattern_eth_pppoes_proto[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_PPPOES,
+ RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID,
+ RTE_FLOW_ITEM_TYPE_END,
+};
enum rte_flow_item_type pattern_eth_vlan_pppoes[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_VLAN,
RTE_FLOW_ITEM_TYPE_PPPOES,
RTE_FLOW_ITEM_TYPE_END,
};
+enum rte_flow_item_type pattern_eth_vlan_pppoes_proto[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_PPPOES,
+ RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID,
+ RTE_FLOW_ITEM_TYPE_END,
+};
enum rte_flow_item_type pattern_eth_qinq_pppoes[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_VLAN,
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
index ede6ec824..3361ecbd9 100644
--- a/drivers/net/ice/ice_generic_flow.h
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -30,6 +30,7 @@
#define ICE_PROT_VXLAN (1ULL << 19)
#define ICE_PROT_NVGRE (1ULL << 20)
#define ICE_PROT_GTPU (1ULL << 21)
+#define ICE_PROT_PPPOE_S (1ULL << 22)
/* field */
@@ -49,6 +50,8 @@
#define ICE_NVGRE_TNI (1ULL << 50)
#define ICE_GTPU_TEID (1ULL << 49)
#define ICE_GTPU_QFI (1ULL << 48)
+#define ICE_PPPOE_SESSION (1ULL << 47)
+#define ICE_PPPOE_PROTO (1ULL << 46)
/* input set */
@@ -177,6 +180,10 @@
(ICE_PROT_GTPU | ICE_GTPU_TEID)
#define ICE_INSET_GTPU_QFI \
(ICE_PROT_GTPU | ICE_GTPU_QFI)
+#define ICE_INSET_PPPOE_SESSION \
+ (ICE_PROT_PPPOE_S | ICE_PPPOE_SESSION)
+#define ICE_INSET_PPPOE_PROTO \
+ (ICE_PROT_PPPOE_S | ICE_PPPOE_PROTO)
/* empty pattern */
extern enum rte_flow_item_type pattern_empty[];
@@ -349,7 +356,9 @@ extern enum rte_flow_item_type pattern_eth_pppoed[];
extern enum rte_flow_item_type pattern_eth_vlan_pppoed[];
extern enum rte_flow_item_type pattern_eth_qinq_pppoed[];
extern enum rte_flow_item_type pattern_eth_pppoes[];
+extern enum rte_flow_item_type pattern_eth_pppoes_proto[];
extern enum rte_flow_item_type pattern_eth_vlan_pppoes[];
+extern enum rte_flow_item_type pattern_eth_vlan_pppoes_proto[];
extern enum rte_flow_item_type pattern_eth_qinq_pppoes[];
extern enum rte_flow_item_type pattern_eth_pppoes_ipv4[];
extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv4[];
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 4db8f1471..add66e683 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -87,7 +87,11 @@
ICE_INSET_TUN_IPV4_TOS)
#define ICE_SW_INSET_MAC_PPPOE ( \
ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
- ICE_INSET_DMAC | ICE_INSET_ETHERTYPE)
+ ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION)
+#define ICE_SW_INSET_MAC_PPPOE_PROTO ( \
+ ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \
+ ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \
+ ICE_INSET_PPPOE_PROTO)
struct sw_meta {
struct ice_adv_lkup_elem *list;
@@ -135,6 +139,10 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
{pattern_eth_vlan_pppoes,
ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
+ {pattern_eth_pppoes_proto,
+ ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
+ {pattern_eth_vlan_pppoes_proto,
+ ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
};
static struct
@@ -316,12 +324,15 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
+ const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
+ *pppoe_proto_mask;
uint8_t ipv6_addr_mask[16] = {
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
uint64_t input_set = ICE_INSET_NONE;
uint16_t j, t = 0;
uint16_t tunnel_valid = 0;
+ uint16_t pppoe_valid = 0;
for (item = pattern; item->type !=
@@ -885,14 +896,75 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
pppoe_mask = item->mask;
/* Check if PPPoE item is used to describe protocol.
* If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
*/
- if (pppoe_spec || pppoe_mask) {
+ if ((!pppoe_spec && pppoe_mask) ||
+ (pppoe_spec && !pppoe_mask)) {
rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid pppoe item");
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid pppoe item");
return 0;
}
+ if (pppoe_spec && pppoe_mask) {
+ /* Check pppoe mask and update input set */
+ if (pppoe_mask->length ||
+ pppoe_mask->code ||
+ pppoe_mask->version_type) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid pppoe mask");
+ return 0;
+ }
+ list[t].type = ICE_PPPOE;
+ if (pppoe_mask->session_id == UINT16_MAX) {
+ list[t].h_u.pppoe_hdr.session_id =
+ pppoe_spec->session_id;
+ list[t].m_u.pppoe_hdr.session_id =
+ UINT16_MAX;
+ input_set |= ICE_INSET_PPPOE_SESSION;
+ }
+ t++;
+ pppoe_valid = 1;
+ } else if (!pppoe_spec && !pppoe_mask) {
+ list[t].type = ICE_PPPOE;
+ }
+
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
+ pppoe_proto_spec = item->spec;
+ pppoe_proto_mask = item->mask;
+ /* Check if PPPoE optional proto_id item
+ * is used to describe protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
+ */
+ if ((!pppoe_proto_spec && pppoe_proto_mask) ||
+ (pppoe_proto_spec && !pppoe_proto_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid pppoe proto item");
+ return 0;
+ }
+ if (pppoe_proto_spec && pppoe_proto_mask) {
+ if (pppoe_valid)
+ t--;
+ list[t].type = ICE_PPPOE;
+ if (pppoe_proto_mask->proto_id == UINT16_MAX) {
+ list[t].h_u.pppoe_hdr.ppp_prot_id =
+ pppoe_proto_spec->proto_id;
+ list[t].m_u.pppoe_hdr.ppp_prot_id =
+ UINT16_MAX;
+ input_set |= ICE_INSET_PPPOE_PROTO;
+ }
+ t++;
+ } else if (!pppoe_proto_spec && !pppoe_proto_mask) {
+ list[t].type = ICE_PPPOE;
+ }
+
break;
case RTE_FLOW_ITEM_TYPE_VOID:
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v4 03/13] net/ice: change switch parser to support flexible mask
2020-04-03 4:45 ` [dpdk-dev] [PATCH v4 00/13] add switch filter support for intel DCF Wei Zhao
2020-04-03 4:45 ` [dpdk-dev] [PATCH v4 01/13] net/ice: enable switch flow on DCF Wei Zhao
2020-04-03 4:45 ` [dpdk-dev] [PATCH v4 02/13] net/ice: support for more PPPoE input set Wei Zhao
@ 2020-04-03 4:45 ` Wei Zhao
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 04/13] net/ice: add support for MAC VLAN rule Wei Zhao
` (10 subsequent siblings)
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-03 4:45 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, nannan.lu, qi.fu, yuan.peng, Wei Zhao
DCF needs to support flexible mask configuration, that is to say,
some input set masks may not be the all-ones 0xFFFF type. For example,
in order to direct L2/IP multicast packets, the mask for the source IP
may be 0xF0000000; this patch enables the switch filter parser for it.
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
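As an illustrative sketch (the addresses are placeholders): after this
change, an rte_flow IPv4 item may carry a prefix-style mask instead of
the previously required all-ones mask.

	/* Previously required: full match on the source address. */
	struct rte_flow_item_ipv4 exact_mask = {
		.hdr.src_addr = RTE_BE32(0xFFFFFFFF),
	};
	/* Now also accepted: match only the top nibble; together with a
	 * spec of 224.0.0.0 this covers the whole 224.0.0.0/4 range. */
	struct rte_flow_item_ipv4 prefix_mask = {
		.hdr.src_addr = RTE_BE32(0xF0000000),
	};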
---
drivers/net/ice/ice_switch_filter.c | 318 ++++++++++++----------------
1 file changed, 140 insertions(+), 178 deletions(-)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index add66e683..4edaea3f5 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -326,9 +326,6 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
*pppoe_proto_mask;
- uint8_t ipv6_addr_mask[16] = {
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
uint64_t input_set = ICE_INSET_NONE;
uint16_t j, t = 0;
uint16_t tunnel_valid = 0;
@@ -351,19 +348,31 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
eth_spec = item->spec;
eth_mask = item->mask;
if (eth_spec && eth_mask) {
- if (tunnel_valid &&
- rte_is_broadcast_ether_addr(ð_mask->src))
- input_set |= ICE_INSET_TUN_SMAC;
- else if (
- rte_is_broadcast_ether_addr(ð_mask->src))
- input_set |= ICE_INSET_SMAC;
- if (tunnel_valid &&
- rte_is_broadcast_ether_addr(ð_mask->dst))
- input_set |= ICE_INSET_TUN_DMAC;
- else if (
- rte_is_broadcast_ether_addr(ð_mask->dst))
- input_set |= ICE_INSET_DMAC;
- if (eth_mask->type == RTE_BE16(0xffff))
+ const uint8_t *a = eth_mask->src.addr_bytes;
+ const uint8_t *b = eth_mask->dst.addr_bytes;
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+ if (a[j] && tunnel_valid) {
+ input_set |=
+ ICE_INSET_TUN_SMAC;
+ break;
+ } else if (a[j]) {
+ input_set |=
+ ICE_INSET_SMAC;
+ break;
+ }
+ }
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+ if (b[j] && tunnel_valid) {
+ input_set |=
+ ICE_INSET_TUN_DMAC;
+ break;
+ } else if (b[j]) {
+ input_set |=
+ ICE_INSET_DMAC;
+ break;
+ }
+ }
+ if (eth_mask->type)
input_set |= ICE_INSET_ETHERTYPE;
list[t].type = (tunnel_valid == 0) ?
ICE_MAC_OFOS : ICE_MAC_IL;
@@ -373,16 +382,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
h = &list[t].h_u.eth_hdr;
m = &list[t].m_u.eth_hdr;
for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
- if (eth_mask->src.addr_bytes[j] ==
- UINT8_MAX) {
+ if (eth_mask->src.addr_bytes[j]) {
h->src_addr[j] =
eth_spec->src.addr_bytes[j];
m->src_addr[j] =
eth_mask->src.addr_bytes[j];
i = 1;
}
- if (eth_mask->dst.addr_bytes[j] ==
- UINT8_MAX) {
+ if (eth_mask->dst.addr_bytes[j]) {
h->dst_addr[j] =
eth_spec->dst.addr_bytes[j];
m->dst_addr[j] =
@@ -392,17 +399,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (i)
t++;
- if (eth_mask->type == UINT16_MAX) {
+ if (eth_mask->type) {
list[t].type = ICE_ETYPE_OL;
list[t].h_u.ethertype.ethtype_id =
eth_spec->type;
list[t].m_u.ethertype.ethtype_id =
- UINT16_MAX;
+ eth_mask->type;
t++;
}
- } else if (!eth_spec && !eth_mask) {
- list[t].type = (tun_type == ICE_NON_TUN) ?
- ICE_MAC_OFOS : ICE_MAC_IL;
}
break;
@@ -423,81 +427,68 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (tunnel_valid) {
- if (ipv4_mask->hdr.type_of_service ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.type_of_service)
input_set |=
ICE_INSET_TUN_IPV4_TOS;
- if (ipv4_mask->hdr.src_addr ==
- UINT32_MAX)
+ if (ipv4_mask->hdr.src_addr)
input_set |=
ICE_INSET_TUN_IPV4_SRC;
- if (ipv4_mask->hdr.dst_addr ==
- UINT32_MAX)
+ if (ipv4_mask->hdr.dst_addr)
input_set |=
ICE_INSET_TUN_IPV4_DST;
- if (ipv4_mask->hdr.time_to_live ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.time_to_live)
input_set |=
ICE_INSET_TUN_IPV4_TTL;
- if (ipv4_mask->hdr.next_proto_id ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.next_proto_id)
input_set |=
ICE_INSET_TUN_IPV4_PROTO;
} else {
- if (ipv4_mask->hdr.src_addr ==
- UINT32_MAX)
+ if (ipv4_mask->hdr.src_addr)
input_set |= ICE_INSET_IPV4_SRC;
- if (ipv4_mask->hdr.dst_addr ==
- UINT32_MAX)
+ if (ipv4_mask->hdr.dst_addr)
input_set |= ICE_INSET_IPV4_DST;
- if (ipv4_mask->hdr.time_to_live ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.time_to_live)
input_set |= ICE_INSET_IPV4_TTL;
- if (ipv4_mask->hdr.next_proto_id ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.next_proto_id)
input_set |=
ICE_INSET_IPV4_PROTO;
- if (ipv4_mask->hdr.type_of_service ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.type_of_service)
input_set |=
ICE_INSET_IPV4_TOS;
}
list[t].type = (tunnel_valid == 0) ?
ICE_IPV4_OFOS : ICE_IPV4_IL;
- if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+ if (ipv4_mask->hdr.src_addr) {
list[t].h_u.ipv4_hdr.src_addr =
ipv4_spec->hdr.src_addr;
list[t].m_u.ipv4_hdr.src_addr =
- UINT32_MAX;
+ ipv4_mask->hdr.src_addr;
}
- if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+ if (ipv4_mask->hdr.dst_addr) {
list[t].h_u.ipv4_hdr.dst_addr =
ipv4_spec->hdr.dst_addr;
list[t].m_u.ipv4_hdr.dst_addr =
- UINT32_MAX;
+ ipv4_mask->hdr.dst_addr;
}
- if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+ if (ipv4_mask->hdr.time_to_live) {
list[t].h_u.ipv4_hdr.time_to_live =
ipv4_spec->hdr.time_to_live;
list[t].m_u.ipv4_hdr.time_to_live =
- UINT8_MAX;
+ ipv4_mask->hdr.time_to_live;
}
- if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+ if (ipv4_mask->hdr.next_proto_id) {
list[t].h_u.ipv4_hdr.protocol =
ipv4_spec->hdr.next_proto_id;
list[t].m_u.ipv4_hdr.protocol =
- UINT8_MAX;
+ ipv4_mask->hdr.next_proto_id;
}
- if (ipv4_mask->hdr.type_of_service ==
- UINT8_MAX) {
+ if (ipv4_mask->hdr.type_of_service) {
list[t].h_u.ipv4_hdr.tos =
ipv4_spec->hdr.type_of_service;
- list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
+ list[t].m_u.ipv4_hdr.tos =
+ ipv4_mask->hdr.type_of_service;
}
t++;
- } else if (!ipv4_spec && !ipv4_mask) {
- list[t].type = (tunnel_valid == 0) ?
- ICE_IPV4_OFOS : ICE_IPV4_IL;
}
break;
@@ -513,54 +504,53 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
return 0;
}
- if (tunnel_valid) {
- if (!memcmp(ipv6_mask->hdr.src_addr,
- ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.src_addr)))
- input_set |=
- ICE_INSET_TUN_IPV6_SRC;
- if (!memcmp(ipv6_mask->hdr.dst_addr,
- ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.dst_addr)))
+ for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
+ if (ipv6_mask->hdr.src_addr[j] &&
+ tunnel_valid) {
input_set |=
- ICE_INSET_TUN_IPV6_DST;
- if (ipv6_mask->hdr.proto == UINT8_MAX)
+ ICE_INSET_TUN_IPV6_SRC;
+ break;
+ } else if (ipv6_mask->hdr.src_addr[j]) {
+ input_set |= ICE_INSET_IPV6_SRC;
+ break;
+ }
+ }
+ for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
+ if (ipv6_mask->hdr.dst_addr[j] &&
+ tunnel_valid) {
input_set |=
+ ICE_INSET_TUN_IPV6_DST;
+ break;
+ } else if (ipv6_mask->hdr.dst_addr[j]) {
+ input_set |= ICE_INSET_IPV6_DST;
+ break;
+ }
+ }
+ if (ipv6_mask->hdr.proto &&
+ tunnel_valid)
+ input_set |=
ICE_INSET_TUN_IPV6_NEXT_HDR;
- if (ipv6_mask->hdr.hop_limits ==
- UINT8_MAX)
- input_set |=
+ else if (ipv6_mask->hdr.proto)
+ input_set |=
+ ICE_INSET_IPV6_NEXT_HDR;
+ if (ipv6_mask->hdr.hop_limits &&
+ tunnel_valid)
+ input_set |=
ICE_INSET_TUN_IPV6_HOP_LIMIT;
- if ((ipv6_mask->hdr.vtc_flow &
+ else if (ipv6_mask->hdr.hop_limits)
+ input_set |=
+ ICE_INSET_IPV6_HOP_LIMIT;
+ if ((ipv6_mask->hdr.vtc_flow &
rte_cpu_to_be_32
- (RTE_IPV6_HDR_TC_MASK))
- == rte_cpu_to_be_32
- (RTE_IPV6_HDR_TC_MASK))
- input_set |=
+ (RTE_IPV6_HDR_TC_MASK)) &&
+ tunnel_valid)
+ input_set |=
ICE_INSET_TUN_IPV6_TC;
- } else {
- if (!memcmp(ipv6_mask->hdr.src_addr,
- ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.src_addr)))
- input_set |= ICE_INSET_IPV6_SRC;
- if (!memcmp(ipv6_mask->hdr.dst_addr,
- ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.dst_addr)))
- input_set |= ICE_INSET_IPV6_DST;
- if (ipv6_mask->hdr.proto == UINT8_MAX)
- input_set |=
- ICE_INSET_IPV6_NEXT_HDR;
- if (ipv6_mask->hdr.hop_limits ==
- UINT8_MAX)
- input_set |=
- ICE_INSET_IPV6_HOP_LIMIT;
- if ((ipv6_mask->hdr.vtc_flow &
+ else if (ipv6_mask->hdr.vtc_flow &
rte_cpu_to_be_32
(RTE_IPV6_HDR_TC_MASK))
- == rte_cpu_to_be_32
- (RTE_IPV6_HDR_TC_MASK))
- input_set |= ICE_INSET_IPV6_TC;
- }
+ input_set |= ICE_INSET_IPV6_TC;
+
list[t].type = (tunnel_valid == 0) ?
ICE_IPV6_OFOS : ICE_IPV6_IL;
struct ice_ipv6_hdr *f;
@@ -568,35 +558,33 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
f = &list[t].h_u.ipv6_hdr;
s = &list[t].m_u.ipv6_hdr;
for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
- if (ipv6_mask->hdr.src_addr[j] ==
- UINT8_MAX) {
+ if (ipv6_mask->hdr.src_addr[j]) {
f->src_addr[j] =
ipv6_spec->hdr.src_addr[j];
s->src_addr[j] =
ipv6_mask->hdr.src_addr[j];
}
- if (ipv6_mask->hdr.dst_addr[j] ==
- UINT8_MAX) {
+ if (ipv6_mask->hdr.dst_addr[j]) {
f->dst_addr[j] =
ipv6_spec->hdr.dst_addr[j];
s->dst_addr[j] =
ipv6_mask->hdr.dst_addr[j];
}
}
- if (ipv6_mask->hdr.proto == UINT8_MAX) {
+ if (ipv6_mask->hdr.proto) {
f->next_hdr =
ipv6_spec->hdr.proto;
- s->next_hdr = UINT8_MAX;
+ s->next_hdr =
+ ipv6_mask->hdr.proto;
}
- if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+ if (ipv6_mask->hdr.hop_limits) {
f->hop_limit =
ipv6_spec->hdr.hop_limits;
- s->hop_limit = UINT8_MAX;
+ s->hop_limit =
+ ipv6_mask->hdr.hop_limits;
}
- if ((ipv6_mask->hdr.vtc_flow &
+ if (ipv6_mask->hdr.vtc_flow &
rte_cpu_to_be_32
- (RTE_IPV6_HDR_TC_MASK))
- == rte_cpu_to_be_32
(RTE_IPV6_HDR_TC_MASK)) {
struct ice_le_ver_tc_flow vtf;
vtf.u.fld.version = 0;
@@ -606,13 +594,13 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
RTE_IPV6_HDR_TC_MASK) >>
RTE_IPV6_HDR_TC_SHIFT;
f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
- vtf.u.fld.tc = UINT8_MAX;
+ vtf.u.fld.tc = (rte_be_to_cpu_32
+ (ipv6_mask->hdr.vtc_flow) &
+ RTE_IPV6_HDR_TC_MASK) >>
+ RTE_IPV6_HDR_TC_SHIFT;
s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
}
t++;
- } else if (!ipv6_spec && !ipv6_mask) {
- list[t].type = (tun_type == ICE_NON_TUN) ?
- ICE_IPV4_OFOS : ICE_IPV4_IL;
}
break;
@@ -631,21 +619,17 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (tunnel_valid) {
- if (udp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (udp_mask->hdr.src_port)
input_set |=
ICE_INSET_TUN_UDP_SRC_PORT;
- if (udp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (udp_mask->hdr.dst_port)
input_set |=
ICE_INSET_TUN_UDP_DST_PORT;
} else {
- if (udp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (udp_mask->hdr.src_port)
input_set |=
ICE_INSET_UDP_SRC_PORT;
- if (udp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (udp_mask->hdr.dst_port)
input_set |=
ICE_INSET_UDP_DST_PORT;
}
@@ -654,21 +638,19 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
list[t].type = ICE_UDP_OF;
else
list[t].type = ICE_UDP_ILOS;
- if (udp_mask->hdr.src_port == UINT16_MAX) {
+ if (udp_mask->hdr.src_port) {
list[t].h_u.l4_hdr.src_port =
udp_spec->hdr.src_port;
list[t].m_u.l4_hdr.src_port =
udp_mask->hdr.src_port;
}
- if (udp_mask->hdr.dst_port == UINT16_MAX) {
+ if (udp_mask->hdr.dst_port) {
list[t].h_u.l4_hdr.dst_port =
udp_spec->hdr.dst_port;
list[t].m_u.l4_hdr.dst_port =
udp_mask->hdr.dst_port;
}
t++;
- } else if (!udp_spec && !udp_mask) {
- list[t].type = ICE_UDP_ILOS;
}
break;
@@ -692,40 +674,34 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (tunnel_valid) {
- if (tcp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (tcp_mask->hdr.src_port)
input_set |=
ICE_INSET_TUN_TCP_SRC_PORT;
- if (tcp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (tcp_mask->hdr.dst_port)
input_set |=
ICE_INSET_TUN_TCP_DST_PORT;
} else {
- if (tcp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (tcp_mask->hdr.src_port)
input_set |=
ICE_INSET_TCP_SRC_PORT;
- if (tcp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (tcp_mask->hdr.dst_port)
input_set |=
ICE_INSET_TCP_DST_PORT;
}
list[t].type = ICE_TCP_IL;
- if (tcp_mask->hdr.src_port == UINT16_MAX) {
+ if (tcp_mask->hdr.src_port) {
list[t].h_u.l4_hdr.src_port =
tcp_spec->hdr.src_port;
list[t].m_u.l4_hdr.src_port =
tcp_mask->hdr.src_port;
}
- if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+ if (tcp_mask->hdr.dst_port) {
list[t].h_u.l4_hdr.dst_port =
tcp_spec->hdr.dst_port;
list[t].m_u.l4_hdr.dst_port =
tcp_mask->hdr.dst_port;
}
t++;
- } else if (!tcp_spec && !tcp_mask) {
- list[t].type = ICE_TCP_IL;
}
break;
@@ -743,40 +719,34 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (tunnel_valid) {
- if (sctp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (sctp_mask->hdr.src_port)
input_set |=
ICE_INSET_TUN_SCTP_SRC_PORT;
- if (sctp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (sctp_mask->hdr.dst_port)
input_set |=
ICE_INSET_TUN_SCTP_DST_PORT;
} else {
- if (sctp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (sctp_mask->hdr.src_port)
input_set |=
ICE_INSET_SCTP_SRC_PORT;
- if (sctp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (sctp_mask->hdr.dst_port)
input_set |=
ICE_INSET_SCTP_DST_PORT;
}
list[t].type = ICE_SCTP_IL;
- if (sctp_mask->hdr.src_port == UINT16_MAX) {
+ if (sctp_mask->hdr.src_port) {
list[t].h_u.sctp_hdr.src_port =
sctp_spec->hdr.src_port;
list[t].m_u.sctp_hdr.src_port =
sctp_mask->hdr.src_port;
}
- if (sctp_mask->hdr.dst_port == UINT16_MAX) {
+ if (sctp_mask->hdr.dst_port) {
list[t].h_u.sctp_hdr.dst_port =
sctp_spec->hdr.dst_port;
list[t].m_u.sctp_hdr.dst_port =
sctp_mask->hdr.dst_port;
}
t++;
- } else if (!sctp_spec && !sctp_mask) {
- list[t].type = ICE_SCTP_IL;
}
break;
@@ -799,21 +769,21 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
tunnel_valid = 1;
if (vxlan_spec && vxlan_mask) {
list[t].type = ICE_VXLAN;
- if (vxlan_mask->vni[0] == UINT8_MAX &&
- vxlan_mask->vni[1] == UINT8_MAX &&
- vxlan_mask->vni[2] == UINT8_MAX) {
+ if (vxlan_mask->vni[0] ||
+ vxlan_mask->vni[1] ||
+ vxlan_mask->vni[2]) {
list[t].h_u.tnl_hdr.vni =
(vxlan_spec->vni[2] << 16) |
(vxlan_spec->vni[1] << 8) |
vxlan_spec->vni[0];
list[t].m_u.tnl_hdr.vni =
- UINT32_MAX;
+ (vxlan_mask->vni[2] << 16) |
+ (vxlan_mask->vni[1] << 8) |
+ vxlan_mask->vni[0];
input_set |=
ICE_INSET_TUN_VXLAN_VNI;
}
t++;
- } else if (!vxlan_spec && !vxlan_mask) {
- list[t].type = ICE_VXLAN;
}
break;
@@ -835,21 +805,21 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
tunnel_valid = 1;
if (nvgre_spec && nvgre_mask) {
list[t].type = ICE_NVGRE;
- if (nvgre_mask->tni[0] == UINT8_MAX &&
- nvgre_mask->tni[1] == UINT8_MAX &&
- nvgre_mask->tni[2] == UINT8_MAX) {
+ if (nvgre_mask->tni[0] ||
+ nvgre_mask->tni[1] ||
+ nvgre_mask->tni[2]) {
list[t].h_u.nvgre_hdr.tni_flow =
(nvgre_spec->tni[2] << 16) |
(nvgre_spec->tni[1] << 8) |
nvgre_spec->tni[0];
list[t].m_u.nvgre_hdr.tni_flow =
- UINT32_MAX;
+ (nvgre_mask->tni[2] << 16) |
+ (nvgre_mask->tni[1] << 8) |
+ nvgre_mask->tni[0];
input_set |=
ICE_INSET_TUN_NVGRE_TNI;
}
t++;
- } else if (!nvgre_spec && !nvgre_mask) {
- list[t].type = ICE_NVGRE;
}
break;
@@ -870,23 +840,21 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (vlan_spec && vlan_mask) {
list[t].type = ICE_VLAN_OFOS;
- if (vlan_mask->tci == UINT16_MAX) {
+ if (vlan_mask->tci) {
list[t].h_u.vlan_hdr.vlan =
vlan_spec->tci;
list[t].m_u.vlan_hdr.vlan =
- UINT16_MAX;
+ vlan_mask->tci;
input_set |= ICE_INSET_VLAN_OUTER;
}
- if (vlan_mask->inner_type == UINT16_MAX) {
+ if (vlan_mask->inner_type) {
list[t].h_u.vlan_hdr.type =
vlan_spec->inner_type;
list[t].m_u.vlan_hdr.type =
- UINT16_MAX;
+ vlan_mask->inner_type;
input_set |= ICE_INSET_VLAN_OUTER;
}
t++;
- } else if (!vlan_spec && !vlan_mask) {
- list[t].type = ICE_VLAN_OFOS;
}
break;
@@ -918,19 +886,16 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
return 0;
}
list[t].type = ICE_PPPOE;
- if (pppoe_mask->session_id == UINT16_MAX) {
+ if (pppoe_mask->session_id) {
list[t].h_u.pppoe_hdr.session_id =
pppoe_spec->session_id;
list[t].m_u.pppoe_hdr.session_id =
- UINT16_MAX;
+ pppoe_mask->session_id;
input_set |= ICE_INSET_PPPOE_SESSION;
}
t++;
pppoe_valid = 1;
- } else if (!pppoe_spec && !pppoe_mask) {
- list[t].type = ICE_PPPOE;
}
-
break;
case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
@@ -953,18 +918,15 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
if (pppoe_valid)
t--;
list[t].type = ICE_PPPOE;
- if (pppoe_proto_mask->proto_id == UINT16_MAX) {
+ if (pppoe_proto_mask->proto_id) {
list[t].h_u.pppoe_hdr.ppp_prot_id =
pppoe_proto_spec->proto_id;
list[t].m_u.pppoe_hdr.ppp_prot_id =
- UINT16_MAX;
+ pppoe_proto_mask->proto_id;
input_set |= ICE_INSET_PPPOE_PROTO;
}
t++;
- } else if (!pppoe_proto_spec && !pppoe_proto_mask) {
- list[t].type = ICE_PPPOE;
}
-
break;
case RTE_FLOW_ITEM_TYPE_VOID:
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v4 04/13] net/ice: add support for MAC VLAN rule
2020-04-03 4:45 ` [dpdk-dev] [PATCH v4 00/13] add switch filter support for intel DCF Wei Zhao
` (2 preceding siblings ...)
2020-04-03 4:45 ` [dpdk-dev] [PATCH v4 03/13] net/ice: change switch parser to support flexible mask Wei Zhao
@ 2020-04-03 4:46 ` Wei Zhao
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 05/13] net/ice: change default tunnel type Wei Zhao
` (9 subsequent siblings)
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-03 4:46 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, nannan.lu, qi.fu, yuan.peng, Wei Zhao
This patch adds support for MAC VLAN rules;
it enables the switch filter to direct packets based on
MAC address and VLAN ID.
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
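As a usage sketch (the MAC address and VLAN id are placeholders): a MAC
VLAN pattern enabled by this patch could be built like below.

	struct rte_flow_item_eth eth_spec = {
		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
	};
	struct rte_flow_item_eth eth_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	};
	struct rte_flow_item_vlan vlan_spec = { .tci = RTE_BE16(100) };
	struct rte_flow_item_vlan vlan_mask = { .tci = RTE_BE16(0x0fff) };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
		  .spec = &vlan_spec, .mask = &vlan_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};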
---
drivers/net/ice/ice_switch_filter.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 4edaea3f5..ed02d9805 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -29,6 +29,9 @@
#define ICE_SW_INSET_ETHER ( \
ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
+#define ICE_SW_INSET_MAC_VLAN ( \
+ ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \
+ ICE_INSET_VLAN_OUTER)
#define ICE_SW_INSET_MAC_IPV4 ( \
ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
@@ -107,6 +110,8 @@ static struct
ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
{pattern_ethertype,
ICE_SW_INSET_ETHER, ICE_INSET_NONE},
+ {pattern_ethertype_vlan,
+ ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
{pattern_eth_ipv4,
ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
{pattern_eth_ipv4_udp,
@@ -149,6 +154,8 @@ static struct
ice_pattern_match_item ice_switch_pattern_dist_os[] = {
{pattern_ethertype,
ICE_SW_INSET_ETHER, ICE_INSET_NONE},
+ {pattern_ethertype_vlan,
+ ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
{pattern_eth_arp,
ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv4,
@@ -179,6 +186,8 @@ ice_pattern_match_item ice_switch_pattern_dist_os[] = {
static struct
ice_pattern_match_item ice_switch_pattern_perm[] = {
+ {pattern_ethertype_vlan,
+ ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
{pattern_eth_ipv4,
ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
{pattern_eth_ipv4_udp,
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v4 05/13] net/ice: change default tunnel type
2020-04-03 4:45 ` [dpdk-dev] [PATCH v4 00/13] add switch filter support for intel DCF Wei Zhao
` (3 preceding siblings ...)
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 04/13] net/ice: add support for MAC VLAN rule Wei Zhao
@ 2020-04-03 4:46 ` Wei Zhao
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 06/13] net/ice: add action number check for switch Wei Zhao
` (8 subsequent siblings)
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-03 4:46 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, nannan.lu, qi.fu, yuan.peng, stable, Wei Zhao
The default tunnel type for the switch filter is changed to the new
definition ICE_SW_TUN_AND_NON_TUN so that the rule
will apply to more packet types.
Cc: stable@dpdk.org
Fixes: 47d460d63233 ("net/ice: rework switch filter")
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_switch_filter.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index ed02d9805..d9bdf9637 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -1091,7 +1091,8 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
uint16_t lkups_num = 0;
const struct rte_flow_item *item = pattern;
uint16_t item_num = 0;
- enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
+ enum ice_sw_tunnel_type tun_type =
+ ICE_SW_TUN_AND_NON_TUN;
struct ice_pattern_match_item *pattern_match_item = NULL;
for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v4 06/13] net/ice: add action number check for switch
2020-04-03 4:45 ` [dpdk-dev] [PATCH v4 00/13] add switch filter support for intel DCF Wei Zhao
` (4 preceding siblings ...)
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 05/13] net/ice: change default tunnel type Wei Zhao
@ 2020-04-03 4:46 ` Wei Zhao
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 07/13] net/ice: add support for ESP/AH/L2TP Wei Zhao
` (7 subsequent siblings)
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-03 4:46 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, nannan.lu, qi.fu, yuan.peng, stable, Wei Zhao
The action number can only be one for the DCF or PF
switch filter; multiple actions are not supported.
Cc: stable@dpdk.org
Fixes: 47d460d63233 ("net/ice: rework switch filter")
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
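As an illustrative sketch (the VF and queue ids are placeholders) of what
the new check accepts and rejects:

	struct rte_flow_action_vf vf = { .id = 1 };
	struct rte_flow_action_queue q = { .index = 2 };

	/* Accepted: exactly one fate action; VOID entries are skipped. */
	struct rte_flow_action ok_actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_VOID },
		{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Rejected: two fate actions in a single rule. */
	struct rte_flow_action bad_actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};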
---
drivers/net/ice/ice_switch_filter.c | 48 +++++++++++++++++++++++++++++
1 file changed, 48 insertions(+)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index d9bdf9637..cc48f22dd 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -1073,6 +1073,46 @@ ice_switch_parse_action(struct ice_pf *pf,
return -rte_errno;
}
+static int
+ice_switch_check_action(const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action *action;
+ enum rte_flow_action_type action_type;
+ uint16_t actions_num = 0;
+
+ for (action = actions; action->type !=
+ RTE_FLOW_ACTION_TYPE_END; action++) {
+ action_type = action->type;
+ switch (action_type) {
+ case RTE_FLOW_ACTION_TYPE_VF:
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ actions_num++;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ continue;
+ default:
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "Invalid action type");
+ return -rte_errno;
+ }
+ }
+
+ if (actions_num > 1) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "Invalid action number");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
static int
ice_switch_parse_pattern_action(struct ice_adapter *ad,
struct ice_pattern_match_item *array,
@@ -1158,6 +1198,14 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
goto error;
}
+ ret = ice_switch_check_action(actions, error);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Invalid input action number");
+ goto error;
+ }
+
if (ad->hw.dcf_enabled)
ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
else
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v4 07/13] net/ice: add support for ESP/AH/L2TP
2020-04-03 4:45 ` [dpdk-dev] [PATCH v4 00/13] add switch filter support for intel DCF Wei Zhao
` (5 preceding siblings ...)
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 06/13] net/ice: add action number check for switch Wei Zhao
@ 2020-04-03 4:46 ` Wei Zhao
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 08/13] net/ice: add support for PFCP Wei Zhao
` (6 subsequent siblings)
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-03 4:46 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, nannan.lu, qi.fu, yuan.peng, Wei Zhao
This patch adds support for ESP/AH/L2TP packets;
it enables the switch filter to direct IPv6 packets with an
ESP/AH/L2TP payload to a specific action.
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
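As a usage sketch: per the parser below, the ESP/AH/L2TPV3OIP items must
be given with NULL spec and mask; they only select the hardware profile.
For ESP over IPv6:

	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV6 },
		/* spec/mask stay NULL: the item only selects the profile */
		{ .type = RTE_FLOW_ITEM_TYPE_ESP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};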
---
drivers/net/ice/ice_generic_flow.c | 19 +++++++
drivers/net/ice/ice_generic_flow.h | 9 +++
drivers/net/ice/ice_switch_filter.c | 87 +++++++++++++++++++++++++++--
3 files changed, 109 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 0fdc7e617..189ef6c4a 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -1382,6 +1382,25 @@ enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6_icmp6[] = {
RTE_FLOW_ITEM_TYPE_ICMP6,
RTE_FLOW_ITEM_TYPE_END,
};
+enum rte_flow_item_type pattern_eth_ipv6_esp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_ESP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+enum rte_flow_item_type pattern_eth_ipv6_ah[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_AH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+enum rte_flow_item_type pattern_eth_ipv6_l2tp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
typedef struct ice_flow_engine * (*parse_engine_t)(struct ice_adapter *ad,
struct rte_flow *flow,
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
index 3361ecbd9..006fd00b3 100644
--- a/drivers/net/ice/ice_generic_flow.h
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -391,6 +391,15 @@ extern enum rte_flow_item_type pattern_eth_pppoes_ipv6_icmp6[];
extern enum rte_flow_item_type pattern_eth_vlan_pppoes_ipv6_icmp6[];
extern enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6_icmp6[];
+/* ESP */
+extern enum rte_flow_item_type pattern_eth_ipv6_esp[];
+
+/* AH */
+extern enum rte_flow_item_type pattern_eth_ipv6_ah[];
+
+/* L2TP */
+extern enum rte_flow_item_type pattern_eth_ipv6_l2tp[];
+
struct ice_adapter;
extern const struct rte_flow_ops ice_flow_ops;
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index cc48f22dd..9c87a16dd 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -148,6 +148,12 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
{pattern_eth_vlan_pppoes_proto,
ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
+ {pattern_eth_ipv6_esp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_ah,
+ ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_l2tp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
};
static struct
@@ -212,6 +218,12 @@ ice_pattern_match_item ice_switch_pattern_perm[] = {
ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
+ {pattern_eth_ipv6_esp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_ah,
+ ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_l2tp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
};
static int
@@ -319,7 +331,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
struct rte_flow_error *error,
struct ice_adv_lkup_elem *list,
uint16_t *lkups_num,
- enum ice_sw_tunnel_type tun_type)
+ enum ice_sw_tunnel_type *tun_type)
{
const struct rte_flow_item *item = pattern;
enum rte_flow_item_type item_type;
@@ -335,10 +347,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
*pppoe_proto_mask;
+ const struct rte_flow_item_esp *esp_spec, *esp_mask;
+ const struct rte_flow_item_ah *ah_spec, *ah_mask;
+ const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
uint64_t input_set = ICE_INSET_NONE;
uint16_t j, t = 0;
uint16_t tunnel_valid = 0;
uint16_t pppoe_valid = 0;
+ uint16_t ipv6_valiad = 0;
for (item = pattern; item->type !=
@@ -504,6 +520,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
case RTE_FLOW_ITEM_TYPE_IPV6:
ipv6_spec = item->spec;
ipv6_mask = item->mask;
+ ipv6_valiad = 1;
if (ipv6_spec && ipv6_mask) {
if (ipv6_mask->hdr.payload_len) {
rte_flow_error_set(error, EINVAL,
@@ -642,7 +659,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
input_set |=
ICE_INSET_UDP_DST_PORT;
}
- if (tun_type == ICE_SW_TUN_VXLAN &&
+ if (*tun_type == ICE_SW_TUN_VXLAN &&
tunnel_valid == 0)
list[t].type = ICE_UDP_OF;
else
@@ -938,6 +955,48 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
break;
+ case RTE_FLOW_ITEM_TYPE_ESP:
+ esp_spec = item->spec;
+ esp_mask = item->mask;
+ if (esp_spec || esp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid esp item");
+ return -ENOTSUP;
+ }
+ if (ipv6_valiad)
+ *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_AH:
+ ah_spec = item->spec;
+ ah_mask = item->mask;
+ if (ah_spec || ah_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ah item");
+ return -ENOTSUP;
+ }
+ if (ipv6_valiad)
+ *tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
+ l2tp_spec = item->spec;
+ l2tp_mask = item->mask;
+ if (l2tp_spec || l2tp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid l2tp item");
+ return -ENOTSUP;
+ }
+ if (ipv6_valiad)
+ *tun_type = ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
+ break;
+
case RTE_FLOW_ITEM_TYPE_VOID:
break;
@@ -1113,6 +1172,21 @@ ice_switch_check_action(const struct rte_flow_action *actions,
return 0;
}
+static bool
+ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
+{
+ switch (tun_type) {
+ case ICE_SW_TUN_PROFID_IPV6_ESP:
+ case ICE_SW_TUN_PROFID_IPV6_AH:
+ case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
static int
ice_switch_parse_pattern_action(struct ice_adapter *ad,
struct ice_pattern_match_item *array,
@@ -1168,8 +1242,6 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
return -rte_errno;
}
- rule_info.tun_type = tun_type;
-
sw_meta_ptr =
rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
if (!sw_meta_ptr) {
@@ -1189,8 +1261,9 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
}
inputset = ice_switch_inset_get
- (pattern, error, list, &lkups_num, tun_type);
- if (!inputset || (inputset & ~pattern_match_item->input_set_mask)) {
+ (pattern, error, list, &lkups_num, &tun_type);
+ if ((!inputset && !ice_is_profile_rule(tun_type)) ||
+ (inputset & ~pattern_match_item->input_set_mask)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
pattern,
@@ -1198,6 +1271,8 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
goto error;
}
+ rule_info.tun_type = tun_type;
+
ret = ice_switch_check_action(actions, error);
if (ret) {
rte_flow_error_set(error, EINVAL,
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
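For reference, a minimal sketch (not part of the patch) of how an application could request one of the profile rules added above. The ESP/AH/L2TP items must carry no spec/mask, so the rule matches on the profile alone; port 0 and VF id 1 are placeholders.
#include <rte_flow.h>
/* Sketch: request the MAC_IPV6_L2TPV3 profile rule and forward
 * matching packets to a VF; values are illustrative. */
static int
request_l2tp_profile_rule(void)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV6 },      /* sets ipv6_valiad */
		{ .type = RTE_FLOW_ITEM_TYPE_L2TPV3OIP }, /* spec/mask must be NULL */
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_vf vf = { .id = 1 }; /* hypothetical target VF */
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;
	return rte_flow_create(0, &attr, pattern, actions, &err) ? 0 : -1;
}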
* [dpdk-dev] [PATCH v4 08/13] net/ice: add support for PFCP
2020-04-03 4:45 ` [dpdk-dev] [PATCH v4 00/13] add switch filter support for intel DCF Wei Zhao
` (6 preceding siblings ...)
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 07/13] net/ice: add support for ESP/AH/L2TP Wei Zhao
@ 2020-04-03 4:46 ` Wei Zhao
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 09/13] net/ice: add support for IPv6 NAT-T Wei Zhao
` (5 subsequent siblings)
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-03 4:46 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, nannan.lu, qi.fu, yuan.peng, Wei Zhao
This patch adds switch filter support for PFCP packets;
it enables the switch filter to direct IPv4/IPv6 packets
with a PFCP session or node payload to a specific action.
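A hedged sketch of the pattern an application would pass to rte_flow_create() for the IPv4 PFCP session profile; per the parser below, s_field = 1 selects the session profile and s_field = 0 the node profile, and masking any other PFCP field is rejected. Swapping in RTE_FLOW_ITEM_TYPE_IPV6 selects the IPv6 profiles instead.
struct rte_flow_item_pfcp pfcp_spec = { .s_field = 0x01 }; /* 1 = session, 0 = node */
struct rte_flow_item_pfcp pfcp_mask = { .s_field = 0x01 }; /* other fields must stay 0 */
struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
	{ .type = RTE_FLOW_ITEM_TYPE_PFCP,
	  .spec = &pfcp_spec, .mask = &pfcp_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};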
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_generic_flow.c | 15 +++++++
drivers/net/ice/ice_generic_flow.h | 4 ++
drivers/net/ice/ice_switch_filter.c | 62 +++++++++++++++++++++++++++++
3 files changed, 81 insertions(+)
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 189ef6c4a..04dcaba08 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -1400,6 +1400,21 @@ enum rte_flow_item_type pattern_eth_ipv6_l2tp[] = {
RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
RTE_FLOW_ITEM_TYPE_END,
};
+enum rte_flow_item_type pattern_eth_ipv4_pfcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_PFCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+enum rte_flow_item_type pattern_eth_ipv6_pfcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_PFCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
typedef struct ice_flow_engine * (*parse_engine_t)(struct ice_adapter *ad,
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
index 006fd00b3..8a866d2ee 100644
--- a/drivers/net/ice/ice_generic_flow.h
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -400,6 +400,10 @@ extern enum rte_flow_item_type pattern_eth_ipv6_ah[];
/* L2TP */
extern enum rte_flow_item_type pattern_eth_ipv6_l2tp[];
+/* PFCP */
+extern enum rte_flow_item_type pattern_eth_ipv4_pfcp[];
+extern enum rte_flow_item_type pattern_eth_ipv6_pfcp[];
+
struct ice_adapter;
extern const struct rte_flow_ops ice_flow_ops;
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 9c87a16dd..9b4b9346c 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -154,6 +154,10 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv6_l2tp,
ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv4_pfcp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_pfcp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
};
static struct
@@ -224,6 +228,10 @@ ice_pattern_match_item ice_switch_pattern_perm[] = {
ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv6_l2tp,
ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv4_pfcp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_pfcp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
};
static int
@@ -350,6 +358,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
const struct rte_flow_item_esp *esp_spec, *esp_mask;
const struct rte_flow_item_ah *ah_spec, *ah_mask;
const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
+ const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
uint64_t input_set = ICE_INSET_NONE;
uint16_t j, t = 0;
uint16_t tunnel_valid = 0;
@@ -996,6 +1005,55 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
if (ipv6_valiad)
*tun_type = ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
break;
+ case RTE_FLOW_ITEM_TYPE_PFCP:
+ pfcp_spec = item->spec;
+ pfcp_mask = item->mask;
+ /* Check if PFCP item is used to describe protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
+ */
+ if ((!pfcp_spec && pfcp_mask) ||
+ (pfcp_spec && !pfcp_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid PFCP item");
+ return -ENOTSUP;
+ }
+ if (pfcp_spec && pfcp_mask) {
+ /* Check pfcp mask and update input set */
+ if (pfcp_mask->msg_type ||
+ pfcp_mask->msg_len ||
+ pfcp_mask->seid) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid pfcp mask");
+ return -ENOTSUP;
+ }
+ if (pfcp_mask->s_field &&
+ pfcp_spec->s_field == 0x01 &&
+ ipv6_valiad)
+ *tun_type =
+ ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
+ else if (pfcp_mask->s_field &&
+ pfcp_spec->s_field == 0x01)
+ *tun_type =
+ ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
+ else if (pfcp_mask->s_field &&
+ !pfcp_spec->s_field &&
+ ipv6_valiad)
+ *tun_type =
+ ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
+ else if (pfcp_mask->s_field &&
+ !pfcp_spec->s_field)
+ *tun_type =
+ ICE_SW_TUN_PROFID_IPV4_PFCP_NODE;
+ else
+ return -ENOTSUP;
+ }
+ break;
+
case RTE_FLOW_ITEM_TYPE_VOID:
break;
@@ -1179,6 +1237,10 @@ ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
case ICE_SW_TUN_PROFID_IPV6_ESP:
case ICE_SW_TUN_PROFID_IPV6_AH:
case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
+ case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
+ case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
+ case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
+ case ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION:
return true;
default:
break;
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v4 09/13] net/ice: add support for IPv6 NAT-T
2020-04-03 4:45 ` [dpdk-dev] [PATCH v4 00/13] add switch filter support for intel DCF Wei Zhao
` (7 preceding siblings ...)
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 08/13] net/ice: add support for PFCP Wei Zhao
@ 2020-04-03 4:46 ` Wei Zhao
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 10/13] net/ice: add more flow support for permission stage Wei Zhao
` (4 subsequent siblings)
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-03 4:46 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, nannan.lu, qi.fu, yuan.peng, Wei Zhao
This patch adds switch filter support for IPv6 NAT-T packets;
it enables the switch filter to direct IPv6 packets with a
NAT-T payload to a specific action.
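In other words, inserting a UDP item between IPv6 and ESP/AH now selects the NAT-T profile instead of the plain ESP/AH one. A sketch of such a pattern (the items carry no spec/mask, so the rule matches on profile alone):
struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV6 }, /* sets ipv6_valiad */
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },  /* sets udp_valiad */
	{ .type = RTE_FLOW_ITEM_TYPE_ESP },  /* -> ICE_SW_TUN_PROFID_IPV6_NAT_T */
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};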
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_generic_flow.c | 14 ++++++++++++++
drivers/net/ice/ice_generic_flow.h | 2 ++
drivers/net/ice/ice_switch_filter.c | 19 +++++++++++++++++--
3 files changed, 33 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 04dcaba08..3365aeb86 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -1394,6 +1394,20 @@ enum rte_flow_item_type pattern_eth_ipv6_ah[] = {
RTE_FLOW_ITEM_TYPE_AH,
RTE_FLOW_ITEM_TYPE_END,
};
+enum rte_flow_item_type pattern_eth_ipv6_udp_esp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_ESP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+enum rte_flow_item_type pattern_eth_ipv6_udp_ah[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_AH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
enum rte_flow_item_type pattern_eth_ipv6_l2tp[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV6,
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
index 8a866d2ee..9fe35df45 100644
--- a/drivers/net/ice/ice_generic_flow.h
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -393,9 +393,11 @@ extern enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6_icmp6[];
/* ESP */
extern enum rte_flow_item_type pattern_eth_ipv6_esp[];
+extern enum rte_flow_item_type pattern_eth_ipv6_udp_esp[];
/* AH */
extern enum rte_flow_item_type pattern_eth_ipv6_ah[];
+extern enum rte_flow_item_type pattern_eth_ipv6_udp_ah[];
/* L2TP */
extern enum rte_flow_item_type pattern_eth_ipv6_l2tp[];
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 9b4b9346c..4248b8911 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -150,8 +150,12 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
{pattern_eth_ipv6_esp,
ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_udp_esp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv6_ah,
ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_udp_ah,
+ ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv6_l2tp,
ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv4_pfcp,
@@ -224,8 +228,12 @@ ice_pattern_match_item ice_switch_pattern_perm[] = {
ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
{pattern_eth_ipv6_esp,
ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_udp_esp,
+ ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv6_ah,
ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv6_udp_ah,
+ ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv6_l2tp,
ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv4_pfcp,
@@ -364,6 +372,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
uint16_t tunnel_valid = 0;
uint16_t pppoe_valid = 0;
uint16_t ipv6_valiad = 0;
+ uint16_t udp_valiad = 0;
for (item = pattern; item->type !=
@@ -642,6 +651,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
case RTE_FLOW_ITEM_TYPE_UDP:
udp_spec = item->spec;
udp_mask = item->mask;
+ udp_valiad = 1;
if (udp_spec && udp_mask) {
/* Check UDP mask and update input set*/
if (udp_mask->hdr.dgram_len ||
@@ -974,7 +984,9 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
"Invalid esp item");
return -ENOTSUP;
}
- if (ipv6_valiad)
+ if (ipv6_valiad && udp_valiad)
+ *tun_type = ICE_SW_TUN_PROFID_IPV6_NAT_T;
+ else if (ipv6_valiad)
*tun_type = ICE_SW_TUN_PROFID_IPV6_ESP;
break;
@@ -988,7 +1000,9 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
"Invalid ah item");
return -ENOTSUP;
}
- if (ipv6_valiad)
+ if (ipv6_valiad && udp_valiad)
+ *tun_type = ICE_SW_TUN_PROFID_IPV6_NAT_T;
+ else if (ipv6_valiad)
*tun_type = ICE_SW_TUN_PROFID_IPV6_AH;
break;
@@ -1237,6 +1251,7 @@ ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
case ICE_SW_TUN_PROFID_IPV6_ESP:
case ICE_SW_TUN_PROFID_IPV6_AH:
case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
+ case ICE_SW_TUN_PROFID_IPV6_NAT_T:
case ICE_SW_TUN_PROFID_IPV4_PFCP_NODE:
case ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION:
case ICE_SW_TUN_PROFID_IPV6_PFCP_NODE:
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v4 10/13] net/ice: add more flow support for permission stage
2020-04-03 4:45 ` [dpdk-dev] [PATCH v4 00/13] add switch filter support for intel DCF Wei Zhao
` (8 preceding siblings ...)
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 09/13] net/ice: add support for IPv6 NAT-T Wei Zhao
@ 2020-04-03 4:46 ` Wei Zhao
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 11/13] net/ice: fix input set of VLAN item Wei Zhao
` (3 subsequent siblings)
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-03 4:46 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, nannan.lu, qi.fu, yuan.peng, Wei Zhao
This patch adds switch filter permission stage support
for more flow patterns in PF-only pipeline mode.
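For illustration, a minimal PPPoE session pattern that the permission-stage table below now accepts (a sketch only; the session id value is made up):
struct rte_flow_item_pppoe pppoe_spec = { .session_id = RTE_BE16(0x0001) };
struct rte_flow_item_pppoe pppoe_mask = { .session_id = RTE_BE16(0xffff) };
struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_PPPOES,
	  .spec = &pppoe_spec, .mask = &pppoe_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};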
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_switch_filter.c | 14 ++++++++++++++
1 file changed, 14 insertions(+)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 4248b8911..81d069e99 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -200,6 +200,8 @@ ice_pattern_match_item ice_switch_pattern_dist_os[] = {
static struct
ice_pattern_match_item ice_switch_pattern_perm[] = {
+ {pattern_ethertype,
+ ICE_SW_INSET_ETHER, ICE_INSET_NONE},
{pattern_ethertype_vlan,
ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
{pattern_eth_ipv4,
@@ -226,6 +228,18 @@ ice_pattern_match_item ice_switch_pattern_perm[] = {
ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
+ {pattern_eth_pppoed,
+ ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
+ {pattern_eth_vlan_pppoed,
+ ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
+ {pattern_eth_pppoes,
+ ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
+ {pattern_eth_vlan_pppoes,
+ ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE},
+ {pattern_eth_pppoes_proto,
+ ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
+ {pattern_eth_vlan_pppoes_proto,
+ ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE},
{pattern_eth_ipv6_esp,
ICE_INSET_NONE, ICE_INSET_NONE},
{pattern_eth_ipv6_udp_esp,
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v4 11/13] net/ice: fix input set of VLAN item
2020-04-03 4:45 ` [dpdk-dev] [PATCH v4 00/13] add switch filter support for intel DCF Wei Zhao
` (9 preceding siblings ...)
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 10/13] net/ice: add more flow support for permission stage Wei Zhao
@ 2020-04-03 4:46 ` Wei Zhao
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 12/13] net/ice: enable flow redirect on switch Wei Zhao
` (2 subsequent siblings)
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-03 4:46 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, nannan.lu, qi.fu, yuan.peng, stable, Wei Zhao
The input set for the inner type of a VLAN item should
be ICE_INSET_ETHERTYPE, not ICE_INSET_VLAN_OUTER.
This MAC VLAN filter is also part of the DCF switch filter.
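So a rule matching the VLAN-encapsulated ethertype, sketched below, now reports the ICE_INSET_ETHERTYPE bit instead of clobbering the outer-VLAN one (values are illustrative):
struct rte_flow_item_vlan vlan_spec = {
	.inner_type = RTE_BE16(RTE_ETHER_TYPE_IPV4) };
struct rte_flow_item_vlan vlan_mask = {
	.inner_type = RTE_BE16(0xffff) };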
Cc: stable@dpdk.org
Fixes: 47d460d63233 ("net/ice: rework switch filter")
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_switch_filter.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 81d069e99..686f9c3e3 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -911,7 +911,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
vlan_spec->inner_type;
list[t].m_u.vlan_hdr.type =
vlan_mask->inner_type;
- input_set |= ICE_INSET_VLAN_OUTER;
+ input_set |= ICE_INSET_ETHERTYPE;
}
t++;
}
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v4 12/13] net/ice: enable flow redirect on switch
2020-04-03 4:45 ` [dpdk-dev] [PATCH v4 00/13] add switch filter support for intel DCF Wei Zhao
` (10 preceding siblings ...)
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 11/13] net/ice: fix input set of VLAN item Wei Zhao
@ 2020-04-03 4:46 ` Wei Zhao
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 13/13] net/ice: redirect switch rule to new VSI Wei Zhao
2020-04-03 5:09 ` [dpdk-dev] [PATCH v4 00/13] add switch filter support for intel DCF Zhang, Qi Z
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-03 4:46 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, nannan.lu, qi.fu, yuan.peng, Beilei Xing
Enable flow redirect on the switch; currently only
VSI redirect is supported.
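A sketch of the redirect descriptor a caller fills in, mirroring how the DCF parent code uses it in the next patch (the handle and VSI number here are placeholders):
struct ice_flow_redirect rd = {
	.type = ICE_FLOW_REDIRECT_VSI,
	.vsi_handle = vsi_handle,   /* VF's VSI handle */
	.new_vsi_num = new_vsi_num, /* VSI number after remap */
};
ice_flow_redirect((struct ice_adapter *)hw->back, &rd);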
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/ice/ice_switch_filter.c | 73 +++++++++++++++++++++++++++++
1 file changed, 73 insertions(+)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 686f9c3e3..55a5618a7 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -1420,6 +1420,78 @@ ice_switch_query(struct ice_adapter *ad __rte_unused,
return -rte_errno;
}
+static int
+ice_switch_redirect(struct ice_adapter *ad,
+ struct rte_flow *flow,
+ struct ice_flow_redirect *rd)
+{
+ struct ice_rule_query_data *rdata = flow->rule;
+ struct ice_adv_fltr_mgmt_list_entry *list_itr;
+ struct ice_adv_lkup_elem *lkups_dp = NULL;
+ struct LIST_HEAD_TYPE *list_head;
+ struct ice_adv_rule_info rinfo;
+ struct ice_hw *hw = &ad->hw;
+ struct ice_switch_info *sw;
+ uint16_t lkups_cnt;
+ int ret;
+
+ sw = hw->switch_info;
+ if (!sw->recp_list[rdata->rid].recp_created)
+ return -EINVAL;
+
+ if (rd->type != ICE_FLOW_REDIRECT_VSI)
+ return -ENOTSUP;
+
+ list_head = &sw->recp_list[rdata->rid].filt_rules;
+ LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
+ list_entry) {
+ rinfo = list_itr->rule_info;
+ if (rinfo.fltr_rule_id == rdata->rule_id &&
+ rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
+ rinfo.sw_act.vsi_handle == rd->vsi_handle) {
+ lkups_cnt = list_itr->lkups_cnt;
+ lkups_dp = (struct ice_adv_lkup_elem *)
+ ice_memdup(hw, list_itr->lkups,
+ sizeof(*list_itr->lkups) *
+ lkups_cnt, ICE_NONDMA_TO_NONDMA);
+ if (!lkups_dp) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory.");
+ return -EINVAL;
+ }
+
+ break;
+ }
+ }
+
+ if (!lkups_dp)
+ return 0;
+
+ /* Remove the old rule */
+ ret = ice_rem_adv_rule(hw, list_itr->lkups,
+ lkups_cnt, &rinfo);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
+ rdata->rule_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Update VSI context */
+ hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
+
+ /* Replay the rule */
+ ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
+ &rinfo, rdata);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to replay the rule");
+ ret = -EINVAL;
+ }
+
+out:
+ ice_free(hw, lkups_dp);
+ return ret;
+}
+
static int
ice_switch_init(struct ice_adapter *ad)
{
@@ -1465,6 +1537,7 @@ ice_flow_engine ice_switch_engine = {
.create = ice_switch_create,
.destroy = ice_switch_destroy,
.query_count = ice_switch_query,
+ .redirect = ice_switch_redirect,
.free = ice_switch_filter_rule_free,
.type = ICE_FLOW_ENGINE_SWITCH,
};
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH v4 13/13] net/ice: redirect switch rule to new VSI
2020-04-03 4:45 ` [dpdk-dev] [PATCH v4 00/13] add switch filter support for intel DCF Wei Zhao
` (11 preceding siblings ...)
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 12/13] net/ice: enable flow redirect on switch Wei Zhao
@ 2020-04-03 4:46 ` Wei Zhao
2020-04-03 5:09 ` [dpdk-dev] [PATCH v4 00/13] add switch filter support for intel DCF Zhang, Qi Z
13 siblings, 0 replies; 69+ messages in thread
From: Wei Zhao @ 2020-04-03 4:46 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, nannan.lu, qi.fu, yuan.peng, Beilei Xing
After a VF reset, the VF's VSI number may change, so a
switch rule that forwards packets to the old VSI
number should be redirected to the new VSI number.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/ice/ice_dcf_parent.c | 22 +++++++++++++++++++---
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index 37f0e2be2..e05b6b3e5 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -19,6 +19,8 @@ ice_dcf_update_vsi_ctx(struct ice_hw *hw, uint16_t vsi_handle,
uint16_t vsi_map)
{
struct ice_vsi_ctx *vsi_ctx;
+ bool first_update = false;
+ uint16_t new_vsi_num;
if (unlikely(vsi_handle >= ICE_MAX_VSI)) {
PMD_DRV_LOG(ERR, "Invalid vsi handle %u", vsi_handle);
@@ -35,11 +37,25 @@ ice_dcf_update_vsi_ctx(struct ice_hw *hw, uint16_t vsi_handle,
vsi_handle);
return;
}
+ hw->vsi_ctx[vsi_handle] = vsi_ctx;
+ first_update = true;
}
- vsi_ctx->vsi_num = (vsi_map & VIRTCHNL_DCF_VF_VSI_ID_M) >>
- VIRTCHNL_DCF_VF_VSI_ID_S;
- hw->vsi_ctx[vsi_handle] = vsi_ctx;
+ new_vsi_num = (vsi_map & VIRTCHNL_DCF_VF_VSI_ID_M) >>
+ VIRTCHNL_DCF_VF_VSI_ID_S;
+
+ /* Redirect rules if vsi mapping table changes. */
+ if (!first_update && vsi_ctx->vsi_num != new_vsi_num) {
+ struct ice_flow_redirect rd;
+
+ memset(&rd, 0, sizeof(struct ice_flow_redirect));
+ rd.type = ICE_FLOW_REDIRECT_VSI;
+ rd.vsi_handle = vsi_handle;
+ rd.new_vsi_num = new_vsi_num;
+ ice_flow_redirect((struct ice_adapter *)hw->back, &rd);
+ } else {
+ vsi_ctx->vsi_num = new_vsi_num;
+ }
PMD_DRV_LOG(DEBUG, "VF%u is assigned with vsi number %u",
vsi_handle, vsi_ctx->vsi_num);
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
* Re: [dpdk-dev] [PATCH v4 00/13] add switch filter support for intel DCF
2020-04-03 4:45 ` [dpdk-dev] [PATCH v4 00/13] add switch filter support for intel DCF Wei Zhao
` (12 preceding siblings ...)
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 13/13] net/ice: redirect switch rule to new VSI Wei Zhao
@ 2020-04-03 5:09 ` Zhang, Qi Z
2020-04-04 6:17 ` Ye Xiaolong
13 siblings, 1 reply; 69+ messages in thread
From: Zhang, Qi Z @ 2020-04-03 5:09 UTC (permalink / raw)
To: Zhao1, Wei, dev; +Cc: Lu, Nannan, Fu, Qi, Peng, Yuan
> -----Original Message-----
> From: Zhao1, Wei <wei.zhao1@intel.com>
> Sent: Friday, April 3, 2020 12:46 PM
> To: dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Lu, Nannan <nannan.lu@intel.com>;
> Fu, Qi <qi.fu@intel.com>; Peng, Yuan <yuan.peng@intel.com>
> Subject: [PATCH v4 00/13] add switch filter support for intel DCF
>
> A DCF (Device Config Function) framework has been add for intel device, this
> patch set add add switch filter support for it, this set also fix bugs which block
> this feature.
>
> This patchset is based on:
> [1] https://patchwork.dpdk.org/cover/66480/ : add Intel DCF PMD support
>
> Depends-on: series-8859
>
> v2:
> -add switch filter support for AH/ESP/PFCP packet -fix some patch check
> warning -add flow redirect on switch patch
>
> v3:
> -update commit log
> -update in redirect on switch patch
>
> v4:
> -update as comment
>
> Beilei Xing (2):
> net/ice: enable flow redirect on switch
> net/ice: redirect switch rule to new VSI
>
> Wei Zhao (11):
> net/ice: enable switch flow on DCF
> net/ice: support for more PPPoE input set
> net/ice: change swicth parser to support flexible mask
> net/ice: add support for MAC VLAN rule
> net/ice: change default tunnle type
> net/ice: add action number check for swicth
> net/ice: add support for ESP/AH/L2TP
> net/ice: add support for PFCP
> net/ice: add support for IPv6 NAT-T
> net/ice: add more flow support for permission stage
> net/ice: fix input set of VLAN item
>
> doc/guides/rel_notes/release_20_05.rst | 2 +-
> drivers/net/ice/ice_dcf_ethdev.c | 10 +-
> drivers/net/ice/ice_dcf_parent.c | 30 +-
> drivers/net/ice/ice_fdir_filter.c | 6 +
> drivers/net/ice/ice_generic_flow.c | 61 +++
> drivers/net/ice/ice_generic_flow.h | 24 +
> drivers/net/ice/ice_hash.c | 6 +
> drivers/net/ice/ice_switch_filter.c | 730 +++++++++++++++++++------
> 8 files changed, 682 insertions(+), 187 deletions(-)
>
> --
> 2.19.1
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
^ permalink raw reply [flat|nested] 69+ messages in thread
* Re: [dpdk-dev] [PATCH v4 00/13] add switch filter support for intel DCF
2020-04-03 5:09 ` [dpdk-dev] [PATCH v4 00/13] add switch filter support for intel DCF Zhang, Qi Z
@ 2020-04-04 6:17 ` Ye Xiaolong
0 siblings, 0 replies; 69+ messages in thread
From: Ye Xiaolong @ 2020-04-04 6:17 UTC (permalink / raw)
To: Zhang, Qi Z; +Cc: Zhao1, Wei, dev, Lu, Nannan, Fu, Qi, Peng, Yuan
On 04/03, Zhang, Qi Z wrote:
>
>
>> -----Original Message-----
>> From: Zhao1, Wei <wei.zhao1@intel.com>
>> Sent: Friday, April 3, 2020 12:46 PM
>> To: dev@dpdk.org
>> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Lu, Nannan <nannan.lu@intel.com>;
>> Fu, Qi <qi.fu@intel.com>; Peng, Yuan <yuan.peng@intel.com>
>> Subject: [PATCH v4 00/13] add switch filter support for intel DCF
>>
>> A DCF (Device Config Function) framework has been add for intel device, this
>> patch set add add switch filter support for it, this set also fix bugs which block
>> this feature.
>>
>> This patchset is based on:
>> [1] https://patchwork.dpdk.org/cover/66480/ : add Intel DCF PMD support
>>
>> Depends-on: series-8859
>>
>> v2:
>> -add switch filter support for AH/ESP/PFCP packet -fix some patch check
>> warning -add flow redirect on switch patch
>>
>> v3:
>> -update commit log
>> -update in redirect on switch patch
>>
>> v4:
>> -update as comment
>>
>> Beilei Xing (2):
>> net/ice: enable flow redirect on switch
>> net/ice: redirect switch rule to new VSI
>>
>> Wei Zhao (11):
>> net/ice: enable switch flow on DCF
>> net/ice: support for more PPPoE input set
>> net/ice: change swicth parser to support flexible mask
>> net/ice: add support for MAC VLAN rule
>> net/ice: change default tunnle type
>> net/ice: add action number check for swicth
>> net/ice: add support for ESP/AH/L2TP
>> net/ice: add support for PFCP
>> net/ice: add support for IPv6 NAT-T
>> net/ice: add more flow support for permission stage
>> net/ice: fix input set of VLAN item
>>
>> doc/guides/rel_notes/release_20_05.rst | 2 +-
>> drivers/net/ice/ice_dcf_ethdev.c | 10 +-
>> drivers/net/ice/ice_dcf_parent.c | 30 +-
>> drivers/net/ice/ice_fdir_filter.c | 6 +
>> drivers/net/ice/ice_generic_flow.c | 61 +++
>> drivers/net/ice/ice_generic_flow.h | 24 +
>> drivers/net/ice/ice_hash.c | 6 +
>> drivers/net/ice/ice_switch_filter.c | 730 +++++++++++++++++++------
>> 8 files changed, 682 insertions(+), 187 deletions(-)
>>
>> --
>> 2.19.1
>
>Acked-by: Qi Zhang <qi.z.zhang@intel.com>
>
Applied to dpdk-next-net-intel, Thanks.
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-dev] [PATCH 3/7] net/ice: change swicth parser to support flexible mask
2020-03-13 1:04 [dpdk-dev] [PATCH 0/7] " wei.zhao1@intel.com
@ 2020-03-13 1:04 ` wei.zhao1@intel.com
0 siblings, 0 replies; 69+ messages in thread
From: wei.zhao1@intel.com @ 2020-03-13 1:04 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, xiaolong.ye, Wei Zhao
DCF needs to support configuration of flexible masks, that is
to say some input set masks may not be the all-ones 0xFFFF type.
For example, in order to direct L2/IP multicast packets, the mask
for the source IP may be 0xF0000000; this patch enables the switch
filter parser to handle it.
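A sketch of such a rule (addresses are illustrative): matching only the top nibble of the IPv4 source address, i.e. the 224.0.0.0/4 multicast range, which the old all-ones mask check would have rejected:
struct rte_flow_item_ipv4 ipv4_spec = {
	.hdr.src_addr = RTE_BE32(RTE_IPV4(224, 0, 0, 0)) };
struct rte_flow_item_ipv4 ipv4_mask = {
	.hdr.src_addr = RTE_BE32(0xF0000000) };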
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_switch_filter.c | 298 +++++++++++++---------------
1 file changed, 133 insertions(+), 165 deletions(-)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 39b5c7266..af7e9cb0b 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -326,9 +326,6 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask;
const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec,
*pppoe_proto_mask;
- uint8_t ipv6_addr_mask[16] = {
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
uint64_t input_set = ICE_INSET_NONE;
uint16_t j, t = 0;
uint16_t tunnel_valid = 0;
@@ -351,19 +348,29 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
eth_spec = item->spec;
eth_mask = item->mask;
if (eth_spec && eth_mask) {
- if (tunnel_valid &&
- rte_is_broadcast_ether_addr(ð_mask->src))
- input_set |= ICE_INSET_TUN_SMAC;
- else if (
- rte_is_broadcast_ether_addr(ð_mask->src))
- input_set |= ICE_INSET_SMAC;
- if (tunnel_valid &&
- rte_is_broadcast_ether_addr(ð_mask->dst))
- input_set |= ICE_INSET_TUN_DMAC;
- else if (
- rte_is_broadcast_ether_addr(ð_mask->dst))
- input_set |= ICE_INSET_DMAC;
- if (eth_mask->type == RTE_BE16(0xffff))
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+ if (eth_mask->src.addr_bytes[j]) {
+ if (tunnel_valid)
+ input_set |=
+ ICE_INSET_TUN_SMAC;
+ else
+ input_set |=
+ ICE_INSET_SMAC;
+ break;
+ }
+ }
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+ if (eth_mask->dst.addr_bytes[j]) {
+ if (tunnel_valid)
+ input_set |=
+ ICE_INSET_TUN_DMAC;
+ else
+ input_set |=
+ ICE_INSET_DMAC;
+ break;
+ }
+ }
+ if (eth_mask->type)
input_set |= ICE_INSET_ETHERTYPE;
list[t].type = (tunnel_valid == 0) ?
ICE_MAC_OFOS : ICE_MAC_IL;
@@ -373,16 +380,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
h = &list[t].h_u.eth_hdr;
m = &list[t].m_u.eth_hdr;
for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
- if (eth_mask->src.addr_bytes[j] ==
- UINT8_MAX) {
+ if (eth_mask->src.addr_bytes[j]) {
h->src_addr[j] =
eth_spec->src.addr_bytes[j];
m->src_addr[j] =
eth_mask->src.addr_bytes[j];
i = 1;
}
- if (eth_mask->dst.addr_bytes[j] ==
- UINT8_MAX) {
+ if (eth_mask->dst.addr_bytes[j]) {
h->dst_addr[j] =
eth_spec->dst.addr_bytes[j];
m->dst_addr[j] =
@@ -392,17 +397,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (i)
t++;
- if (eth_mask->type == UINT16_MAX) {
+ if (eth_mask->type) {
list[t].type = ICE_ETYPE_OL;
list[t].h_u.ethertype.ethtype_id =
eth_spec->type;
list[t].m_u.ethertype.ethtype_id =
- UINT16_MAX;
+ eth_mask->type;
t++;
}
- } else if (!eth_spec && !eth_mask) {
- list[t].type = (tun_type == ICE_NON_TUN) ?
- ICE_MAC_OFOS : ICE_MAC_IL;
}
break;
@@ -423,81 +425,68 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (tunnel_valid) {
- if (ipv4_mask->hdr.type_of_service ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.type_of_service)
input_set |=
ICE_INSET_TUN_IPV4_TOS;
- if (ipv4_mask->hdr.src_addr ==
- UINT32_MAX)
+ if (ipv4_mask->hdr.src_addr)
input_set |=
ICE_INSET_TUN_IPV4_SRC;
- if (ipv4_mask->hdr.dst_addr ==
- UINT32_MAX)
+ if (ipv4_mask->hdr.dst_addr)
input_set |=
ICE_INSET_TUN_IPV4_DST;
- if (ipv4_mask->hdr.time_to_live ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.time_to_live)
input_set |=
ICE_INSET_TUN_IPV4_TTL;
- if (ipv4_mask->hdr.next_proto_id ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.next_proto_id)
input_set |=
ICE_INSET_TUN_IPV4_PROTO;
} else {
- if (ipv4_mask->hdr.src_addr ==
- UINT32_MAX)
+ if (ipv4_mask->hdr.src_addr)
input_set |= ICE_INSET_IPV4_SRC;
- if (ipv4_mask->hdr.dst_addr ==
- UINT32_MAX)
+ if (ipv4_mask->hdr.dst_addr)
input_set |= ICE_INSET_IPV4_DST;
- if (ipv4_mask->hdr.time_to_live ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.time_to_live)
input_set |= ICE_INSET_IPV4_TTL;
- if (ipv4_mask->hdr.next_proto_id ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.next_proto_id)
input_set |=
ICE_INSET_IPV4_PROTO;
- if (ipv4_mask->hdr.type_of_service ==
- UINT8_MAX)
+ if (ipv4_mask->hdr.type_of_service)
input_set |=
ICE_INSET_IPV4_TOS;
}
list[t].type = (tunnel_valid == 0) ?
ICE_IPV4_OFOS : ICE_IPV4_IL;
- if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+ if (ipv4_mask->hdr.src_addr) {
list[t].h_u.ipv4_hdr.src_addr =
ipv4_spec->hdr.src_addr;
list[t].m_u.ipv4_hdr.src_addr =
- UINT32_MAX;
+ ipv4_mask->hdr.src_addr;
}
- if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+ if (ipv4_mask->hdr.dst_addr) {
list[t].h_u.ipv4_hdr.dst_addr =
ipv4_spec->hdr.dst_addr;
list[t].m_u.ipv4_hdr.dst_addr =
- UINT32_MAX;
+ ipv4_mask->hdr.dst_addr;
}
- if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+ if (ipv4_mask->hdr.time_to_live) {
list[t].h_u.ipv4_hdr.time_to_live =
ipv4_spec->hdr.time_to_live;
list[t].m_u.ipv4_hdr.time_to_live =
- UINT8_MAX;
+ ipv4_mask->hdr.time_to_live;
}
- if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+ if (ipv4_mask->hdr.next_proto_id) {
list[t].h_u.ipv4_hdr.protocol =
ipv4_spec->hdr.next_proto_id;
list[t].m_u.ipv4_hdr.protocol =
- UINT8_MAX;
+ ipv4_mask->hdr.next_proto_id;
}
- if (ipv4_mask->hdr.type_of_service ==
- UINT8_MAX) {
+ if (ipv4_mask->hdr.type_of_service) {
list[t].h_u.ipv4_hdr.tos =
ipv4_spec->hdr.type_of_service;
- list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
+ list[t].m_u.ipv4_hdr.tos =
+ ipv4_mask->hdr.type_of_service;
}
t++;
- } else if (!ipv4_spec && !ipv4_mask) {
- list[t].type = (tunnel_valid == 0) ?
- ICE_IPV4_OFOS : ICE_IPV4_IL;
}
break;
@@ -514,51 +503,58 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (tunnel_valid) {
- if (!memcmp(ipv6_mask->hdr.src_addr,
- ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.src_addr)))
+ for (j = 0; j < ICE_IPV6_ADDR_LENGTH;
+ j++) {
+ if (ipv6_mask->hdr.src_addr[j]) {
input_set |=
- ICE_INSET_TUN_IPV6_SRC;
- if (!memcmp(ipv6_mask->hdr.dst_addr,
- ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.dst_addr)))
+ ICE_INSET_TUN_IPV6_SRC;
+ break;
+ }
+ }
+ for (j = 0; j < ICE_IPV6_ADDR_LENGTH;
+ j++) {
+ if (ipv6_mask->hdr.dst_addr[j]) {
input_set |=
- ICE_INSET_TUN_IPV6_DST;
- if (ipv6_mask->hdr.proto == UINT8_MAX)
+ ICE_INSET_TUN_IPV6_DST;
+ break;
+ }
+ }
+ if (ipv6_mask->hdr.proto)
input_set |=
ICE_INSET_TUN_IPV6_NEXT_HDR;
- if (ipv6_mask->hdr.hop_limits ==
- UINT8_MAX)
+ if (ipv6_mask->hdr.hop_limits)
input_set |=
ICE_INSET_TUN_IPV6_HOP_LIMIT;
- if ((ipv6_mask->hdr.vtc_flow &
+ if (ipv6_mask->hdr.vtc_flow &
rte_cpu_to_be_32
(RTE_IPV6_HDR_TC_MASK))
- == rte_cpu_to_be_32
- (RTE_IPV6_HDR_TC_MASK))
input_set |=
ICE_INSET_TUN_IPV6_TC;
} else {
- if (!memcmp(ipv6_mask->hdr.src_addr,
- ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.src_addr)))
+ for (j = 0; j < ICE_IPV6_ADDR_LENGTH;
+ j++) {
+ if (ipv6_mask->hdr.src_addr[j]) {
input_set |= ICE_INSET_IPV6_SRC;
- if (!memcmp(ipv6_mask->hdr.dst_addr,
- ipv6_addr_mask,
- RTE_DIM(ipv6_mask->hdr.dst_addr)))
- input_set |= ICE_INSET_IPV6_DST;
- if (ipv6_mask->hdr.proto == UINT8_MAX)
+ break;
+ }
+ }
+ for (j = 0; j < ICE_IPV6_ADDR_LENGTH;
+ j++) {
+ if (ipv6_mask->hdr.dst_addr[j]) {
+ input_set |=
+ ICE_INSET_IPV6_DST;
+ break;
+ }
+ }
+ if (ipv6_mask->hdr.proto)
input_set |=
ICE_INSET_IPV6_NEXT_HDR;
- if (ipv6_mask->hdr.hop_limits ==
- UINT8_MAX)
+ if (ipv6_mask->hdr.hop_limits)
input_set |=
ICE_INSET_IPV6_HOP_LIMIT;
- if ((ipv6_mask->hdr.vtc_flow &
+ if (ipv6_mask->hdr.vtc_flow &
rte_cpu_to_be_32
(RTE_IPV6_HDR_TC_MASK))
- == rte_cpu_to_be_32
- (RTE_IPV6_HDR_TC_MASK))
input_set |= ICE_INSET_IPV6_TC;
}
list[t].type = (tunnel_valid == 0) ?
@@ -568,35 +564,33 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
f = &list[t].h_u.ipv6_hdr;
s = &list[t].m_u.ipv6_hdr;
for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
- if (ipv6_mask->hdr.src_addr[j] ==
- UINT8_MAX) {
+ if (ipv6_mask->hdr.src_addr[j]) {
f->src_addr[j] =
ipv6_spec->hdr.src_addr[j];
s->src_addr[j] =
ipv6_mask->hdr.src_addr[j];
}
- if (ipv6_mask->hdr.dst_addr[j] ==
- UINT8_MAX) {
+ if (ipv6_mask->hdr.dst_addr[j]) {
f->dst_addr[j] =
ipv6_spec->hdr.dst_addr[j];
s->dst_addr[j] =
ipv6_mask->hdr.dst_addr[j];
}
}
- if (ipv6_mask->hdr.proto == UINT8_MAX) {
+ if (ipv6_mask->hdr.proto) {
f->next_hdr =
ipv6_spec->hdr.proto;
- s->next_hdr = UINT8_MAX;
+ s->next_hdr =
+ ipv6_mask->hdr.proto;
}
- if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+ if (ipv6_mask->hdr.hop_limits) {
f->hop_limit =
ipv6_spec->hdr.hop_limits;
- s->hop_limit = UINT8_MAX;
+ s->hop_limit =
+ ipv6_mask->hdr.hop_limits;
}
- if ((ipv6_mask->hdr.vtc_flow &
+ if (ipv6_mask->hdr.vtc_flow &
rte_cpu_to_be_32
- (RTE_IPV6_HDR_TC_MASK))
- == rte_cpu_to_be_32
(RTE_IPV6_HDR_TC_MASK)) {
struct ice_le_ver_tc_flow vtf;
vtf.u.fld.version = 0;
@@ -606,13 +600,13 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
RTE_IPV6_HDR_TC_MASK) >>
RTE_IPV6_HDR_TC_SHIFT;
f->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
- vtf.u.fld.tc = UINT8_MAX;
+ vtf.u.fld.tc = (rte_be_to_cpu_32
+ (ipv6_mask->hdr.vtc_flow) &
+ RTE_IPV6_HDR_TC_MASK) >>
+ RTE_IPV6_HDR_TC_SHIFT;
s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
}
t++;
- } else if (!ipv6_spec && !ipv6_mask) {
- list[t].type = (tun_type == ICE_NON_TUN) ?
- ICE_IPV4_OFOS : ICE_IPV4_IL;
}
break;
@@ -631,21 +625,17 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (tunnel_valid) {
- if (udp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (udp_mask->hdr.src_port)
input_set |=
ICE_INSET_TUN_UDP_SRC_PORT;
- if (udp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (udp_mask->hdr.dst_port)
input_set |=
ICE_INSET_TUN_UDP_DST_PORT;
} else {
- if (udp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (udp_mask->hdr.src_port)
input_set |=
ICE_INSET_UDP_SRC_PORT;
- if (udp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (udp_mask->hdr.dst_port)
input_set |=
ICE_INSET_UDP_DST_PORT;
}
@@ -654,21 +644,19 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
list[t].type = ICE_UDP_OF;
else
list[t].type = ICE_UDP_ILOS;
- if (udp_mask->hdr.src_port == UINT16_MAX) {
+ if (udp_mask->hdr.src_port) {
list[t].h_u.l4_hdr.src_port =
udp_spec->hdr.src_port;
list[t].m_u.l4_hdr.src_port =
udp_mask->hdr.src_port;
}
- if (udp_mask->hdr.dst_port == UINT16_MAX) {
+ if (udp_mask->hdr.dst_port) {
list[t].h_u.l4_hdr.dst_port =
udp_spec->hdr.dst_port;
list[t].m_u.l4_hdr.dst_port =
udp_mask->hdr.dst_port;
}
t++;
- } else if (!udp_spec && !udp_mask) {
- list[t].type = ICE_UDP_ILOS;
}
break;
@@ -692,40 +680,34 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (tunnel_valid) {
- if (tcp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (tcp_mask->hdr.src_port)
input_set |=
ICE_INSET_TUN_TCP_SRC_PORT;
- if (tcp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (tcp_mask->hdr.dst_port)
input_set |=
ICE_INSET_TUN_TCP_DST_PORT;
} else {
- if (tcp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (tcp_mask->hdr.src_port)
input_set |=
ICE_INSET_TCP_SRC_PORT;
- if (tcp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (tcp_mask->hdr.dst_port)
input_set |=
ICE_INSET_TCP_DST_PORT;
}
list[t].type = ICE_TCP_IL;
- if (tcp_mask->hdr.src_port == UINT16_MAX) {
+ if (tcp_mask->hdr.src_port) {
list[t].h_u.l4_hdr.src_port =
tcp_spec->hdr.src_port;
list[t].m_u.l4_hdr.src_port =
tcp_mask->hdr.src_port;
}
- if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+ if (tcp_mask->hdr.dst_port) {
list[t].h_u.l4_hdr.dst_port =
tcp_spec->hdr.dst_port;
list[t].m_u.l4_hdr.dst_port =
tcp_mask->hdr.dst_port;
}
t++;
- } else if (!tcp_spec && !tcp_mask) {
- list[t].type = ICE_TCP_IL;
}
break;
@@ -743,40 +725,34 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (tunnel_valid) {
- if (sctp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (sctp_mask->hdr.src_port)
input_set |=
ICE_INSET_TUN_SCTP_SRC_PORT;
- if (sctp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (sctp_mask->hdr.dst_port)
input_set |=
ICE_INSET_TUN_SCTP_DST_PORT;
} else {
- if (sctp_mask->hdr.src_port ==
- UINT16_MAX)
+ if (sctp_mask->hdr.src_port)
input_set |=
ICE_INSET_SCTP_SRC_PORT;
- if (sctp_mask->hdr.dst_port ==
- UINT16_MAX)
+ if (sctp_mask->hdr.dst_port)
input_set |=
ICE_INSET_SCTP_DST_PORT;
}
list[t].type = ICE_SCTP_IL;
- if (sctp_mask->hdr.src_port == UINT16_MAX) {
+ if (sctp_mask->hdr.src_port) {
list[t].h_u.sctp_hdr.src_port =
sctp_spec->hdr.src_port;
list[t].m_u.sctp_hdr.src_port =
sctp_mask->hdr.src_port;
}
- if (sctp_mask->hdr.dst_port == UINT16_MAX) {
+ if (sctp_mask->hdr.dst_port) {
list[t].h_u.sctp_hdr.dst_port =
sctp_spec->hdr.dst_port;
list[t].m_u.sctp_hdr.dst_port =
sctp_mask->hdr.dst_port;
}
t++;
- } else if (!sctp_spec && !sctp_mask) {
- list[t].type = ICE_SCTP_IL;
}
break;
@@ -799,21 +775,21 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
tunnel_valid = 1;
if (vxlan_spec && vxlan_mask) {
list[t].type = ICE_VXLAN;
- if (vxlan_mask->vni[0] == UINT8_MAX &&
- vxlan_mask->vni[1] == UINT8_MAX &&
- vxlan_mask->vni[2] == UINT8_MAX) {
+ if (vxlan_mask->vni[0] ||
+ vxlan_mask->vni[1] ||
+ vxlan_mask->vni[2]) {
list[t].h_u.tnl_hdr.vni =
(vxlan_spec->vni[2] << 16) |
(vxlan_spec->vni[1] << 8) |
vxlan_spec->vni[0];
list[t].m_u.tnl_hdr.vni =
- UINT32_MAX;
+ (vxlan_mask->vni[2] << 16) |
+ (vxlan_mask->vni[1] << 8) |
+ vxlan_mask->vni[0];
input_set |=
ICE_INSET_TUN_VXLAN_VNI;
}
t++;
- } else if (!vxlan_spec && !vxlan_mask) {
- list[t].type = ICE_VXLAN;
}
break;
@@ -835,21 +811,21 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
tunnel_valid = 1;
if (nvgre_spec && nvgre_mask) {
list[t].type = ICE_NVGRE;
- if (nvgre_mask->tni[0] == UINT8_MAX &&
- nvgre_mask->tni[1] == UINT8_MAX &&
- nvgre_mask->tni[2] == UINT8_MAX) {
+ if (nvgre_mask->tni[0] ||
+ nvgre_mask->tni[1] ||
+ nvgre_mask->tni[2]) {
list[t].h_u.nvgre_hdr.tni_flow =
(nvgre_spec->tni[2] << 16) |
(nvgre_spec->tni[1] << 8) |
nvgre_spec->tni[0];
list[t].m_u.nvgre_hdr.tni_flow =
- UINT32_MAX;
+ (nvgre_mask->tni[2] << 16) |
+ (nvgre_mask->tni[1] << 8) |
+ nvgre_mask->tni[0];
input_set |=
ICE_INSET_TUN_NVGRE_TNI;
}
t++;
- } else if (!nvgre_spec && !nvgre_mask) {
- list[t].type = ICE_NVGRE;
}
break;
@@ -870,23 +846,21 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
}
if (vlan_spec && vlan_mask) {
list[t].type = ICE_VLAN_OFOS;
- if (vlan_mask->tci == UINT16_MAX) {
+ if (vlan_mask->tci) {
list[t].h_u.vlan_hdr.vlan =
vlan_spec->tci;
list[t].m_u.vlan_hdr.vlan =
- UINT16_MAX;
+ vlan_mask->tci;
input_set |= ICE_INSET_VLAN_OUTER;
}
- if (vlan_mask->inner_type == UINT16_MAX) {
+ if (vlan_mask->inner_type) {
list[t].h_u.vlan_hdr.type =
vlan_spec->inner_type;
list[t].m_u.vlan_hdr.type =
- UINT16_MAX;
+ vlan_mask->inner_type;
input_set |= ICE_INSET_VLAN_OUTER;
}
t++;
- } else if (!vlan_spec && !vlan_mask) {
- list[t].type = ICE_VLAN_OFOS;
}
break;
@@ -918,19 +892,16 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
return 0;
}
list[t].type = ICE_PPPOE;
- if (pppoe_mask->session_id == UINT16_MAX) {
+ if (pppoe_mask->session_id) {
list[t].h_u.pppoe_hdr.session_id =
pppoe_spec->session_id;
list[t].m_u.pppoe_hdr.session_id =
- UINT16_MAX;
+ pppoe_mask->session_id;
input_set |= ICE_INSET_PPPOE_SESSION;
}
t++;
pppoe_valid = 1;
- } else if (!pppoe_spec && !pppoe_mask) {
- list[t].type = ICE_PPPOE;
}
-
break;
case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
@@ -953,18 +924,15 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
if (pppoe_valid)
t--;
list[t].type = ICE_PPPOE;
- if (pppoe_proto_mask->proto_id == UINT16_MAX) {
+ if (pppoe_proto_mask->proto_id) {
list[t].h_u.pppoe_hdr.ppp_prot_id =
pppoe_proto_spec->proto_id;
list[t].m_u.pppoe_hdr.ppp_prot_id =
- UINT16_MAX;
+ pppoe_proto_mask->proto_id;
input_set |= ICE_INSET_PPPOE_PROTO;
}
t++;
- } else if (!pppoe_proto_spec && !pppoe_proto_mask) {
- list[t].type = ICE_PPPOE;
}
-
break;
case RTE_FLOW_ITEM_TYPE_VOID:
--
2.19.1
^ permalink raw reply [flat|nested] 69+ messages in thread
end of thread, other threads:[~2020-04-04 6:21 UTC | newest]
Thread overview: 69+ messages
2020-03-13 2:07 [dpdk-dev] [PATCH 0/7] add switch filter support for intel DCF Wei Zhao
2020-03-13 2:08 ` [dpdk-dev] [PATCH 1/7] net/ice: enable switch flow on DCF Wei Zhao
2020-03-13 2:08 ` [dpdk-dev] [PATCH 2/7] net/ice: support for more PPPoE input set Wei Zhao
2020-03-13 2:08 ` [dpdk-dev] [PATCH 3/7] net/ice: change swicth parser to support flexible mask Wei Zhao
2020-03-13 2:08 ` [dpdk-dev] [PATCH 4/7] net/ice: add support for MAC VLAN rule Wei Zhao
2020-03-13 2:08 ` [dpdk-dev] [PATCH 5/7] net/ice: change default tunnle type Wei Zhao
2020-03-13 2:08 ` [dpdk-dev] [PATCH 6/7] net/ice: add action number check for swicth Wei Zhao
2020-03-13 2:08 ` [dpdk-dev] [PATCH 7/7] net/ice: fix input set of VLAN item Wei Zhao
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 00/13]add switch filter support for intel DCF Wei Zhao
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 01/13] net/ice: enable switch flow on DCF Wei Zhao
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 02/13] net/ice: support for more PPPoE input set Wei Zhao
2020-04-02 9:31 ` Lu, Nannan
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 03/13] net/ice: change swicth parser to support flexible mask Wei Zhao
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 04/13] net/ice: add support for MAC VLAN rule Wei Zhao
2020-04-02 9:21 ` Lu, Nannan
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 05/13] net/ice: change default tunnle type Wei Zhao
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 06/13] net/ice: add action number check for swicth Wei Zhao
2020-04-02 8:29 ` Zhang, Qi Z
2020-04-02 8:31 ` Zhao1, Wei
2020-04-03 1:49 ` Lu, Nannan
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 07/13] net/ice: add support for ESP/AH/L2TP Wei Zhao
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 08/13] net/ice: add support for PFCP Wei Zhao
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 09/13] net/ice: add support for NAT-T Wei Zhao
2020-04-02 8:45 ` Zhang, Qi Z
2020-04-02 23:37 ` Zhao1, Wei
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 10/13] net/ice: add more flow support for permit mode Wei Zhao
2020-04-02 8:45 ` Zhang, Qi Z
2020-04-02 9:41 ` Zhao1, Wei
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 11/13] net/ice: fix input set of VLAN item Wei Zhao
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 12/13] net/ice: enable flow redirect on switch Wei Zhao
2020-04-02 7:34 ` Wang, Haiyue
2020-04-02 7:38 ` Xing, Beilei
2020-04-02 6:46 ` [dpdk-dev] [PATCH v2 13/13] net/ice: redirect switch rule to new VSI Wei Zhao
2020-04-02 7:32 ` Wang, Haiyue
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 00/13] add switch filter support for intel DCF Wei Zhao
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 01/13] net/ice: enable switch flow on DCF Wei Zhao
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 02/13] net/ice: support for more PPPoE input set Wei Zhao
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 03/13] net/ice: change swicth parser to support flexible mask Wei Zhao
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 04/13] net/ice: add support for MAC VLAN rule Wei Zhao
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 05/13] net/ice: change default tunnle type Wei Zhao
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 06/13] net/ice: add action number check for swicth Wei Zhao
2020-04-03 3:15 ` Zhang, Qi Z
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 07/13] net/ice: add support for ESP/AH/L2TP Wei Zhao
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 08/13] net/ice: add support for PFCP Wei Zhao
2020-04-03 3:16 ` Zhang, Qi Z
2020-04-03 3:18 ` Zhao1, Wei
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 09/13] net/ice: add support for IPv6 NAT-T Wei Zhao
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 10/13] net/ice: add more flow support for permit stage Wei Zhao
2020-04-03 3:20 ` Zhang, Qi Z
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 11/13] net/ice: fix input set of VLAN item Wei Zhao
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 12/13] net/ice: enable flow redirect on switch Wei Zhao
2020-04-03 2:43 ` [dpdk-dev] [PATCH v3 13/13] net/ice: redirect switch rule to new VSI Wei Zhao
2020-04-03 4:45 ` [dpdk-dev] [PATCH v4 00/13] add switch filter support for intel DCF Wei Zhao
2020-04-03 4:45 ` [dpdk-dev] [PATCH v4 01/13] net/ice: enable switch flow on DCF Wei Zhao
2020-04-03 4:45 ` [dpdk-dev] [PATCH v4 02/13] net/ice: support for more PPPoE input set Wei Zhao
2020-04-03 4:45 ` [dpdk-dev] [PATCH v4 03/13] net/ice: change swicth parser to support flexible mask Wei Zhao
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 04/13] net/ice: add support for MAC VLAN rule Wei Zhao
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 05/13] net/ice: change default tunnle type Wei Zhao
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 06/13] net/ice: add action number check for swicth Wei Zhao
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 07/13] net/ice: add support for ESP/AH/L2TP Wei Zhao
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 08/13] net/ice: add support for PFCP Wei Zhao
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 09/13] net/ice: add support for IPv6 NAT-T Wei Zhao
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 10/13] net/ice: add more flow support for permission stage Wei Zhao
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 11/13] net/ice: fix input set of VLAN item Wei Zhao
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 12/13] net/ice: enable flow redirect on switch Wei Zhao
2020-04-03 4:46 ` [dpdk-dev] [PATCH v4 13/13] net/ice: redirect switch rule to new VSI Wei Zhao
2020-04-03 5:09 ` [dpdk-dev] [PATCH v4 00/13] add switch filter support for intel DCF Zhang, Qi Z
2020-04-04 6:17 ` Ye Xiaolong
-- strict thread matches above, loose matches on Subject: below --
2020-03-13 1:04 [dpdk-dev] [PATCH 0/7] " wei.zhao1@intel.com
2020-03-13 1:04 ` [dpdk-dev] [PATCH 3/7] net/ice: change swicth parser to support flexible mask wei.zhao1@intel.com